Skip to content

Commit b294bb0

Browse files
authored
Merge pull request #7493 from continuedev/jacob/con-3748
docs: add lots of comments
2 parents e34bc00 + 3ba290a commit b294bb0

File tree

11 files changed

+128
-401
lines changed

11 files changed

+128
-401
lines changed

core/core.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -668,6 +668,8 @@ export class Core {
668668
return queue.dequeueProcessed() || null;
669669
});
670670

671+
// NOTE: This is not used unless prefetch is used.
672+
// At this point this is not used because I opted to rely on the model to return multiple diffs rather than to use prefetching.
671673
on("nextEdit/queue/processOne", async (msg) => {
672674
console.log("nextEdit/queue/processOne");
673675
const { ctx, recentlyVisitedRanges, recentlyEditedRanges } = msg.data;

core/nextEdit/NextEditEditableRegionCalculator.ts

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,10 @@ export enum EditableRegionStrategy {
1313
Static = "static",
1414
}
1515

16+
/**
17+
* This was an attempt to find next edit locations deterministically.
18+
* I was intending to use this in tandem with the prefetching logic, but we are not using it anymore.
19+
*/
1620
export async function getNextEditableRegion(
1721
strategy: EditableRegionStrategy,
1822
ctx: any,

core/nextEdit/NextEditPrefetchQueue.ts

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,11 @@ export interface ProcessedItem {
1010
/**
1111
* Keeps a queue of the broken down diffs from a changed editable range, as determined in core/nextEdit/diff.ts
1212
*/
13+
/**
14+
* This is where the chain is stored. Think of it as a regular queue, except it is a singleton because we need one source of truth for the chain.
15+
* I originally intended this to be a separate data structure to handle prefetching next edit outcomes from the model in the background.
16+
* Due to subpar results, a lack of satisfactory next edit location suggestion algorithms, and token cost/latency issues, I scrapped the idea.
17+
*/
1318
export class PrefetchQueue {
1419
private static instance: PrefetchQueue | null = null;
1520

core/nextEdit/NextEditProvider.ts

Lines changed: 25 additions & 96 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,6 @@ import { HelperVars } from "../autocomplete/util/HelperVars.js";
1919
import { AutocompleteInput } from "../autocomplete/util/types.js";
2020
import { isSecurityConcern } from "../indexing/ignore.js";
2121
import { modelSupportsNextEdit } from "../llm/autodetect.js";
22-
import { countTokens } from "../llm/countTokens.js";
2322
import { localPathOrUriToPath } from "../util/pathToUri.js";
2423
import { createDiff, DiffFormatType } from "./context/diffFormatting.js";
2524
import { DocumentHistoryTracker } from "./DocumentHistoryTracker.js";
@@ -45,6 +44,14 @@ const ERRORS_TO_IGNORE = [
4544
"operation was aborted",
4645
];
4746

47+
/**
48+
* This is the next edit analogue to autocomplete's CompletionProvider.
49+
* You will see a lot of similar if not identical methods to CompletionProvider methods.
50+
* All logic used to live inside this class, but that became untenable quickly.
51+
* I moved a lot of the model-specific logic (prompt building, pre/post processing, etc.) to the BaseNextEditProvider and the children inheriting from it.
52+
* Keeping this class around might be a good idea because it handles lots of delicate logic such as abort signals, chains, logging, etc.
53+
* There being a singleton also gives a lot of guarantees about the state of the next edit state machine.
54+
*/
4855
export class NextEditProvider {
4956
private static instance: NextEditProvider | null = null;
5057

@@ -62,7 +69,6 @@ export class NextEditProvider {
6269
private currentEditChainId: string | null = null;
6370
private previousRequest: AutocompleteInput | null = null;
6471
private previousCompletions: NextEditOutcome[] = [];
65-
// private nextEditableRegionsInTheCurrentChain: RangeInFile[] = [];
6672

6773
// Model-specific provider instance.
6874
private modelProvider: BaseNextEditModelProvider | null = null;
@@ -223,14 +229,14 @@ export class NextEditProvider {
223229

224230
this.currentEditChainId = null;
225231
this.previousCompletions = [];
226-
// TODO: this should be cleaned up in the prefetch queue.
227-
// this.nextEditableRegionsInTheCurrentChain = [];
228232

229233
if (this.previousRequest) {
230234
const fileContent = (
231235
await this.ide.readFile(this.previousRequest.filepath)
232236
).toString();
237+
233238
const ast = await getAst(this.previousRequest.filepath, fileContent);
239+
234240
if (ast) {
235241
DocumentHistoryTracker.getInstance().push(
236242
localPathOrUriToPath(this.previousRequest.filepath),
@@ -253,6 +259,9 @@ export class NextEditProvider {
253259
return this.previousCompletions.length === 1;
254260
}
255261

262+
/**
263+
* This is the main entry point to this class.
264+
*/
256265
public async provideInlineCompletionItems(
257266
input: AutocompleteInput,
258267
token: AbortSignal | undefined,
@@ -370,6 +379,8 @@ export class NextEditProvider {
370379
throw new Error("Model provider not initialized");
371380
}
372381

382+
// NOTE: getAllSnippetsWithoutRace doesn't seem to incur much performance penalties when compared to getAllSnippets.
383+
// Use getAllSnippets if snippet gathering becomes noticeably slow.
373384
const [snippetPayload, workspaceDirs] = await Promise.all([
374385
getAllSnippetsWithoutRace({
375386
helper,
@@ -445,6 +456,9 @@ export class NextEditProvider {
445456
}
446457

447458
// Send prompts to LLM (using only user prompt for fine-tuned models).
459+
// prompts[1] extracts the user prompt from the system-user prompt pair.
460+
// NOTE: Stream is currently set to false, but this should ideally be a per-model flag.
461+
// Mercury Coder currently does not support streaming.
448462
const msg: ChatMessage = await llm.chat([prompts[1]], token, {
449463
stream: false,
450464
});
@@ -484,7 +498,7 @@ export class NextEditProvider {
484498
}
485499

486500
if (outcome) {
487-
// Handle NextEditProvider-specific state
501+
// Handle NextEditProvider-specific state.
488502
this.previousCompletions.push(outcome);
489503

490504
// Mark as displayed for JetBrains
@@ -494,97 +508,6 @@ export class NextEditProvider {
494508
return outcome;
495509
}
496510

497-
private _calculateOptimalEditableRegion(
498-
helper: HelperVars,
499-
heuristic: "fourChars" | "tokenizer" = "tokenizer",
500-
): {
501-
editableRegionStartLine: number;
502-
editableRegionEndLine: number;
503-
} {
504-
const cursorLine = helper.pos.line;
505-
const fileLines = helper.fileLines;
506-
const MAX_TOKENS = 512;
507-
508-
// Initialize with cursor line.
509-
let editableRegionStartLine = cursorLine;
510-
let editableRegionEndLine = cursorLine;
511-
512-
// Get initial content and token count.
513-
let currentContent = fileLines[cursorLine];
514-
let totalTokens =
515-
heuristic === "tokenizer"
516-
? countTokens(currentContent, helper.modelName)
517-
: Math.ceil(currentContent.length / 4);
518-
519-
// Expand outward alternating between adding lines above and below.
520-
let addingAbove = true;
521-
522-
while (totalTokens < MAX_TOKENS) {
523-
let addedLine = false;
524-
525-
if (addingAbove) {
526-
// Try to add a line above.
527-
if (editableRegionStartLine > 0) {
528-
editableRegionStartLine--;
529-
const lineContent = fileLines[editableRegionStartLine];
530-
const lineTokens =
531-
heuristic === "tokenizer"
532-
? countTokens(lineContent, helper.modelName)
533-
: Math.ceil(lineContent.length / 4);
534-
535-
totalTokens += lineTokens;
536-
addedLine = true;
537-
}
538-
} else {
539-
// Try to add a line below.
540-
if (editableRegionEndLine < fileLines.length - 1) {
541-
editableRegionEndLine++;
542-
const lineContent = fileLines[editableRegionEndLine];
543-
const lineTokens =
544-
heuristic === "tokenizer"
545-
? countTokens(lineContent, helper.modelName)
546-
: Math.ceil(lineContent.length / 4);
547-
548-
totalTokens += lineTokens;
549-
addedLine = true;
550-
}
551-
}
552-
553-
// If we can't add in the current direction, try the other.
554-
if (!addedLine) {
555-
// If we're already at both file boundaries, we're done.
556-
if (
557-
editableRegionStartLine === 0 &&
558-
editableRegionEndLine === fileLines.length - 1
559-
) {
560-
break;
561-
}
562-
563-
// If we couldn't add in one direction, force the next attempt in the other direction.
564-
addingAbove = !addingAbove;
565-
continue;
566-
}
567-
568-
// If we exceeded the token limit, revert the last addition.
569-
if (totalTokens > MAX_TOKENS) {
570-
if (addingAbove) {
571-
editableRegionStartLine++;
572-
} else {
573-
editableRegionEndLine--;
574-
}
575-
break;
576-
}
577-
578-
// Alternate between adding above and below for balanced context.
579-
addingAbove = !addingAbove;
580-
}
581-
582-
return {
583-
editableRegionStartLine,
584-
editableRegionEndLine,
585-
};
586-
}
587-
588511
private async _markDisplayedIfJetBrains(
589512
completionId: string,
590513
outcome: NextEditOutcome,
@@ -595,6 +518,12 @@ export class NextEditProvider {
595518
}
596519
}
597520

521+
/**
522+
* This is a wrapper around provideInlineCompletionItems.
523+
* This is invoked when we call the model in the background using prefetch.
524+
* It's not currently used anywhere (its references are also unused), but I decided to keep it in case we actually need to use prefetch.
525+
* You will see that calls to this method are made from NextEditPrefetchQueue.process(), which is wrapped in `if (!this.usingFullFileDiff)`.
526+
*/
598527
public async provideInlineCompletionItemsWithChain(
599528
ctx: {
600529
completionId: string;

0 commit comments

Comments
 (0)