@@ -19,7 +19,6 @@ import { HelperVars } from "../autocomplete/util/HelperVars.js";
1919import { AutocompleteInput } from "../autocomplete/util/types.js" ;
2020import { isSecurityConcern } from "../indexing/ignore.js" ;
2121import { modelSupportsNextEdit } from "../llm/autodetect.js" ;
22- import { countTokens } from "../llm/countTokens.js" ;
2322import { localPathOrUriToPath } from "../util/pathToUri.js" ;
2423import { createDiff , DiffFormatType } from "./context/diffFormatting.js" ;
2524import { DocumentHistoryTracker } from "./DocumentHistoryTracker.js" ;
@@ -45,6 +44,14 @@ const ERRORS_TO_IGNORE = [
4544 "operation was aborted" ,
4645] ;
4746
47+ /**
48+ * This is the next edit analogue to autocomplete's CompletionProvider.
49+ * You will see a lot of similar if not identical methods to CompletionProvider methods.
50+ * All logic used to live inside this class, but that became untenable quickly.
51+ * I moved a lot of the model-specific logic (prompt building, pre/post processing, etc.) to the BaseNextEditProvider and the children inheriting from it.
52+ * Keeping this class around might be a good idea because it handles lots of delicate logic such as abort signals, chains, logging, etc.
53+ * Its being a singleton also provides strong guarantees about the state of the next edit state machine.
54+ */
4855export class NextEditProvider {
4956 private static instance : NextEditProvider | null = null ;
5057
@@ -62,7 +69,6 @@ export class NextEditProvider {
6269 private currentEditChainId : string | null = null ;
6370 private previousRequest : AutocompleteInput | null = null ;
6471 private previousCompletions : NextEditOutcome [ ] = [ ] ;
65- // private nextEditableRegionsInTheCurrentChain: RangeInFile[] = [];
6672
6773 // Model-specific provider instance.
6874 private modelProvider : BaseNextEditModelProvider | null = null ;
@@ -223,14 +229,14 @@ export class NextEditProvider {
223229
224230 this . currentEditChainId = null ;
225231 this . previousCompletions = [ ] ;
226- // TODO: this should be cleaned up in the prefetch queue.
227- // this.nextEditableRegionsInTheCurrentChain = [];
228232
229233 if ( this . previousRequest ) {
230234 const fileContent = (
231235 await this . ide . readFile ( this . previousRequest . filepath )
232236 ) . toString ( ) ;
237+
233238 const ast = await getAst ( this . previousRequest . filepath , fileContent ) ;
239+
234240 if ( ast ) {
235241 DocumentHistoryTracker . getInstance ( ) . push (
236242 localPathOrUriToPath ( this . previousRequest . filepath ) ,
@@ -253,6 +259,9 @@ export class NextEditProvider {
253259 return this . previousCompletions . length === 1 ;
254260 }
255261
262+ /**
263+ * This is the main entry point to this class.
264+ */
256265 public async provideInlineCompletionItems (
257266 input : AutocompleteInput ,
258267 token : AbortSignal | undefined ,
@@ -370,6 +379,8 @@ export class NextEditProvider {
370379 throw new Error ( "Model provider not initialized" ) ;
371380 }
372381
382+ // NOTE: getAllSnippetsWithoutRace doesn't seem to incur much of a performance penalty when compared to getAllSnippets.
383+ // Use getAllSnippets if snippet gathering becomes noticeably slow.
373384 const [ snippetPayload , workspaceDirs ] = await Promise . all ( [
374385 getAllSnippetsWithoutRace ( {
375386 helper,
@@ -445,6 +456,9 @@ export class NextEditProvider {
445456 }
446457
447458 // Send prompts to LLM (using only user prompt for fine-tuned models).
459+ // prompts[1] extracts the user prompt from the system-user prompt pair.
460+ // NOTE: Stream is currently set to false, but this should ideally be a per-model flag.
461+ // Mercury Coder currently does not support streaming.
448462 const msg : ChatMessage = await llm . chat ( [ prompts [ 1 ] ] , token , {
449463 stream : false ,
450464 } ) ;
@@ -484,7 +498,7 @@ export class NextEditProvider {
484498 }
485499
486500 if ( outcome ) {
487- // Handle NextEditProvider-specific state
501+ // Handle NextEditProvider-specific state.
488502 this . previousCompletions . push ( outcome ) ;
489503
490504 // Mark as displayed for JetBrains
@@ -494,97 +508,6 @@ export class NextEditProvider {
494508 return outcome ;
495509 }
496510
497- private _calculateOptimalEditableRegion (
498- helper : HelperVars ,
499- heuristic : "fourChars" | "tokenizer" = "tokenizer" ,
500- ) : {
501- editableRegionStartLine : number ;
502- editableRegionEndLine : number ;
503- } {
504- const cursorLine = helper . pos . line ;
505- const fileLines = helper . fileLines ;
506- const MAX_TOKENS = 512 ;
507-
508- // Initialize with cursor line.
509- let editableRegionStartLine = cursorLine ;
510- let editableRegionEndLine = cursorLine ;
511-
512- // Get initial content and token count.
513- let currentContent = fileLines [ cursorLine ] ;
514- let totalTokens =
515- heuristic === "tokenizer"
516- ? countTokens ( currentContent , helper . modelName )
517- : Math . ceil ( currentContent . length / 4 ) ;
518-
519- // Expand outward alternating between adding lines above and below.
520- let addingAbove = true ;
521-
522- while ( totalTokens < MAX_TOKENS ) {
523- let addedLine = false ;
524-
525- if ( addingAbove ) {
526- // Try to add a line above.
527- if ( editableRegionStartLine > 0 ) {
528- editableRegionStartLine -- ;
529- const lineContent = fileLines [ editableRegionStartLine ] ;
530- const lineTokens =
531- heuristic === "tokenizer"
532- ? countTokens ( lineContent , helper . modelName )
533- : Math . ceil ( lineContent . length / 4 ) ;
534-
535- totalTokens += lineTokens ;
536- addedLine = true ;
537- }
538- } else {
539- // Try to add a line below.
540- if ( editableRegionEndLine < fileLines . length - 1 ) {
541- editableRegionEndLine ++ ;
542- const lineContent = fileLines [ editableRegionEndLine ] ;
543- const lineTokens =
544- heuristic === "tokenizer"
545- ? countTokens ( lineContent , helper . modelName )
546- : Math . ceil ( lineContent . length / 4 ) ;
547-
548- totalTokens += lineTokens ;
549- addedLine = true ;
550- }
551- }
552-
553- // If we can't add in the current direction, try the other.
554- if ( ! addedLine ) {
555- // If we're already at both file boundaries, we're done.
556- if (
557- editableRegionStartLine === 0 &&
558- editableRegionEndLine === fileLines . length - 1
559- ) {
560- break ;
561- }
562-
563- // If we couldn't add in one direction, force the next attempt in the other direction.
564- addingAbove = ! addingAbove ;
565- continue ;
566- }
567-
568- // If we exceeded the token limit, revert the last addition.
569- if ( totalTokens > MAX_TOKENS ) {
570- if ( addingAbove ) {
571- editableRegionStartLine ++ ;
572- } else {
573- editableRegionEndLine -- ;
574- }
575- break ;
576- }
577-
578- // Alternate between adding above and below for balanced context.
579- addingAbove = ! addingAbove ;
580- }
581-
582- return {
583- editableRegionStartLine,
584- editableRegionEndLine,
585- } ;
586- }
587-
588511 private async _markDisplayedIfJetBrains (
589512 completionId : string ,
590513 outcome : NextEditOutcome ,
@@ -595,6 +518,12 @@ export class NextEditProvider {
595518 }
596519 }
597520
521+ /**
522+ * This is a wrapper around provideInlineCompletionItems.
523+ * This is invoked when we call the model in the background using prefetch.
524+ * It's not currently used anywhere (its references are themselves unused), but I decided to keep it in case we actually need to use prefetch.
525+ * You will see that calls to this method are made from NextEditPrefetchQueue.process(), which is wrapped in `if (!this.usingFullFileDiff)`.
526+ */
598527 public async provideInlineCompletionItemsWithChain (
599528 ctx : {
600529 completionId : string ;
0 commit comments