@@ -21,6 +21,8 @@ import type { SplitListener } from './splitListener';
 import type { EntityCollector } from './entityCollector';
 import { EntityContext } from './entityCollector';
 
+const SEPARATOR: string = ';';
+
 /**
  * Basic SQL class, every sql needs extends it.
  */
@@ -65,13 +67,11 @@ export abstract class BasicSQL<
      * @param candidates candidate list
      * @param allTokens all tokens from input
      * @param caretTokenIndex tokenIndex of caretPosition
-     * @param tokenIndexOffset offset of the tokenIndex in the candidates compared to the tokenIndex in allTokens
      */
     protected abstract processCandidates(
         candidates: CandidatesCollection,
         allTokens: Token[],
-        caretTokenIndex: number,
-        tokenIndexOffset: number
+        caretTokenIndex: number
     ): Suggestions<Token>;
 
     /**
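
With `tokenIndexOffset` gone, dialect implementations of `processCandidates` only receive the candidates, the token list, and a caret index that is already relative to that list. The sketch below is a hypothetical, standalone illustration of the new three-parameter shape, not code from this PR; it assumes only the `CandidatesCollection` type from antlr4-c3 and uses a minimal structural token type instead of the project's `Token` import.

```ts
import type { CandidatesCollection } from 'antlr4-c3';

// Hypothetical sketch of the slimmed-down hook: caretTokenIndex is already
// expressed relative to allTokens, so no offset arithmetic is needed.
function processCandidatesSketch<T extends { text?: string }>(
    candidates: CandidatesCollection,
    allTokens: T[],
    caretTokenIndex: number
): { candidateTokenTypes: number[]; caretText?: string } {
    // Token types proposed by antlr4-c3 at the caret.
    const candidateTokenTypes = Array.from(candidates.tokens.keys());
    // Direct lookup into the (possibly already sliced) token list.
    const caretText = allTokens[caretTokenIndex]?.text;
    return { candidateTokenTypes, caretText };
}
```
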
@@ -251,6 +251,78 @@ export abstract class BasicSQL<
         return res;
     }
 
+    /**
+     * Get a minimal slice of the input, bounded by statement separators around the caret
+     * @param input source string
+     * @param allTokens all tokens from input
+     * @param tokenIndexOffset tokenIndex of the first token of the caret statement within allTokens
+     * @param caretTokenIndex tokenIndex of caretPosition
+     * @returns the sliced input, its tokens, and the caretTokenIndex relative to that slice
+     */
+    private splitInputBySeparator(
+        input: string,
+        allTokens: Token[],
+        tokenIndexOffset: number,
+        caretTokenIndex: number
+    ): { inputSlice: string; allTokens: Token[]; caretTokenIndex: number } {
+        const _allTokens = allTokens.slice(tokenIndexOffset);
+        /**
+         * Set startToken
+         */
+        let startToken: Token | null = null;
+        for (let tokenIndex = caretTokenIndex - tokenIndexOffset; tokenIndex >= 0; tokenIndex--) {
+            const token = _allTokens[tokenIndex];
+            if (token?.text === SEPARATOR) {
+                startToken = _allTokens[tokenIndex + 1];
+                break;
+            }
+        }
+        if (startToken === null) {
+            startToken = _allTokens[0];
+        }
+
+        /**
+         * Set stopToken
+         */
+        let stopToken: Token | null = null;
+        for (
+            let tokenIndex = caretTokenIndex - tokenIndexOffset;
+            tokenIndex < _allTokens.length;
+            tokenIndex++
+        ) {
+            const token = _allTokens[tokenIndex];
+            if (token?.text === SEPARATOR) {
+                stopToken = token;
+                break;
+            }
+        }
+        if (stopToken === null) {
+            stopToken = _allTokens[_allTokens.length - 1];
+        }
+
+        const indexOffset = _allTokens[0].start;
+        let startIndex = startToken.start - indexOffset;
+        let stopIndex = stopToken.stop + 1 - indexOffset;
+
+        /**
+         * Save offset of the tokenIndex in the range of input
+         * compared to the tokenIndex in the whole input
+         */
+        const _tokenIndexOffset = startToken.tokenIndex;
+        const _caretTokenIndex = caretTokenIndex - _tokenIndexOffset;
+
+        /**
+         * Get the smaller range of _input
+         */
+        const _input = input.slice(startIndex, stopIndex);
+
+        return {
+            inputSlice: _input,
+            allTokens: allTokens.slice(_tokenIndexOffset),
+            caretTokenIndex: _caretTokenIndex,
+        };
+    }
+
     /**
      * Get suggestions of syntax and token at caretPosition
      * @param input source string
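
To make the boundary scan above easier to follow, here is a self-contained re-creation of its core idea with a hand-built token list (a hypothetical `MiniToken` shape rather than real ANTLR tokens, with the `tokenIndexOffset`/`indexOffset` adjustments left out for brevity): walk left from the caret to the previous separator, walk right to the next one, then slice the text and remap the caret index.

```ts
// Hypothetical, simplified token shape with just the fields the scan relies on.
interface MiniToken {
    text: string;
    start: number;      // character offset of the token's first character
    stop: number;       // character offset of the token's last character
    tokenIndex: number; // position in the token stream
}

const SEPARATOR = ';';
const input = 'SELECT 1; SELECT * FROM ';
// Hand-built token list for the input above (whitespace omitted, as on a hidden channel).
const tokens: MiniToken[] = [
    { text: 'SELECT', start: 0, stop: 5, tokenIndex: 0 },
    { text: '1', start: 7, stop: 7, tokenIndex: 1 },
    { text: ';', start: 8, stop: 8, tokenIndex: 2 },
    { text: 'SELECT', start: 10, stop: 15, tokenIndex: 3 },
    { text: '*', start: 17, stop: 17, tokenIndex: 4 },
    { text: 'FROM', start: 19, stop: 22, tokenIndex: 5 },
];
const caretTokenIndex = 5; // the caret sits right after FROM

// Walk left from the caret to the previous separator; the slice starts just after it.
let startToken = tokens[0];
for (let i = caretTokenIndex; i >= 0; i--) {
    if (tokens[i].text === SEPARATOR) {
        startToken = tokens[i + 1];
        break;
    }
}
// Walk right from the caret to the next separator; fall back to the last token.
let stopToken = tokens[tokens.length - 1];
for (let i = caretTokenIndex; i < tokens.length; i++) {
    if (tokens[i].text === SEPARATOR) {
        stopToken = tokens[i];
        break;
    }
}

const inputSlice = input.slice(startToken.start, stopToken.stop + 1); // 'SELECT * FROM'
const caretTokenIndexInSlice = caretTokenIndex - startToken.tokenIndex; // 2 (FROM)
console.log(inputSlice, caretTokenIndexInSlice);
```
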
@@ -262,12 +334,13 @@ export abstract class BasicSQL<
         caretPosition: CaretPosition
     ): Suggestions | null {
         const splitListener = this.splitListener;
+        let inputSlice = input;
 
-        this.parseWithCache(input);
+        this.parseWithCache(inputSlice);
         if (!this._parseTree) return null;
 
         let sqlParserIns = this._parser;
-        const allTokens = this.getAllTokens(input);
+        let allTokens = this.getAllTokens(inputSlice);
         let caretTokenIndex = findCaretTokenIndex(caretPosition, allTokens);
         let c3Context: ParserRuleContext = this._parseTree;
         let tokenIndexOffset: number = 0;
@@ -321,22 +394,43 @@ export abstract class BasicSQL<
             }
 
             // A boundary consisting of the index of the input.
-            const startIndex = startStatement?.start?.start ?? 0;
-            const stopIndex = stopStatement?.stop?.stop ?? input.length - 1;
+            let startIndex = startStatement?.start?.start ?? 0;
+            let stopIndex = stopStatement?.stop?.stop ?? inputSlice.length - 1;
 
             /**
              * Save offset of the tokenIndex in the range of input
              * compared to the tokenIndex in the whole input
              */
             tokenIndexOffset = startStatement?.start?.tokenIndex ?? 0;
-            caretTokenIndex = caretTokenIndex - tokenIndexOffset;
+            inputSlice = inputSlice.slice(startIndex, stopIndex);
+        }
 
-            /**
-             * Reparse the input fragment,
-             * and c3 will collect candidates in the newly generated parseTree.
-             */
-            const inputSlice = input.slice(startIndex, stopIndex);
+        /**
+         * Split inputSlice by the statement separator to narrow it down to the statement under the caret.
+         */
+        if (inputSlice.includes(SEPARATOR)) {
+            const {
+                inputSlice: _input,
+                allTokens: _allTokens,
+                caretTokenIndex: _caretTokenIndex,
+            } = this.splitInputBySeparator(
+                inputSlice,
+                allTokens,
+                tokenIndexOffset,
+                caretTokenIndex
+            );
+
+            allTokens = _allTokens;
+            caretTokenIndex = _caretTokenIndex;
+            inputSlice = _input;
+        } else {
+            caretTokenIndex = caretTokenIndex - tokenIndexOffset;
+        }
 
+        /**
+         * Reparse the input fragment, so that c3 collects candidates from the newly generated parse tree when the input has changed.
+         */
+        if (inputSlice !== input) {
             const lexer = this.createLexer(inputSlice);
             lexer.removeErrorListeners();
             const tokenStream = new CommonTokenStream(lexer);
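
The subtraction in the `else` branch above is easier to see with concrete numbers (hypothetical values, for illustration only): once only the enclosing statement is reparsed, its token stream is renumbered from zero, so a caret index taken from the full input has to be shifted by the statement's first token index.

```ts
// Hypothetical numbers: the caret sits on token 12 of the whole input,
// and the statement containing it starts at token 9 of the whole input.
const caretTokenIndexInWholeInput = 12;
const tokenIndexOffset = 9;

// After reparsing only that statement, its tokens start again at index 0,
// so the same caret now corresponds to token 3 of the fragment.
const caretTokenIndexInFragment = caretTokenIndexInWholeInput - tokenIndexOffset; // 3
console.log(caretTokenIndexInFragment);
```
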
@@ -356,12 +450,7 @@ export abstract class BasicSQL<
         core.preferredRules = this.preferredRules;
 
         const candidates = core.collectCandidates(caretTokenIndex, c3Context);
-        const originalSuggestions = this.processCandidates(
-            candidates,
-            allTokens,
-            caretTokenIndex,
-            tokenIndexOffset
-        );
+        const originalSuggestions = this.processCandidates(candidates, allTokens, caretTokenIndex);
 
         const syntaxSuggestions: SyntaxSuggestion<WordRange>[] = originalSuggestions.syntax.map(
             (syntaxCtx) => {
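
Taken together, these changes mean that only the statement under the caret is reparsed and handed to c3, even when several statements share one input. A hypothetical end-to-end usage sketch (assuming the `MySQL` dialect class and the `getSuggestionAtCaretPosition` API exported by this repository; the exact caret column is illustrative):

```ts
import { MySQL } from 'dt-sql-parser';

const mysql = new MySQL();
// Two statements on one line; the caret sits in the second one, right after "SELECT ".
const sql = 'SELECT id FROM users; SELECT  FROM orders;';
const caretPosition = { lineNumber: 1, column: 30 };

const suggestions = mysql.getSuggestionAtCaretPosition(sql, caretPosition);
// With this change, only "SELECT  FROM orders" is reparsed for completion,
// so the suggestions are scoped to the statement under the caret.
console.log(suggestions?.keywords);
console.log(suggestions?.syntax);
```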