Skip to content

Commit 9a13fab

Browse files
committed
Closes gitkraken#4227 — improves AI error messages
1 parent 4ddfc53 commit 9a13fab

File tree

7 files changed

+144
-42
lines changed

7 files changed

+144
-42
lines changed

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/) and this p
1414
### Changed
1515

1616
- Improves editor revision navigation ([#4200](https://github.com/gitkraken/vscode-gitlens/issues/4200))
17+
- Improves AI-related error messages ([#4227](https://github.com/gitkraken/vscode-gitlens/issues/4227))
1718

1819
### Fixed
1920

src/errors.ts

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -283,7 +283,11 @@ export const enum AIErrorReason {
283283
RequestTooLarge,
284284
UserQuotaExceeded,
285285
RateLimitExceeded,
286+
RateLimitOrFundsExceeded,
286287
ServiceCapacityExceeded,
288+
ModelNotSupported,
289+
ModelUserUnauthorized,
290+
ModelUserDeniedAccess,
287291
}
288292

289293
export class AIError extends Error {
@@ -305,12 +309,24 @@ export class AIError extends Error {
305309
case AIErrorReason.RateLimitExceeded:
306310
message = 'Rate limit exceeded';
307311
break;
312+
case AIErrorReason.RateLimitOrFundsExceeded:
313+
message = 'Rate limit exceeded or your account is out of funds';
314+
break;
308315
case AIErrorReason.ServiceCapacityExceeded:
309316
message = 'Service capacity exceeded';
310317
break;
311318
case AIErrorReason.NoRequestData:
312319
message = original?.message ?? 'No data was provided for the request';
313320
break;
321+
case AIErrorReason.ModelNotSupported:
322+
message = 'Model not supported for this request';
323+
break;
324+
case AIErrorReason.ModelUserUnauthorized:
325+
message = 'User is not authorized to use the specified model';
326+
break;
327+
case AIErrorReason.ModelUserDeniedAccess:
328+
message = 'User denied access to the specified model';
329+
break;
314330
default:
315331
message = original?.message ?? 'An unknown error occurred';
316332
break;

src/plus/ai/aiProviderService.ts

Lines changed: 63 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -938,11 +938,17 @@ export class AIProviderService implements Disposable {
938938
'You do not have the required entitlement or are over the limits to use this AI feature',
939939
);
940940
return undefined;
941-
case AIErrorReason.RequestTooLarge:
942-
void window.showErrorMessage(
943-
'Your request is too large. Please reduce the size of your request or switch to a different model and try again.',
941+
case AIErrorReason.RequestTooLarge: {
942+
const switchModel: MessageItem = { title: 'Switch Model' };
943+
const result = await window.showErrorMessage(
944+
'Your request is too large. Please reduce the size of your request or switch to a different model, and then try again.',
945+
switchModel,
944946
);
947+
if (result === switchModel) {
948+
void this.switchModel(source);
949+
}
945950
return undefined;
951+
}
946952
case AIErrorReason.UserQuotaExceeded: {
947953
const increaseLimit: MessageItem = { title: 'Increase Limit' };
948954
const result = await window.showErrorMessage(
@@ -956,18 +962,69 @@ export class AIProviderService implements Disposable {
956962

957963
return undefined;
958964
}
959-
case AIErrorReason.RateLimitExceeded:
960-
void window.showErrorMessage(
961-
'Rate limit exceeded. Please wait a few moments and try again later.',
965+
case AIErrorReason.RateLimitExceeded: {
966+
const switchModel: MessageItem = { title: 'Switch Model' };
967+
const result = await window.showErrorMessage(
968+
'Rate limit exceeded. Please wait a few moments or switch to a different model, and then try again.',
969+
switchModel,
962970
);
971+
if (result === switchModel) {
972+
void this.switchModel(source);
973+
}
974+
963975
return undefined;
976+
}
977+
case AIErrorReason.RateLimitOrFundsExceeded: {
978+
const switchModel: MessageItem = { title: 'Switch Model' };
979+
const result = await window.showErrorMessage(
980+
'Rate limit exceeded, or your account is out of funds. Please wait a few moments, check your account balance, or switch to a different model, and then try again.',
981+
switchModel,
982+
);
983+
if (result === switchModel) {
984+
void this.switchModel(source);
985+
}
986+
return undefined;
987+
}
964988
case AIErrorReason.ServiceCapacityExceeded: {
965989
void window.showErrorMessage(
966990
'GitKraken AI is temporarily unable to process your request due to high volume. Please wait a few moments and try again. If this issue persists, please contact support.',
967991
'OK',
968992
);
969993
return undefined;
970994
}
995+
case AIErrorReason.ModelNotSupported: {
996+
const switchModel: MessageItem = { title: 'Switch Model' };
997+
const result = await window.showErrorMessage(
998+
'The selected model is not supported for this request. Please select a different model and try again.',
999+
switchModel,
1000+
);
1001+
if (result === switchModel) {
1002+
void this.switchModel(source);
1003+
}
1004+
return undefined;
1005+
}
1006+
case AIErrorReason.ModelUserUnauthorized: {
1007+
const switchModel: MessageItem = { title: 'Switch Model' };
1008+
const result = await window.showErrorMessage(
1009+
'You do not have access to the selected model. Please select a different model and try again.',
1010+
switchModel,
1011+
);
1012+
if (result === switchModel) {
1013+
void this.switchModel(source);
1014+
}
1015+
return undefined;
1016+
}
1017+
case AIErrorReason.ModelUserDeniedAccess: {
1018+
const switchModel: MessageItem = { title: 'Switch Model' };
1019+
const result = await window.showErrorMessage(
1020+
'You have denied access to the selected model. Please provide access or select a different model, and then try again.',
1021+
switchModel,
1022+
);
1023+
if (result === switchModel) {
1024+
void this.switchModel(source);
1025+
}
1026+
return undefined;
1027+
}
9711028
}
9721029

9731030
return undefined;

src/plus/ai/anthropicProvider.ts

Lines changed: 18 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import type { CancellationToken } from 'vscode';
22
import type { Response } from '@env/fetch';
33
import { anthropicProviderDescriptor as provider } from '../../constants.ai';
4+
import { AIError, AIErrorReason } from '../../errors';
45
import type { AIActionType, AIModel } from './models/model';
56
import { OpenAICompatibleProvider } from './openAICompatibleProvider';
67

@@ -150,36 +151,32 @@ export class AnthropicProvider extends OpenAICompatibleProvider<typeof provider.
150151

151152
protected override async handleFetchFailure<TAction extends AIActionType>(
152153
rsp: Response,
153-
_action: TAction,
154+
action: TAction,
154155
model: AIModel<typeof provider.id>,
155156
retries: number,
156157
maxInputTokens: number,
157158
): Promise<{ retry: true; maxInputTokens: number }> {
158-
if (rsp.status === 404) {
159-
throw new Error(`Your API key doesn't seem to have access to the selected '${model.id}' model`);
160-
}
161-
if (rsp.status === 429) {
162-
throw new Error(
163-
`(${this.name}) ${rsp.status}: Too many requests (rate limit exceeded) or your account is out of funds`,
164-
);
165-
}
159+
if (rsp.status !== 404 && rsp.status !== 429) {
160+
let json;
161+
try {
162+
json = (await rsp.json()) as AnthropicError | undefined;
163+
} catch {}
166164

167-
let json;
168-
try {
169-
json = (await rsp.json()) as AnthropicError | undefined;
170-
} catch {}
165+
debugger;
171166

172-
debugger;
167+
if (json?.error?.type === 'invalid_request_error' && json?.error?.message?.includes('prompt is too long')) {
168+
if (retries < 2) {
169+
return { retry: true, maxInputTokens: maxInputTokens - 200 * (retries || 1) };
170+
}
173171

174-
if (
175-
retries < 2 &&
176-
json?.error?.type === 'invalid_request_error' &&
177-
json?.error?.message?.includes('prompt is too long')
178-
) {
179-
return { retry: true, maxInputTokens: maxInputTokens - 200 * (retries || 1) };
172+
throw new AIError(
173+
AIErrorReason.RequestTooLarge,
174+
new Error(`(${this.name}) ${rsp.status}: ${json?.error?.message || rsp.statusText}`),
175+
);
176+
}
180177
}
181178

182-
throw new Error(`(${this.name}) ${rsp.status}: ${json?.error?.message || rsp.statusText})`);
179+
return super.handleFetchFailure(rsp, action, model, retries, maxInputTokens);
183180
}
184181
}
185182

src/plus/ai/githubModelsProvider.ts

Lines changed: 12 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import type { Response } from '@env/fetch';
22
import { fetch } from '@env/fetch';
33
import { githubProviderDescriptor as provider } from '../../constants.ai';
4+
import { AIError, AIErrorReason } from '../../errors';
45
import type { AIActionType, AIModel } from './models/model';
56
import { OpenAICompatibleProvider } from './openAICompatibleProvider';
67

@@ -67,11 +68,18 @@ export class GitHubModelsProvider extends OpenAICompatibleProvider<typeof provid
6768
json = (await rsp.json()) as { error?: { code: string; message: string } } | undefined;
6869
} catch {}
6970

70-
if (retries < 2 && json?.error?.code === 'tokens_limit_reached') {
71-
const match = /Max size: (\d+) tokens/.exec(json?.error?.message);
72-
if (match?.[1] != null) {
73-
return { retry: true, maxInputTokens: parseInt(match[1], 10) };
71+
if (json?.error?.code === 'tokens_limit_reached') {
72+
if (retries < 2) {
73+
const match = /Max size: (\d+) tokens/.exec(json?.error?.message);
74+
if (match?.[1] != null) {
75+
return { retry: true, maxInputTokens: parseInt(match[1], 10) };
76+
}
7477
}
78+
79+
throw new AIError(
80+
AIErrorReason.RequestTooLarge,
81+
new Error(`(${this.name}) ${rsp.status}: ${json?.error?.message || rsp.statusText}`),
82+
);
7583
}
7684
}
7785

src/plus/ai/openAICompatibleProvider.ts

Lines changed: 19 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ import { fetch } from '@env/fetch';
44
import type { Role } from '../../@types/vsls';
55
import type { AIProviders } from '../../constants.ai';
66
import type { Container } from '../../container';
7-
import { AIError, CancellationError } from '../../errors';
7+
import { AIError, AIErrorReason, CancellationError } from '../../errors';
88
import { getLoggableName, Logger } from '../../system/logger';
99
import { startLogScope } from '../../system/logger.scope';
1010
import type { ServerConnection } from '../gk/serverConnection';
@@ -164,11 +164,17 @@ export abstract class OpenAICompatibleProvider<T extends AIProviders> implements
164164
maxInputTokens: number,
165165
): Promise<{ retry: true; maxInputTokens: number }> {
166166
if (rsp.status === 404) {
167-
throw new Error(`Your API key doesn't seem to have access to the selected '${model.id}' model`);
167+
throw new AIError(
168+
AIErrorReason.ModelUserUnauthorized,
169+
new Error(`Your API key doesn't seem to have access to the selected '${model.id}' model`),
170+
);
168171
}
169172
if (rsp.status === 429) {
170-
throw new Error(
171-
`(${this.name}) ${rsp.status}: Too many requests (rate limit exceeded) or your account is out of funds`,
173+
throw new AIError(
174+
AIErrorReason.RateLimitOrFundsExceeded,
175+
new Error(
176+
`(${this.name}) ${rsp.status}: Too many requests (rate limit exceeded) or your account is out of funds`,
177+
),
172178
);
173179
}
174180

@@ -177,8 +183,15 @@ export abstract class OpenAICompatibleProvider<T extends AIProviders> implements
177183
json = (await rsp.json()) as { error?: { code: string; message: string } } | undefined;
178184
} catch {}
179185

180-
if (retries < 2 && json?.error?.code === 'context_length_exceeded') {
181-
return { retry: true, maxInputTokens: maxInputTokens - 200 * (retries || 1) };
186+
if (json?.error?.code === 'context_length_exceeded') {
187+
if (retries < 2) {
188+
return { retry: true, maxInputTokens: maxInputTokens - 200 * (retries || 1) };
189+
}
190+
191+
throw new AIError(
192+
AIErrorReason.RequestTooLarge,
193+
new Error(`(${this.name}) ${rsp.status}: ${json?.error?.message || rsp.statusText}`),
194+
);
182195
}
183196

184197
throw new Error(`(${this.name}) ${rsp.status}: ${json?.error?.message || rsp.statusText}`);

src/plus/ai/vscodeProvider.ts

Lines changed: 15 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ import type { CancellationToken, Event, LanguageModelChat, LanguageModelChatSele
22
import { Disposable, EventEmitter, LanguageModelChatMessage, lm } from 'vscode';
33
import { vscodeProviderDescriptor } from '../../constants.ai';
44
import type { Container } from '../../container';
5-
import { CancellationError } from '../../errors';
5+
import { AIError, AIErrorReason, CancellationError } from '../../errors';
66
import { getLoggableName, Logger } from '../../system/logger';
77
import { startLogScope } from '../../system/logger.scope';
88
import { capitalize } from '../../system/string';
@@ -123,19 +123,29 @@ export class VSCodeAIProvider implements AIProvider<typeof provider.id> {
123123

124124
if (ex instanceof Error && 'code' in ex && ex.code === 'NoPermissions') {
125125
Logger.error(ex, scope, `User denied access to ${model.provider.name}`);
126-
throw new Error(`User denied access to ${model.provider.name}`);
126+
throw new AIError(AIErrorReason.ModelUserDeniedAccess, ex);
127127
}
128128

129129
if (ex instanceof Error && 'cause' in ex && ex.cause instanceof Error) {
130130
message += `\n${ex.cause.message}`;
131131

132-
if (retries++ < 2 && ex.cause.message.includes('exceeds token limit')) {
133-
maxInputTokens -= 500 * retries;
134-
continue;
132+
if (ex.cause.message.includes('exceeds token limit')) {
133+
if (retries++ < 2) {
134+
maxInputTokens -= 500 * retries;
135+
continue;
136+
}
137+
138+
Logger.error(ex, scope, `Unable to ${getActionName(action)}: (${model.provider.name})`);
139+
throw new AIError(AIErrorReason.RequestTooLarge, ex);
135140
}
136141
}
137142

138143
Logger.error(ex, scope, `Unable to ${getActionName(action)}: (${model.provider.name})`);
144+
145+
if (message.includes('Model is not supported for this request')) {
146+
throw new AIError(AIErrorReason.ModelNotSupported, ex);
147+
}
148+
139149
throw new Error(
140150
`Unable to ${getActionName(action)}: (${model.provider.name}${
141151
ex.code ? `:${ex.code}` : ''

0 commit comments

Comments
 (0)