Skip to content

Commit e6c42a8

Browse files
authored
fix: Support previous v4 of Vercel AI SDK (#962)
fix: Fix metric tracking for v5 responses
1 parent 7f3da30 commit e6c42a8

File tree

3 files changed

+63
-16
lines changed

packages/ai-providers/server-ai-vercel/__tests__/VercelProvider.test.ts

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,52 @@ describe('VercelProvider', () => {
6868
},
6969
});
7070
});
71+
72+
it('supports v5 field names (inputTokens, outputTokens)', () => {
73+
const mockResponse = {
74+
usage: {
75+
inputTokens: 40,
76+
outputTokens: 60,
77+
totalTokens: 100,
78+
},
79+
};
80+
81+
const result = VercelProvider.createAIMetrics(mockResponse);
82+
83+
expect(result).toEqual({
84+
success: true,
85+
usage: {
86+
total: 100,
87+
input: 40,
88+
output: 60,
89+
},
90+
});
91+
});
92+
93+
it('prefers v5 field names over v4 when both are present', () => {
94+
const mockResponse = {
95+
usage: {
96+
// v4 field names
97+
promptTokens: 10,
98+
completionTokens: 20,
99+
// v5 field names (should be preferred)
100+
inputTokens: 40,
101+
outputTokens: 60,
102+
totalTokens: 100,
103+
},
104+
};
105+
106+
const result = VercelProvider.createAIMetrics(mockResponse);
107+
108+
expect(result).toEqual({
109+
success: true,
110+
usage: {
111+
total: 100,
112+
input: 40, // inputTokens preferred over promptTokens
113+
output: 60, // outputTokens preferred over completionTokens
114+
},
115+
});
116+
});
71117
});
72118

73119
describe('invokeModel', () => {

packages/ai-providers/server-ai-vercel/package.json

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,6 @@
3232
"@ai-sdk/google": "^2.0.0",
3333
"@ai-sdk/mistral": "^2.0.0",
3434
"@ai-sdk/openai": "^2.0.0",
35-
"@ai-sdk/provider": "^2.0.0",
3635
"@launchdarkly/server-sdk-ai": "^0.12.2",
3736
"@trivago/prettier-plugin-sort-imports": "^4.1.1",
3837
"@types/jest": "^29.5.3",
@@ -57,7 +56,6 @@
5756
"@ai-sdk/google": "^2.0.0",
5857
"@ai-sdk/mistral": "^2.0.0",
5958
"@ai-sdk/openai": "^2.0.0",
60-
"@ai-sdk/provider": "^2.0.0",
6159
"@launchdarkly/server-sdk-ai": "^0.12.2",
6260
"ai": "^4.0.0 || ^5.0.0"
6361
},

packages/ai-providers/server-ai-vercel/src/VercelProvider.ts

Lines changed: 17 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
1-
import { LanguageModelV2 } from '@ai-sdk/provider';
2-
import { generateText } from 'ai';
1+
import { generateText, LanguageModel } from 'ai';
32

43
import { AIProvider } from '@launchdarkly/server-sdk-ai';
54
import type {
@@ -16,10 +15,10 @@ import type {
1615
* This provider integrates Vercel AI SDK with LaunchDarkly's tracking capabilities.
1716
*/
1817
export class VercelProvider extends AIProvider {
19-
private _model: LanguageModelV2;
18+
private _model: LanguageModel;
2019
private _parameters: Record<string, unknown>;
2120

22-
constructor(model: LanguageModelV2, parameters: Record<string, unknown>, logger?: LDLogger) {
21+
constructor(model: LanguageModel, parameters: Record<string, unknown>, logger?: LDLogger) {
2322
super(logger);
2423
this._model = model;
2524
this._parameters = parameters;
@@ -47,6 +46,8 @@ export class VercelProvider extends AIProvider {
4746
*/
4847
async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
4948
// Call Vercel AI generateText
49+
// The model is typed as the SDK's `LanguageModel` union, which is the
50+
// parameter type generateText expects for both v4 and v5 of the Vercel AI SDK
5051
const result = await generateText({
5152
model: this._model,
5253
messages,
@@ -71,7 +72,7 @@ export class VercelProvider extends AIProvider {
7172
/**
7273
* Get the underlying Vercel AI model instance.
7374
*/
74-
getModel(): LanguageModelV2 {
75+
getModel(): LanguageModel {
7576
return this._model;
7677
}
7778

@@ -98,16 +99,18 @@ export class VercelProvider extends AIProvider {
9899
* Create AI metrics information from a Vercel AI response.
99100
* This method extracts token usage information and success status from Vercel AI responses
100101
* and returns a LaunchDarkly AIMetrics object.
102+
* Supports both v4 and v5 field names for backward compatibility.
101103
*/
102104
static createAIMetrics(vercelResponse: any): LDAIMetrics {
103105
// Extract token usage if available
104106
let usage: LDTokenUsage | undefined;
105107
if (vercelResponse?.usage) {
106-
const { promptTokens, completionTokens, totalTokens } = vercelResponse.usage;
108+
const { totalTokens, inputTokens, promptTokens, outputTokens, completionTokens } =
109+
vercelResponse.usage;
107110
usage = {
108-
total: totalTokens || 0,
109-
input: promptTokens || 0,
110-
output: completionTokens || 0,
111+
total: totalTokens ?? 0,
112+
input: inputTokens ?? promptTokens ?? 0,
113+
output: outputTokens ?? completionTokens ?? 0,
111114
};
112115
}
113116

@@ -125,7 +128,7 @@ export class VercelProvider extends AIProvider {
125128
* @param aiConfig The LaunchDarkly AI configuration
126129
* @returns A Promise that resolves to a configured Vercel AI model
127130
*/
128-
static async createVercelModel(aiConfig: LDAIConfig): Promise<LanguageModelV2> {
131+
static async createVercelModel(aiConfig: LDAIConfig): Promise<LanguageModel> {
129132
const providerName = VercelProvider.mapProvider(aiConfig.provider?.name || '');
130133
const modelName = aiConfig.model?.name || '';
131134
// Parameters are not used in model creation but kept for future use
@@ -143,28 +146,28 @@ export class VercelProvider extends AIProvider {
143146
}
144147
case 'anthropic':
145148
try {
146-
const { anthropic } = await import('@ai-sdk/anthropic' as any);
149+
const { anthropic } = await import('@ai-sdk/anthropic');
147150
return anthropic(modelName);
148151
} catch (error) {
149152
throw new Error(`Failed to load @ai-sdk/anthropic: ${error}`);
150153
}
151154
case 'google':
152155
try {
153-
const { google } = await import('@ai-sdk/google' as any);
156+
const { google } = await import('@ai-sdk/google');
154157
return google(modelName);
155158
} catch (error) {
156159
throw new Error(`Failed to load @ai-sdk/google: ${error}`);
157160
}
158161
case 'cohere':
159162
try {
160-
const { cohere } = await import('@ai-sdk/cohere' as any);
163+
const { cohere } = await import('@ai-sdk/cohere');
161164
return cohere(modelName);
162165
} catch (error) {
163166
throw new Error(`Failed to load @ai-sdk/cohere: ${error}`);
164167
}
165168
case 'mistral':
166169
try {
167-
const { mistral } = await import('@ai-sdk/mistral' as any);
170+
const { mistral } = await import('@ai-sdk/mistral');
168171
return mistral(modelName);
169172
} catch (error) {
170173
throw new Error(`Failed to load @ai-sdk/mistral: ${error}`);

0 commit comments

Comments (0)