Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
155 changes: 155 additions & 0 deletions packages/sdk/server-ai/__tests__/LDAIConfigTrackerImpl.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -898,3 +898,158 @@ it('tracks error', () => {
1,
);
});

describe('trackMetricsOf', () => {
  // Every case builds an identical tracker from the module-level fixtures;
  // hoist the construction so each test reads as setup → act → assert.
  const createTracker = () =>
    new LDAIConfigTrackerImpl(
      mockLdClient,
      configKey,
      variationKey,
      version,
      modelName,
      providerName,
      testContext,
    );

  // Metadata payload attached to every tracked event in this suite.
  // (toHaveBeenCalledWith uses deep equality, so a shared object is safe.)
  const eventData = { configKey, variationKey, version, modelName, providerName };

  it('tracks success and token usage from metrics', async () => {
    const tracker = createTracker();

    const mockResult = { response: 'test' };
    const mockMetrics = {
      success: true,
      usage: { total: 100, input: 50, output: 50 },
    };

    const metricsExtractor = jest.fn().mockReturnValue(mockMetrics);
    const operation = jest.fn().mockResolvedValue(mockMetrics && mockResult);

    const result = await tracker.trackMetricsOf(metricsExtractor, operation);

    // The operation's resolved value passes through unchanged, and the
    // extractor receives exactly that value.
    expect(result).toBe(mockResult);
    expect(metricsExtractor).toHaveBeenCalledWith(mockResult);
    expect(operation).toHaveBeenCalled();

    // Should track success
    expect(mockTrack).toHaveBeenCalledWith(
      '$ld:ai:generation:success',
      testContext,
      eventData,
      1,
    );

    // Should track token usage
    expect(mockTrack).toHaveBeenCalledWith('$ld:ai:tokens:total', testContext, eventData, 100);
    expect(mockTrack).toHaveBeenCalledWith('$ld:ai:tokens:input', testContext, eventData, 50);
    expect(mockTrack).toHaveBeenCalledWith('$ld:ai:tokens:output', testContext, eventData, 50);
  });

  it('tracks failure when metrics indicate failure', async () => {
    const tracker = createTracker();

    const mockResult = { response: 'test' };
    const mockMetrics = {
      success: false,
    };

    const metricsExtractor = jest.fn().mockReturnValue(mockMetrics);
    const operation = jest.fn().mockResolvedValue(mockResult);

    await tracker.trackMetricsOf(metricsExtractor, operation);

    // Should track error
    expect(mockTrack).toHaveBeenCalledWith(
      '$ld:ai:generation:error',
      testContext,
      eventData,
      1,
    );
  });

  it('tracks failure when operation throws', async () => {
    const tracker = createTracker();

    const error = new Error('Operation failed');
    const metricsExtractor = jest.fn();
    const operation = jest.fn().mockRejectedValue(error);

    // The rejection propagates to the caller after being tracked.
    await expect(tracker.trackMetricsOf(metricsExtractor, operation)).rejects.toThrow(error);

    // Should track error
    expect(mockTrack).toHaveBeenCalledWith(
      '$ld:ai:generation:error',
      testContext,
      eventData,
      1,
    );

    // Should not call metrics extractor when operation fails
    expect(metricsExtractor).not.toHaveBeenCalled();
  });

  it('tracks metrics without token usage', async () => {
    const tracker = createTracker();

    const mockResult = { response: 'test' };
    const mockMetrics = {
      success: true,
      // No usage provided
    };

    const metricsExtractor = jest.fn().mockReturnValue(mockMetrics);
    const operation = jest.fn().mockResolvedValue(mockResult);

    await tracker.trackMetricsOf(metricsExtractor, operation);

    // Should track success but not token usage
    expect(mockTrack).toHaveBeenCalledWith(
      '$ld:ai:generation:success',
      testContext,
      eventData,
      1,
    );

    // Should not track token usage
    expect(mockTrack).not.toHaveBeenCalledWith(
      '$ld:ai:tokens:total',
      expect.any(Object),
      expect.any(Object),
      expect.any(Number),
    );
  });
});
231 changes: 231 additions & 0 deletions packages/sdk/server-ai/__tests__/TrackedChat.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,231 @@
import { TrackedChat } from '../src/api/chat/TrackedChat';
import { ChatResponse } from '../src/api/chat/types';
import { LDAIConfig, LDMessage } from '../src/api/config/LDAIConfig';
import { LDAIConfigTracker } from '../src/api/config/LDAIConfigTracker';
import { AIProvider } from '../src/api/providers/AIProvider';

describe('TrackedChat', () => {
  let mockProvider: jest.Mocked<AIProvider>;
  let mockTracker: jest.Mocked<LDAIConfigTracker>;
  let aiConfig: LDAIConfig;

  // Every test wires the same three collaborators into the chat; the
  // optional parameter lets individual tests substitute a config variant.
  // Note: this reads `aiConfig` lazily, so it must be called inside a test
  // (after beforeEach has run), never at describe-collection time.
  const createChat = (config?: LDAIConfig) =>
    new TrackedChat(config ?? aiConfig, mockTracker, mockProvider);

  beforeEach(() => {
    // Mock the AIProvider
    mockProvider = {
      invokeModel: jest.fn(),
    } as any;

    // Mock the LDAIConfigTracker
    mockTracker = {
      trackMetricsOf: jest.fn(),
      trackDuration: jest.fn(),
      trackTokens: jest.fn(),
      trackSuccess: jest.fn(),
      trackError: jest.fn(),
      trackFeedback: jest.fn(),
      trackTimeToFirstToken: jest.fn(),
      trackDurationOf: jest.fn(),
      trackOpenAIMetrics: jest.fn(),
      trackBedrockConverseMetrics: jest.fn(),
      trackVercelAIMetrics: jest.fn(),
      getSummary: jest.fn(),
    } as any;

    // Create a basic AI config with one system message so the
    // includeConfigMessages behavior has something to prepend.
    aiConfig = {
      enabled: true,
      messages: [{ role: 'system', content: 'You are a helpful assistant.' }],
      model: { name: 'gpt-4' },
      provider: { name: 'openai' },
      tracker: mockTracker,
      toVercelAISDK: jest.fn(),
    };
  });

  describe('appendMessages', () => {
    it('appends messages to the conversation history', () => {
      const chat = createChat();

      const messagesToAppend: LDMessage[] = [
        { role: 'user', content: 'Hello' },
        { role: 'assistant', content: 'Hi there!' },
      ];

      chat.appendMessages(messagesToAppend);

      const messages = chat.getMessages(false);
      expect(messages).toHaveLength(2);
      expect(messages[0]).toEqual({ role: 'user', content: 'Hello' });
      expect(messages[1]).toEqual({ role: 'assistant', content: 'Hi there!' });
    });

    it('appends multiple message batches sequentially', () => {
      const chat = createChat();

      chat.appendMessages([{ role: 'user', content: 'First message' }]);
      chat.appendMessages([{ role: 'assistant', content: 'Second message' }]);
      chat.appendMessages([{ role: 'user', content: 'Third message' }]);

      // Batches must be preserved in call order.
      const messages = chat.getMessages(false);
      expect(messages).toHaveLength(3);
      expect(messages[0].content).toBe('First message');
      expect(messages[1].content).toBe('Second message');
      expect(messages[2].content).toBe('Third message');
    });

    it('handles empty message array', () => {
      const chat = createChat();

      chat.appendMessages([]);

      const messages = chat.getMessages(false);
      expect(messages).toHaveLength(0);
    });
  });

  describe('getMessages', () => {
    it('returns only conversation history when includeConfigMessages is false', () => {
      const chat = createChat();

      chat.appendMessages([
        { role: 'user', content: 'User message' },
        { role: 'assistant', content: 'Assistant message' },
      ]);

      const messages = chat.getMessages(false);

      expect(messages).toHaveLength(2);
      expect(messages[0]).toEqual({ role: 'user', content: 'User message' });
      expect(messages[1]).toEqual({ role: 'assistant', content: 'Assistant message' });
    });

    it('returns only conversation history when includeConfigMessages is omitted (defaults to false)', () => {
      const chat = createChat();

      chat.appendMessages([{ role: 'user', content: 'User message' }]);

      const messages = chat.getMessages();

      expect(messages).toHaveLength(1);
      expect(messages[0]).toEqual({ role: 'user', content: 'User message' });
    });

    it('returns config messages prepended when includeConfigMessages is true', () => {
      const chat = createChat();

      chat.appendMessages([
        { role: 'user', content: 'User message' },
        { role: 'assistant', content: 'Assistant message' },
      ]);

      const messages = chat.getMessages(true);

      // Config (system) message comes first, then history in order.
      expect(messages).toHaveLength(3);
      expect(messages[0]).toEqual({ role: 'system', content: 'You are a helpful assistant.' });
      expect(messages[1]).toEqual({ role: 'user', content: 'User message' });
      expect(messages[2]).toEqual({ role: 'assistant', content: 'Assistant message' });
    });

    it('returns only config messages when no conversation history exists and includeConfigMessages is true', () => {
      const chat = createChat();

      const messages = chat.getMessages(true);

      expect(messages).toHaveLength(1);
      expect(messages[0]).toEqual({ role: 'system', content: 'You are a helpful assistant.' });
    });

    it('returns empty array when no messages exist and includeConfigMessages is false', () => {
      const configWithoutMessages: LDAIConfig = {
        ...aiConfig,
        messages: [],
      };
      const chat = createChat(configWithoutMessages);

      const messages = chat.getMessages(false);

      expect(messages).toHaveLength(0);
    });

    it('returns a copy of the messages array (not a reference)', () => {
      const chat = createChat();

      chat.appendMessages([{ role: 'user', content: 'Original message' }]);

      const messages1 = chat.getMessages();
      const messages2 = chat.getMessages();

      // Distinct array objects with equal contents on each call.
      expect(messages1).not.toBe(messages2);
      expect(messages1).toEqual(messages2);

      // Modifying returned array should not affect internal state
      messages1.push({ role: 'assistant', content: 'Modified' });

      const messages3 = chat.getMessages();
      expect(messages3).toHaveLength(1);
      expect(messages3[0].content).toBe('Original message');
    });

    it('handles undefined config messages gracefully', () => {
      const configWithoutMessages: LDAIConfig = {
        ...aiConfig,
        messages: undefined,
      };
      const chat = createChat(configWithoutMessages);

      chat.appendMessages([{ role: 'user', content: 'User message' }]);

      // With no config messages, both modes return just the history.
      const messagesWithConfig = chat.getMessages(true);
      expect(messagesWithConfig).toHaveLength(1);
      expect(messagesWithConfig[0].content).toBe('User message');

      const messagesWithoutConfig = chat.getMessages(false);
      expect(messagesWithoutConfig).toHaveLength(1);
      expect(messagesWithoutConfig[0].content).toBe('User message');
    });
  });

  describe('integration with invoke', () => {
    // invoke() routes through the tracker; make trackMetricsOf a transparent
    // passthrough so the underlying operation actually executes.
    const passThroughTracking = () => {
      mockTracker.trackMetricsOf.mockImplementation(async (extractor, func) => func());
    };

    it('adds messages from invoke to history accessible via getMessages', async () => {
      const mockResponse: ChatResponse = {
        message: { role: 'assistant', content: 'Response from model' },
        metrics: { success: true },
      };

      passThroughTracking();
      mockProvider.invokeModel.mockResolvedValue(mockResponse);

      const chat = createChat();

      await chat.invoke('Hello');

      // Both the user turn and the model reply land in history.
      const messages = chat.getMessages(false);
      expect(messages).toHaveLength(2);
      expect(messages[0]).toEqual({ role: 'user', content: 'Hello' });
      expect(messages[1]).toEqual({ role: 'assistant', content: 'Response from model' });
    });

    it('preserves appended messages when invoking', async () => {
      const mockResponse: ChatResponse = {
        message: { role: 'assistant', content: 'Response' },
        metrics: { success: true },
      };

      passThroughTracking();
      mockProvider.invokeModel.mockResolvedValue(mockResponse);

      const chat = createChat();

      chat.appendMessages([{ role: 'user', content: 'Pre-appended message' }]);
      await chat.invoke('New user input');

      const messages = chat.getMessages(false);
      expect(messages).toHaveLength(3);
      expect(messages[0].content).toBe('Pre-appended message');
      expect(messages[1].content).toBe('New user input');
      expect(messages[2].content).toBe('Response');
    });
  });
});
Loading