diff --git a/packages/sdk/server-ai/__tests__/LDAIConfigTrackerImpl.test.ts b/packages/sdk/server-ai/__tests__/LDAIConfigTrackerImpl.test.ts
index 8d197f4ce..a70fce0b1 100644
--- a/packages/sdk/server-ai/__tests__/LDAIConfigTrackerImpl.test.ts
+++ b/packages/sdk/server-ai/__tests__/LDAIConfigTrackerImpl.test.ts
@@ -898,3 +898,158 @@ it('tracks error', () => {
     1,
   );
 });
+
+describe('trackMetricsOf', () => {
+  it('tracks success and token usage from metrics', async () => {
+    const tracker = new LDAIConfigTrackerImpl(
+      mockLdClient,
+      configKey,
+      variationKey,
+      version,
+      modelName,
+      providerName,
+      testContext,
+    );
+
+    const mockResult = { response: 'test' };
+    const mockMetrics = {
+      success: true,
+      usage: { total: 100, input: 50, output: 50 },
+    };
+
+    const metricsExtractor = jest.fn().mockReturnValue(mockMetrics);
+    const operation = jest.fn().mockResolvedValue(mockResult);
+
+    const result = await tracker.trackMetricsOf(metricsExtractor, operation);
+
+    expect(result).toBe(mockResult);
+    expect(metricsExtractor).toHaveBeenCalledWith(mockResult);
+    expect(operation).toHaveBeenCalled();
+
+    // Should track success
+    expect(mockTrack).toHaveBeenCalledWith(
+      '$ld:ai:generation:success',
+      testContext,
+      { configKey, variationKey, version, modelName, providerName },
+      1,
+    );
+
+    // Should track token usage
+    expect(mockTrack).toHaveBeenCalledWith(
+      '$ld:ai:tokens:total',
+      testContext,
+      { configKey, variationKey, version, modelName, providerName },
+      100,
+    );
+    expect(mockTrack).toHaveBeenCalledWith(
+      '$ld:ai:tokens:input',
+      testContext,
+      { configKey, variationKey, version, modelName, providerName },
+      50,
+    );
+    expect(mockTrack).toHaveBeenCalledWith(
+      '$ld:ai:tokens:output',
+      testContext,
+      { configKey, variationKey, version, modelName, providerName },
+      50,
+    );
+  });
+
+  it('tracks failure when metrics indicate failure', async () => {
+    const tracker = new LDAIConfigTrackerImpl(
+      mockLdClient,
+      configKey,
+      variationKey,
+      version,
+      modelName,
+      providerName,
+      testContext,
+    );
+
+    const mockResult = { response: 'test' };
+    const mockMetrics = {
+      success: false,
+    };
+
+    const metricsExtractor = jest.fn().mockReturnValue(mockMetrics);
+    const operation = jest.fn().mockResolvedValue(mockResult);
+
+    await tracker.trackMetricsOf(metricsExtractor, operation);
+
+    // Should track error
+    expect(mockTrack).toHaveBeenCalledWith(
+      '$ld:ai:generation:error',
+      testContext,
+      { configKey, variationKey, version, modelName, providerName },
+      1,
+    );
+  });
+
+  it('tracks failure when operation throws', async () => {
+    const tracker = new LDAIConfigTrackerImpl(
+      mockLdClient,
+      configKey,
+      variationKey,
+      version,
+      modelName,
+      providerName,
+      testContext,
+    );
+
+    const error = new Error('Operation failed');
+    const metricsExtractor = jest.fn();
+    const operation = jest.fn().mockRejectedValue(error);
+
+    await expect(tracker.trackMetricsOf(metricsExtractor, operation)).rejects.toThrow(error);
+
+    // Should track error
+    expect(mockTrack).toHaveBeenCalledWith(
+      '$ld:ai:generation:error',
+      testContext,
+      { configKey, variationKey, version, modelName, providerName },
+      1,
+    );
+
+    // Should not call metrics extractor when operation fails
+    expect(metricsExtractor).not.toHaveBeenCalled();
+  });
+
+  it('tracks metrics without token usage', async () => {
+    const tracker = new LDAIConfigTrackerImpl(
+      mockLdClient,
+      configKey,
+      variationKey,
+      version,
+      modelName,
+      providerName,
+      testContext,
+    );
+
+    const mockResult = { response: 'test' };
+    const mockMetrics = {
+      success: true,
+      // No usage provided
+    };
+
+    const metricsExtractor = jest.fn().mockReturnValue(mockMetrics);
+    const operation = jest.fn().mockResolvedValue(mockResult);
+
+    await tracker.trackMetricsOf(metricsExtractor, operation);
+
+    // Should track success but not token usage
+    expect(mockTrack).toHaveBeenCalledWith(
+      '$ld:ai:generation:success',
+      testContext,
+      { configKey, variationKey, version, modelName, providerName },
+      1,
+    );
+
+    // Should not track token usage
+    expect(mockTrack).not.toHaveBeenCalledWith(
+      '$ld:ai:tokens:total',
+      expect.any(Object),
+      expect.any(Object),
+      expect.any(Number),
+    );
+  });
+});
diff --git a/packages/sdk/server-ai/__tests__/TrackedChat.test.ts b/packages/sdk/server-ai/__tests__/TrackedChat.test.ts
new file mode 100644
index 000000000..c91b2d3d5
--- /dev/null
+++ b/packages/sdk/server-ai/__tests__/TrackedChat.test.ts
@@ -0,0 +1,231 @@
+import { TrackedChat } from '../src/api/chat/TrackedChat';
+import { ChatResponse } from '../src/api/chat/types';
+import { LDAIConfig, LDMessage } from '../src/api/config/LDAIConfig';
+import { LDAIConfigTracker } from '../src/api/config/LDAIConfigTracker';
+import { AIProvider } from '../src/api/providers/AIProvider';
+
+describe('TrackedChat', () => {
+  let mockProvider: jest.Mocked<AIProvider>;
+  let mockTracker: jest.Mocked<LDAIConfigTracker>;
+  let aiConfig: LDAIConfig;
+
+  beforeEach(() => {
+    // Mock the AIProvider
+    mockProvider = {
+      invokeModel: jest.fn(),
+    } as any;
+
+    // Mock the LDAIConfigTracker
+    mockTracker = {
+      trackMetricsOf: jest.fn(),
+      trackDuration: jest.fn(),
+      trackTokens: jest.fn(),
+      trackSuccess: jest.fn(),
+      trackError: jest.fn(),
+      trackFeedback: jest.fn(),
+      trackTimeToFirstToken: jest.fn(),
+      trackDurationOf: jest.fn(),
+      trackOpenAIMetrics: jest.fn(),
+      trackBedrockConverseMetrics: jest.fn(),
+      trackVercelAIMetrics: jest.fn(),
+      getSummary: jest.fn(),
+    } as any;
+
+    // Create a basic AI config
+    aiConfig = {
+      enabled: true,
+      messages: [{ role: 'system', content: 'You are a helpful assistant.' }],
+      model: { name: 'gpt-4' },
+      provider: { name: 'openai' },
+      tracker: mockTracker,
+      toVercelAISDK: jest.fn(),
+    };
+  });
+
+  describe('appendMessages', () => {
+    it('appends messages to the conversation history', () => {
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      const messagesToAppend: LDMessage[] = [
+        { role: 'user', content: 'Hello' },
+        { role: 'assistant', content: 'Hi there!' },
+      ];
+
+      chat.appendMessages(messagesToAppend);
+
+      const messages = chat.getMessages(false);
+      expect(messages).toHaveLength(2);
+      expect(messages[0]).toEqual({ role: 'user', content: 'Hello' });
+      expect(messages[1]).toEqual({ role: 'assistant', content: 'Hi there!' });
+    });
+
+    it('appends multiple message batches sequentially', () => {
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      chat.appendMessages([{ role: 'user', content: 'First message' }]);
+      chat.appendMessages([{ role: 'assistant', content: 'Second message' }]);
+      chat.appendMessages([{ role: 'user', content: 'Third message' }]);
+
+      const messages = chat.getMessages(false);
+      expect(messages).toHaveLength(3);
+      expect(messages[0].content).toBe('First message');
+      expect(messages[1].content).toBe('Second message');
+      expect(messages[2].content).toBe('Third message');
+    });
+
+    it('handles empty message array', () => {
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      chat.appendMessages([]);
+
+      const messages = chat.getMessages(false);
+      expect(messages).toHaveLength(0);
+    });
+  });
+
+  describe('getMessages', () => {
+    it('returns only conversation history when includeConfigMessages is false', () => {
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      chat.appendMessages([
+        { role: 'user', content: 'User message' },
+        { role: 'assistant', content: 'Assistant message' },
+      ]);
+
+      const messages = chat.getMessages(false);
+
+      expect(messages).toHaveLength(2);
+      expect(messages[0]).toEqual({ role: 'user', content: 'User message' });
+      expect(messages[1]).toEqual({ role: 'assistant', content: 'Assistant message' });
+    });
+
+    it('returns only conversation history when includeConfigMessages is omitted (defaults to false)', () => {
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      chat.appendMessages([{ role: 'user', content: 'User message' }]);
+
+      const messages = chat.getMessages();
+
+      expect(messages).toHaveLength(1);
+      expect(messages[0]).toEqual({ role: 'user', content: 'User message' });
+    });
+
+    it('returns config messages prepended when includeConfigMessages is true', () => {
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      chat.appendMessages([
+        { role: 'user', content: 'User message' },
+        { role: 'assistant', content: 'Assistant message' },
+      ]);
+
+      const messages = chat.getMessages(true);
+
+      expect(messages).toHaveLength(3);
+      expect(messages[0]).toEqual({ role: 'system', content: 'You are a helpful assistant.' });
+      expect(messages[1]).toEqual({ role: 'user', content: 'User message' });
+      expect(messages[2]).toEqual({ role: 'assistant', content: 'Assistant message' });
+    });
+
+    it('returns only config messages when no conversation history exists and includeConfigMessages is true', () => {
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      const messages = chat.getMessages(true);
+
+      expect(messages).toHaveLength(1);
+      expect(messages[0]).toEqual({ role: 'system', content: 'You are a helpful assistant.' });
+    });
+
+    it('returns empty array when no messages exist and includeConfigMessages is false', () => {
+      const configWithoutMessages: LDAIConfig = {
+        ...aiConfig,
+        messages: [],
+      };
+      const chat = new TrackedChat(configWithoutMessages, mockTracker, mockProvider);
+
+      const messages = chat.getMessages(false);
+
+      expect(messages).toHaveLength(0);
+    });
+
+    it('returns a copy of the messages array (not a reference)', () => {
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      chat.appendMessages([{ role: 'user', content: 'Original message' }]);
+
+      const messages1 = chat.getMessages();
+      const messages2 = chat.getMessages();
+
+      expect(messages1).not.toBe(messages2);
+      expect(messages1).toEqual(messages2);
+
+      // Modifying returned array should not affect internal state
+      messages1.push({ role: 'assistant', content: 'Modified' });
+
+      const messages3 = chat.getMessages();
+      expect(messages3).toHaveLength(1);
+      expect(messages3[0].content).toBe('Original message');
+    });
+
+    it('handles undefined config messages gracefully', () => {
+      const configWithoutMessages: LDAIConfig = {
+        ...aiConfig,
+        messages: undefined,
+      };
+      const chat = new TrackedChat(configWithoutMessages, mockTracker, mockProvider);
+
+      chat.appendMessages([{ role: 'user', content: 'User message' }]);
+
+      const messagesWithConfig = chat.getMessages(true);
+      expect(messagesWithConfig).toHaveLength(1);
+      expect(messagesWithConfig[0].content).toBe('User message');
+
+      const messagesWithoutConfig = chat.getMessages(false);
+      expect(messagesWithoutConfig).toHaveLength(1);
+      expect(messagesWithoutConfig[0].content).toBe('User message');
+    });
+  });
+
+  describe('integration with invoke', () => {
+    it('adds messages from invoke to history accessible via getMessages', async () => {
+      const mockResponse: ChatResponse = {
+        message: { role: 'assistant', content: 'Response from model' },
+        metrics: { success: true },
+      };
+
+      mockTracker.trackMetricsOf.mockImplementation(async (extractor, func) => func());
+
+      mockProvider.invokeModel.mockResolvedValue(mockResponse);
+
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      await chat.invoke('Hello');
+
+      const messages = chat.getMessages(false);
+      expect(messages).toHaveLength(2);
+      expect(messages[0]).toEqual({ role: 'user', content: 'Hello' });
+      expect(messages[1]).toEqual({ role: 'assistant', content: 'Response from model' });
+    });
+
+    it('preserves appended messages when invoking', async () => {
+      const mockResponse: ChatResponse = {
+        message: { role: 'assistant', content: 'Response' },
+        metrics: { success: true },
+      };
+
+      mockTracker.trackMetricsOf.mockImplementation(async (extractor, func) => func());
+
+      mockProvider.invokeModel.mockResolvedValue(mockResponse);
+
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      chat.appendMessages([{ role: 'user', content: 'Pre-appended message' }]);
+      await chat.invoke('New user input');
+
+      const messages = chat.getMessages(false);
+      expect(messages).toHaveLength(3);
+      expect(messages[0].content).toBe('Pre-appended message');
+      expect(messages[1].content).toBe('New user input');
+      expect(messages[2].content).toBe('Response');
+    });
+  });
+});
diff --git a/packages/sdk/server-ai/src/LDAIClientImpl.ts b/packages/sdk/server-ai/src/LDAIClientImpl.ts
index 4ad002156..782bc05cb 100644
--- a/packages/sdk/server-ai/src/LDAIClientImpl.ts
+++ b/packages/sdk/server-ai/src/LDAIClientImpl.ts
@@ -1,9 +1,9 @@
 import * as Mustache from 'mustache';
 
-import { LDContext } from '@launchdarkly/js-server-sdk-common';
+import { LDContext, LDLogger } from '@launchdarkly/js-server-sdk-common';
 
 import { LDAIAgent, LDAIAgentConfig, LDAIAgentDefaults } from './api/agents';
-import { BaseTrackedChat, TrackedChatFactory } from './api/chat';
+import { TrackedChat, TrackedChatFactory } from './api/chat';
 import {
   LDAIConfig,
   LDAIConfigTracker,
@@ -58,7 +58,11 @@ interface EvaluationResult {
 }
 
 export class LDAIClientImpl implements LDAIClient {
-  constructor(private _ldClient: LDClientMin) {}
+  private _logger?: LDLogger;
+
+  constructor(private _ldClient: LDClientMin) {
+    this._logger = _ldClient.logger;
+  }
 
   private _interpolateTemplate(template: string, variables: Record<string, unknown>): string {
     return Mustache.render(template, variables, undefined, { escape: (item: any) => item });
   }
@@ -229,20 +233,19 @@ export class LDAIClientImpl implements LDAIClient {
     context: LDContext,
     defaultValue: LDAIDefaults,
     variables?: Record<string, unknown>,
-  ): Promise<BaseTrackedChat | undefined> {
+  ): Promise<TrackedChat | undefined> {
     // Track chat initialization
     this._ldClient.track('$ld:ai:config:function:initChat', context, key, 1);
 
     const config = await this.config(key, context, defaultValue, variables);
 
-    // Return null if the configuration is disabled
+    // Return undefined if the configuration is disabled
     if (!config.enabled) {
+      this._logger?.info(`Chat configuration is disabled: ${key}`);
       return undefined;
     }
 
     // Create the TrackedChat instance based on the provider
-    const chat = await TrackedChatFactory.create(config, config.tracker);
-
-    return chat;
+    return TrackedChatFactory.create(config, config.tracker, this._logger);
   }
 }
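For orientation, a rough sketch of the consumer-facing flow this change enables. This is illustrative only: it assumes the package's usual `initAi(ldClient)` entry point, and the config key, context, and default value are made up.

    const aiClient = initAi(ldClient);

    const chat = await aiClient.initChat(
      'customer-support-chat', // hypothetical AI Config key
      { kind: 'user', key: 'example-user' },
      { enabled: false },
    );

    if (chat) {
      const response = await chat.invoke('What is your return policy?');
      console.log(response.message.content);
    } else {
      // Config disabled, or no supported provider package is installed.
    }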
diff --git a/packages/sdk/server-ai/src/LDAIConfigTrackerImpl.ts b/packages/sdk/server-ai/src/LDAIConfigTrackerImpl.ts
index d820a8516..8830cee4e 100644
--- a/packages/sdk/server-ai/src/LDAIConfigTrackerImpl.ts
+++ b/packages/sdk/server-ai/src/LDAIConfigTrackerImpl.ts
@@ -6,6 +6,7 @@ import {
   createBedrockTokenUsage,
   createOpenAiUsage,
   createVercelAISDKTokenUsage,
+  LDAIMetrics,
   LDFeedbackKind,
   LDTokenUsage,
 } from './api/metrics';
@@ -87,6 +88,37 @@ export class LDAIConfigTrackerImpl implements LDAIConfigTracker {
     this._ldClient.track('$ld:ai:generation:error', this._context, this._getTrackData(), 1);
   }
 
+  async trackMetricsOf<TRes>(
+    metricsExtractor: (result: TRes) => LDAIMetrics,
+    func: () => Promise<TRes>,
+  ): Promise<TRes> {
+    let result: TRes;
+
+    try {
+      result = await this.trackDurationOf(func);
+    } catch (err) {
+      this.trackError();
+      throw err;
+    }
+
+    // Extract metrics after successful AI call
+    const metrics = metricsExtractor(result);
+
+    // Track success/error based on metrics
+    if (metrics.success) {
+      this.trackSuccess();
+    } else {
+      this.trackError();
+    }
+
+    // Track token usage if available
+    if (metrics.usage) {
+      this.trackTokens(metrics.usage);
+    }
+
+    return result;
+  }
+
   async trackOpenAIMetrics<
     TRes extends {
       usage?: {
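For reference, a minimal sketch of calling the new `trackMetricsOf` helper directly. The `callMyModel` function and its `MyModelResult` shape are hypothetical; the extractor's job is to map the provider result onto `LDAIMetrics`.

    const result = await tracker.trackMetricsOf(
      // Map the provider-specific result to LDAIMetrics.
      (res: MyModelResult) => ({
        success: res.ok,
        usage: res.tokens, // { total, input, output }, if the provider reports them
      }),
      () => callMyModel(messages), // hypothetical async model call
    );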
diff --git a/packages/sdk/server-ai/src/LDClientMin.ts b/packages/sdk/server-ai/src/LDClientMin.ts
index 2158c569c..864354c0f 100644
--- a/packages/sdk/server-ai/src/LDClientMin.ts
+++ b/packages/sdk/server-ai/src/LDClientMin.ts
@@ -1,4 +1,4 @@
-import { LDContext, LDFlagValue } from '@launchdarkly/js-server-sdk-common';
+import { LDContext, LDFlagValue, LDLogger } from '@launchdarkly/js-server-sdk-common';
 
 /**
  * Interface which represents the required interface components for a sever SDK
@@ -13,4 +13,6 @@ export interface LDClientMin {
   ): Promise<LDFlagValue>;
 
   track(key: string, context: LDContext, data?: any, metricValue?: number): void;
+
+  readonly logger?: LDLogger;
 }
diff --git a/packages/sdk/server-ai/src/api/LDAIClient.ts b/packages/sdk/server-ai/src/api/LDAIClient.ts
index 03d17c1d1..ccf309893 100644
--- a/packages/sdk/server-ai/src/api/LDAIClient.ts
+++ b/packages/sdk/server-ai/src/api/LDAIClient.ts
@@ -1,7 +1,7 @@
 import { LDContext } from '@launchdarkly/js-server-sdk-common';
 
 import { LDAIAgent, LDAIAgentConfig, LDAIAgentDefaults } from './agents';
-import { BaseTrackedChat } from './chat';
+import { TrackedChat } from './chat';
 import { LDAIConfig, LDAIDefaults } from './config/LDAIConfig';
 
 /**
@@ -186,5 +186,5 @@ export interface LDAIClient {
     context: LDContext,
     defaultValue: LDAIDefaults,
     variables?: Record<string, unknown>,
-  ): Promise<BaseTrackedChat | undefined>;
+  ): Promise<TrackedChat | undefined>;
 }
diff --git a/packages/sdk/server-ai/src/api/chat/BaseTrackedChat.ts b/packages/sdk/server-ai/src/api/chat/BaseTrackedChat.ts
deleted file mode 100644
index e30c4bb1f..000000000
--- a/packages/sdk/server-ai/src/api/chat/BaseTrackedChat.ts
+++ /dev/null
@@ -1,82 +0,0 @@
-import { LDAIConfig, LDMessage } from '../config/LDAIConfig';
-import { LDAIConfigTracker } from '../config/LDAIConfigTracker';
-import { ChatResponse } from './TrackedChat';
-
-/**
- * Base implementation of TrackedChat that provides common functionality.
- * This can be extended by provider-specific implementations.
- */
-export abstract class BaseTrackedChat {
-  protected messages: LDMessage[];
-
-  constructor(
-    protected readonly aiConfig: LDAIConfig,
-    protected readonly tracker: LDAIConfigTracker,
-  ) {
-    this.messages = aiConfig.messages || [];
-  }
-
-  /**
-   * Invoke the chat model with a prompt string.
-   * This method handles conversation management and tracking, delegating to the provider's invokeModel method.
-   */
-  async invoke(prompt: string): Promise<ChatResponse> {
-    // Convert prompt string to LDMessage with role 'user' and add to conversation history
-    const userMessage: LDMessage = {
-      role: 'user',
-      content: prompt,
-    };
-    this.messages.push(userMessage);
-
-    // Delegate to provider-specific implementation with tracking
-    const response = await this.trackMetricsOf(() => this.invokeModel(this.messages));
-
-    // Add the assistant response to the conversation history
-    this.messages.push(response.message);
-
-    return response;
-  }
-
-  /**
-   * Abstract method that providers must implement to handle the actual model invocation.
-   * This method should convert messages to provider format, invoke the model, and return a ChatResponse.
-   */
-  protected abstract invokeModel(messages: LDMessage[]): Promise<ChatResponse>;
-
-  /**
-   * Track metrics for a ChatResponse execution.
-   * This method handles duration tracking, token usage tracking, and success/error tracking.
-   */
-  protected async trackMetricsOf(callable: () => Promise<ChatResponse>): Promise<ChatResponse> {
-    return this.tracker.trackDurationOf(async () => {
-      try {
-        const result = await callable();
-
-        // Track token usage if available
-        if (result.usage) {
-          this.tracker.trackTokens(result.usage);
-        }
-
-        this.tracker.trackSuccess();
-        return result;
-      } catch (error) {
-        this.tracker.trackError();
-        throw error;
-      }
-    });
-  }
-
-  /**
-   * Get the underlying AI configuration used to initialize this TrackedChat.
-   */
-  getConfig(): LDAIConfig {
-    return this.aiConfig;
-  }
-
-  /**
-   * Get the underlying AI configuration tracker used to initialize this TrackedChat.
-   */
-  getTracker(): LDAIConfigTracker {
-    return this.tracker;
-  }
-}
diff --git a/packages/sdk/server-ai/src/api/chat/TrackedChat.ts b/packages/sdk/server-ai/src/api/chat/TrackedChat.ts
index f09a92f99..68a9af2f0 100644
--- a/packages/sdk/server-ai/src/api/chat/TrackedChat.ts
+++ b/packages/sdk/server-ai/src/api/chat/TrackedChat.ts
@@ -1,60 +1,100 @@
 import { LDAIConfig, LDMessage } from '../config/LDAIConfig';
 import { LDAIConfigTracker } from '../config/LDAIConfigTracker';
-import { LDTokenUsage } from '../metrics/LDTokenUsage';
+import { AIProvider } from '../providers/AIProvider';
+import { ChatResponse } from './types';
 
 /**
- * Chat response structure.
+ * Concrete implementation of TrackedChat that provides chat functionality
+ * by delegating to an AIProvider implementation.
+ * This class handles conversation management and tracking, while delegating
+ * the actual model invocation to the provider.
  */
-export interface ChatResponse {
-  /**
-   * The response message from the AI.
-   */
-  message: LDMessage;
+export class TrackedChat {
+  protected messages: LDMessage[];
+
+  constructor(
+    protected readonly aiConfig: LDAIConfig,
+    protected readonly tracker: LDAIConfigTracker,
+    protected readonly provider: AIProvider,
+  ) {
+    this.messages = [];
+  }
 
   /**
-   * Token usage information.
+   * Invoke the chat model with a prompt string.
+   * This method handles conversation management and tracking, delegating to the provider's invokeModel method.
    */
-  usage?: LDTokenUsage;
+  async invoke(prompt: string): Promise<ChatResponse> {
+    // Convert prompt string to LDMessage with role 'user' and add to conversation history
+    const userMessage: LDMessage = {
+      role: 'user',
+      content: prompt,
+    };
+    this.messages.push(userMessage);
+
+    // Prepend config messages to conversation history for model invocation
+    const configMessages = this.aiConfig.messages || [];
+    const allMessages = [...configMessages, ...this.messages];
+
+    // Delegate to provider-specific implementation with tracking
+    const response = await this.tracker.trackMetricsOf(
+      (result: ChatResponse) => result.metrics,
+      () => this.provider.invokeModel(allMessages),
+    );
+
+    // Add the assistant response to the conversation history
+    this.messages.push(response.message);
+
+    return response;
+  }
 
   /**
-   * Additional metadata from the provider.
+   * Get the underlying AI configuration used to initialize this TrackedChat.
    */
-  metadata?: Record<string, unknown>;
-}
+  getConfig(): LDAIConfig {
+    return this.aiConfig;
+  }
 
-/**
- * Interface for provider-specific tracked chat implementations.
- */
-export interface ProviderTrackedChat {
   /**
-   * Invoke the chat model with the provided messages.
-   * This method provides a consistent interface for chat model execution while integrating
-   * LaunchDarkly-specific functionality.
-   *
-   * @param prompt A prompt string that will be converted to a user message and added to the conversation history.
-   * @returns A promise that resolves to the chat response.
+   * Get the underlying AI configuration tracker used to initialize this TrackedChat.
    */
-  invoke(prompt: string): Promise<ChatResponse>;
+  getTracker(): LDAIConfigTracker {
+    return this.tracker;
+  }
 
   /**
-   * Get the underlying AI configuration used to initialize this TrackedChat.
-   *
-   * @returns The AI configuration.
+   * Get the underlying AI provider instance.
+   * This provides direct access to the provider for advanced use cases.
    */
-  getConfig(): LDAIConfig;
+  getProvider(): AIProvider {
+    return this.provider;
+  }
 
   /**
-   * Get the underlying AI configuration tracker used to initialize this TrackedChat.
+   * Append messages to the conversation history.
+   * Adds messages to the conversation history without invoking the model,
+   * which is useful for managing multi-turn conversations or injecting context.
    *
-   * @returns The AI configuration tracker.
+   * @param messages Array of messages to append to the conversation history
    */
-  getTracker(): LDAIConfigTracker;
+  appendMessages(messages: LDMessage[]): void {
+    this.messages.push(...messages);
+  }
 
   /**
-   * Get the underlying provider-specific chat model instance.
-   * This provides direct access to the underlying provider chat model for advanced use cases.
+   * Get all messages in the conversation history.
    *
-   * @returns The configured provider-specific chat model instance.
+   * @param includeConfigMessages Whether to include the config messages from the AIConfig.
+   *   Defaults to false.
+   * @returns Array of messages. When includeConfigMessages is true, returns both config
+   *   messages and conversation history with config messages prepended. When false,
+   *   returns only the conversation history messages.
    */
-  getChatModel(): unknown;
+  getMessages(includeConfigMessages: boolean = false): LDMessage[] {
+    if (includeConfigMessages) {
+      const configMessages = this.aiConfig.messages || [];
+      return [...configMessages, ...this.messages];
+    }
+    return [...this.messages];
+  }
 }
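A short usage sketch of the new `TrackedChat` surface, assuming `aiConfig`, `tracker`, and `provider` were obtained elsewhere (normally via `TrackedChatFactory`):

    const chat = new TrackedChat(aiConfig, tracker, provider);

    // Inject prior context without invoking the model.
    chat.appendMessages([{ role: 'assistant', content: 'How can I help?' }]);

    const response = await chat.invoke('Summarize my last order.');

    chat.getMessages();     // conversation history only
    chat.getMessages(true); // config messages prepended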
diff --git a/packages/sdk/server-ai/src/api/chat/TrackedChatFactory.ts b/packages/sdk/server-ai/src/api/chat/TrackedChatFactory.ts
index d6e3e1768..7e8052b1d 100644
--- a/packages/sdk/server-ai/src/api/chat/TrackedChatFactory.ts
+++ b/packages/sdk/server-ai/src/api/chat/TrackedChatFactory.ts
@@ -1,6 +1,9 @@
+import { LDLogger } from '@launchdarkly/js-server-sdk-common';
+
 import { LDAIConfig } from '../config/LDAIConfig';
 import { LDAIConfigTracker } from '../config/LDAIConfigTracker';
-import { BaseTrackedChat } from './BaseTrackedChat';
+import { AIProvider } from '../providers/AIProvider';
+import { TrackedChat } from './TrackedChat';
 
 /**
  * Factory for creating TrackedChat instances based on the provider configuration.
@@ -10,58 +13,82 @@ export class TrackedChatFactory {
    * Create a TrackedChat instance based on the AI configuration.
    * This method attempts to load provider-specific implementations dynamically.
    * Returns undefined if the provider is not supported.
+   *
+   * @param aiConfig The AI configuration
+   * @param tracker The tracker for AI operations
+   * @param logger Optional logger for logging provider initialization
    */
   static async create(
     aiConfig: LDAIConfig,
     tracker: LDAIConfigTracker,
-  ): Promise<BaseTrackedChat | undefined> {
+    logger?: LDLogger,
+  ): Promise<TrackedChat | undefined> {
+    const provider = await this._createAIProvider(aiConfig, logger);
+    if (!provider) {
+      logger?.warn(
+        `Provider is not supported or failed to initialize: ${aiConfig.provider?.name ?? 'unknown'}`,
+      );
+      return undefined;
+    }
+
+    logger?.debug(`Successfully created TrackedChat for provider: ${aiConfig.provider?.name}`);
+    return new TrackedChat(aiConfig, tracker, provider);
+  }
+
+  /**
+   * Create an AIProvider instance based on the AI configuration.
+   * This method attempts to load provider-specific implementations dynamically.
+   */
+  private static async _createAIProvider(
+    aiConfig: LDAIConfig,
+    logger?: LDLogger,
+  ): Promise<AIProvider | undefined> {
     const providerName = aiConfig.provider?.name?.toLowerCase();
-    let trackedChat: BaseTrackedChat | undefined;
+    logger?.debug(`Attempting to create AI provider: ${providerName ?? 'unknown'}`);
+    let provider: AIProvider | undefined;
 
     // Try specific implementations for the provider
     switch (providerName) {
       case 'openai':
-        trackedChat = undefined;
+        // TODO: Return OpenAI AIProvider implementation when available
+        provider = undefined;
         break;
       case 'bedrock':
-        trackedChat = undefined;
+        // TODO: Return Bedrock AIProvider implementation when available
+        provider = undefined;
        break;
       default:
-        trackedChat = undefined;
+        provider = undefined;
     }
 
-    // If no specific implementation worked, try LangChain as fallback
-    if (!trackedChat) {
-      trackedChat = await this._createLangChainTrackedChat(aiConfig, tracker);
+    // If no specific implementation worked, try the multi-provider packages
+    if (!provider) {
+      provider = await this._createLangChainProvider(aiConfig, logger);
     }
 
-    // If LangChain didn't work, try Vercel as fallback
-    if (!trackedChat) {
-      // TODO: Return Vercel AI SDK implementation when available
-      // trackedChat = this._createVercelTrackedChat(aiConfig, tracker);
-    }
-
-    return trackedChat;
+    return provider;
   }
 
   /**
-   * Create a LangChain TrackedChat instance if the LangChain provider is available.
+   * Create a LangChain AIProvider instance if the LangChain provider is available.
    */
-  private static async _createLangChainTrackedChat(
+  private static async _createLangChainProvider(
     aiConfig: LDAIConfig,
-    tracker: LDAIConfigTracker,
-  ): Promise<BaseTrackedChat | undefined> {
+    logger?: LDLogger,
+  ): Promise<AIProvider | undefined> {
     try {
+      logger?.debug('Attempting to load LangChain provider');
       // Try to dynamically import the LangChain provider
       // This will work if @launchdarkly/server-sdk-ai-langchain is installed
-      // eslint-disable-next-line @typescript-eslint/no-require-imports, import/no-extraneous-dependencies
-      const { LangChainTrackedChat, LangChainProvider } = require('@launchdarkly/server-sdk-ai-langchain');
+      // eslint-disable-next-line import/no-extraneous-dependencies, global-require
+      const { LangChainProvider } = require('@launchdarkly/server-sdk-ai-langchain');
 
-      // Build the LLM during factory creation to catch errors early
-      const llm = await LangChainProvider.createLangChainModel(aiConfig);
-      return new LangChainTrackedChat(aiConfig, tracker, llm);
+      const provider = await LangChainProvider.create(aiConfig);
+      logger?.debug('Successfully created LangChain provider');
+      return provider;
     } catch (error) {
-      // If the LangChain provider is not available or LLM creation fails, return undefined
+      // If the LangChain provider is not available or creation fails, return undefined
+      logger?.error(`Error creating LangChain provider: ${error}`);
       return undefined;
     }
   }
diff --git a/packages/sdk/server-ai/src/api/chat/index.ts b/packages/sdk/server-ai/src/api/chat/index.ts
index b24488a85..c95ec22e3 100644
--- a/packages/sdk/server-ai/src/api/chat/index.ts
+++ b/packages/sdk/server-ai/src/api/chat/index.ts
@@ -1,3 +1,3 @@
-export * from './BaseTrackedChat';
+export * from './types';
 export * from './TrackedChat';
 export * from './TrackedChatFactory';
diff --git a/packages/sdk/server-ai/src/api/chat/types.ts b/packages/sdk/server-ai/src/api/chat/types.ts
new file mode 100644
index 000000000..804bb2145
--- /dev/null
+++ b/packages/sdk/server-ai/src/api/chat/types.ts
@@ -0,0 +1,17 @@
+import { LDMessage } from '../config/LDAIConfig';
+import { LDAIMetrics } from '../metrics/LDAIMetrics';
+
+/**
+ * Chat response structure.
+ */
+export interface ChatResponse {
+  /**
+   * The response message from the AI.
+   */
+  message: LDMessage;
+
+  /**
+   * Metrics information including success status and token usage.
+   */
+  metrics: LDAIMetrics;
+}
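To illustrate the shape providers are now expected to return (the values below are illustrative):

    const response: ChatResponse = {
      message: { role: 'assistant', content: 'Hello!' },
      metrics: {
        success: true,
        usage: { total: 42, input: 30, output: 12 }, // optional
      },
    };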
diff --git a/packages/sdk/server-ai/src/api/config/LDAIConfigTracker.ts b/packages/sdk/server-ai/src/api/config/LDAIConfigTracker.ts
index 444060ef3..7f0b3f579 100644
--- a/packages/sdk/server-ai/src/api/config/LDAIConfigTracker.ts
+++ b/packages/sdk/server-ai/src/api/config/LDAIConfigTracker.ts
@@ -1,4 +1,4 @@
-import { LDFeedbackKind, LDTokenUsage } from '../metrics';
+import { LDAIMetrics, LDFeedbackKind, LDTokenUsage } from '../metrics';
 
 /**
  * Metrics which have been tracked.
@@ -87,6 +87,25 @@ export interface LDAIConfigTracker {
    */
   trackDurationOf<TRes>(func: () => Promise<TRes>): Promise<TRes>;
 
+  /**
+   * Track metrics for a generic AI operation.
+   *
+   * This function will track the duration of the operation, extract metrics using the provided
+   * metrics extractor function, and track success or error status accordingly.
+   *
+   * If the provided function throws, then this method will also throw.
+   * In the case the provided function throws, this function will record the duration and an error.
+   * A failed operation will not have any token usage data.
+   *
+   * @param metricsExtractor Function that extracts LDAIMetrics from the operation result
+   * @param func Function which executes the operation
+   * @returns The result of the operation
+   */
+  trackMetricsOf<TRes>(
+    metricsExtractor: (result: TRes) => LDAIMetrics,
+    func: () => Promise<TRes>,
+  ): Promise<TRes>;
+
   /**
    * Track an OpenAI operation.
    *
diff --git a/packages/sdk/server-ai/src/api/index.ts b/packages/sdk/server-ai/src/api/index.ts
index 158177089..cd27112f7 100644
--- a/packages/sdk/server-ai/src/api/index.ts
+++ b/packages/sdk/server-ai/src/api/index.ts
@@ -3,3 +3,4 @@ export * from './agents';
 export * from './chat';
 export * from './metrics';
 export * from './LDAIClient';
+export * from './providers';
diff --git a/packages/sdk/server-ai/src/api/metrics/LDAIMetrics.ts b/packages/sdk/server-ai/src/api/metrics/LDAIMetrics.ts
new file mode 100644
index 000000000..3b0fb99ec
--- /dev/null
+++ b/packages/sdk/server-ai/src/api/metrics/LDAIMetrics.ts
@@ -0,0 +1,18 @@
+import { LDTokenUsage } from './LDTokenUsage';
+
+/**
+ * Metrics information for AI operations that includes success status and token usage.
+ * This interface combines success/failure tracking with token usage metrics.
+ */
+export interface LDAIMetrics {
+  /**
+   * Whether the AI operation was successful.
+   */
+  success: boolean;
+
+  /**
+   * Token usage information for the operation.
+   * This will be undefined if no token usage data is available.
+   */
+  usage?: LDTokenUsage;
+}
diff --git a/packages/sdk/server-ai/src/api/metrics/index.ts b/packages/sdk/server-ai/src/api/metrics/index.ts
index 157fbd593..a7026f62d 100644
--- a/packages/sdk/server-ai/src/api/metrics/index.ts
+++ b/packages/sdk/server-ai/src/api/metrics/index.ts
@@ -1,5 +1,6 @@
 export * from './BedrockTokenUsage';
 export * from './OpenAiUsage';
 export * from './LDFeedbackKind';
+export * from './LDAIMetrics';
 export * from './LDTokenUsage';
 export * from './VercelAISDKTokenUsage';
diff --git a/packages/sdk/server-ai/src/api/providers/AIProvider.ts b/packages/sdk/server-ai/src/api/providers/AIProvider.ts
new file mode 100644
index 000000000..c64af9d71
--- /dev/null
+++ b/packages/sdk/server-ai/src/api/providers/AIProvider.ts
@@ -0,0 +1,35 @@
+import { ChatResponse } from '../chat/types';
+import { LDAIConfig, LDMessage } from '../config/LDAIConfig';
+
+/**
+ * Abstract base class for AI providers that implement chat model functionality.
+ * This class provides the contract that all provider implementations must follow
+ * to integrate with LaunchDarkly's tracking and configuration capabilities.
+ *
+ * Following the AICHAT spec recommendation to use base classes with non-abstract methods
+ * for better extensibility and backwards compatibility.
+ */
+export abstract class AIProvider {
+  /**
+   * Invoke the chat model with an array of messages.
+   * This method should convert messages to provider format, invoke the model,
+   * and return a ChatResponse with the result and metrics.
+   *
+   * @param messages Array of LDMessage objects representing the conversation
+   * @returns Promise that resolves to a ChatResponse containing the model's response
+   */
+  abstract invokeModel(messages: LDMessage[]): Promise<ChatResponse>;
+
+  /**
+   * Static method that constructs an instance of the provider.
+   * Each provider implementation must provide its own static create method
+   * that accepts an AIConfig and returns a configured instance.
+   *
+   * @param aiConfig The LaunchDarkly AI configuration
+   * @returns Promise that resolves to a configured provider instance
+   */
+  // eslint-disable-next-line @typescript-eslint/no-unused-vars
+  static async create(aiConfig: LDAIConfig): Promise<AIProvider> {
+    throw new Error('Provider implementations must override the static create method');
+  }
+}
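A minimal sketch of a custom provider built on the new base class; `EchoProvider` is hypothetical and simply echoes the last user message:

    class EchoProvider extends AIProvider {
      static async create(_aiConfig: LDAIConfig): Promise<AIProvider> {
        return new EchoProvider();
      }

      async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
        const last = messages[messages.length - 1];
        return {
          message: { role: 'assistant', content: last?.content ?? '' },
          // No token usage for a local echo; success-only metrics.
          metrics: { success: true },
        };
      }
    }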
diff --git a/packages/sdk/server-ai/src/api/providers/index.ts b/packages/sdk/server-ai/src/api/providers/index.ts
new file mode 100644
index 000000000..3b4d3f90a
--- /dev/null
+++ b/packages/sdk/server-ai/src/api/providers/index.ts
@@ -0,0 +1 @@
+export * from './AIProvider';
diff --git a/packages/sdk/server-ai/tsconfig.eslint.json b/packages/sdk/server-ai/tsconfig.eslint.json
index 56c9b3830..156dde825 100644
--- a/packages/sdk/server-ai/tsconfig.eslint.json
+++ b/packages/sdk/server-ai/tsconfig.eslint.json
@@ -1,5 +1,5 @@
 {
   "extends": "./tsconfig.json",
-  "include": ["/**/*.ts"],
-  "exclude": ["node_modules"]
+  "include": ["**/*.ts"],
+  "exclude": ["node_modules", "dist"]
 }