From fe29f8f15aee3823b2ba52c276f9450fc4380e00 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Fri, 3 Oct 2025 15:22:37 +0000
Subject: [PATCH] Convert LangChain implementation to new AIProvider interface

---
 .../__tests__/LangChainProvider.test.ts       |  58 ++++++-
 .../server-ai-langchain/jest.config.js        |  10 +-
 .../src/LangChainProvider.ts                  | 155 ++++++++++++------
 .../src/LangChainTrackedChat.ts               |  86 ----------
 .../server-ai-langchain/src/index.ts          |   1 -
 .../server-ai-langchain/tsconfig.eslint.json  |   3 +-
 6 files changed, 170 insertions(+), 143 deletions(-)
 delete mode 100644 packages/ai-providers/server-ai-langchain/src/LangChainTrackedChat.ts

diff --git a/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts b/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts
index 0c1cd38f23..f4bab13bf7 100644
--- a/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts
+++ b/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts
@@ -1,7 +1,12 @@
-import { AIMessage, HumanMessage, SystemMessage } from 'langchain/schema';
+import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
 
 import { LangChainProvider } from '../src/LangChainProvider';
 
+// Mock LangChain dependencies
+jest.mock('langchain/chat_models/universal', () => ({
+  initChatModel: jest.fn(),
+}));
+
 describe('LangChainProvider', () => {
   describe('convertMessagesToLangChain', () => {
     it('converts system messages to SystemMessage', () => {
@@ -49,7 +54,7 @@ describe('LangChainProvider', () => {
       const messages = [{ role: 'unknown' as any, content: 'Test message' }];
 
       expect(() => LangChainProvider.convertMessagesToLangChain(messages)).toThrow(
-        'Unsupported message role: unknown'
+        'Unsupported message role: unknown',
       );
     });
 
@@ -59,4 +64,53 @@ describe('LangChainProvider', () => {
       expect(result).toHaveLength(0);
     });
   });
+
+  describe('createAIMetrics', () => {
+    it('creates metrics with success=true and token usage', () => {
+      const mockResponse = new AIMessage('Test response');
+      mockResponse.response_metadata = {
+        tokenUsage: {
+          totalTokens: 100,
+          promptTokens: 50,
+          completionTokens: 50,
+        },
+      };
+
+      const result = LangChainProvider.createAIMetrics(mockResponse);
+
+      expect(result).toEqual({
+        success: true,
+        usage: {
+          total: 100,
+          input: 50,
+          output: 50,
+        },
+      });
+    });
+
+    it('creates metrics with success=true and no usage when metadata is missing', () => {
+      const mockResponse = new AIMessage('Test response');
+
+      const result = LangChainProvider.createAIMetrics(mockResponse);
+
+      expect(result).toEqual({
+        success: true,
+        usage: undefined,
+      });
+    });
+  });
+
+  describe('mapProvider', () => {
+    it('maps gemini to google-genai', () => {
+      expect(LangChainProvider.mapProvider('gemini')).toBe('google-genai');
+      expect(LangChainProvider.mapProvider('Gemini')).toBe('google-genai');
+      expect(LangChainProvider.mapProvider('GEMINI')).toBe('google-genai');
+    });
+
+    it('returns provider name unchanged for unmapped providers', () => {
+      expect(LangChainProvider.mapProvider('openai')).toBe('openai');
+      expect(LangChainProvider.mapProvider('anthropic')).toBe('anthropic');
+      expect(LangChainProvider.mapProvider('unknown')).toBe('unknown');
+    });
+  });
 });
diff --git a/packages/ai-providers/server-ai-langchain/jest.config.js b/packages/ai-providers/server-ai-langchain/jest.config.js
index 9e3ea08f04..f106eb3bc9 100644
--- a/packages/ai-providers/server-ai-langchain/jest.config.js
+++ b/packages/ai-providers/server-ai-langchain/jest.config.js
@@ -1,9 +1,7 @@
 module.exports = {
-  preset: 'ts-jest',
+  transform: { '^.+\\.ts?$': 'ts-jest' },
+  testMatch: ['**/__tests__/**/*test.ts?(x)'],
   testEnvironment: 'node',
-  roots: ['<rootDir>/src'],
-  testMatch: ['**/__tests__/**/*.test.ts'],
-  collectCoverageFrom: ['src/**/*.ts', '!src/**/*.d.ts'],
-  coverageDirectory: 'coverage',
-  coverageReporters: ['text', 'lcov', 'html'],
+  moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'],
+  collectCoverageFrom: ['src/**/*.ts'],
 };
diff --git a/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts
index b80134dd7c..bdd76273fd 100644
--- a/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts
+++ b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts
@@ -3,16 +3,95 @@ import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages
 import { initChatModel } from 'langchain/chat_models/universal';
 
 import {
+  AIProvider,
+  ChatResponse,
   LDAIConfig,
-  LDAIConfigTracker,
+  LDAIMetrics,
   LDMessage,
   LDTokenUsage,
 } from '@launchdarkly/server-sdk-ai';
 
 /**
- * LangChain provider utilities and helper functions.
+ * LangChain implementation of AIProvider.
+ * This provider integrates LangChain models with LaunchDarkly's tracking capabilities.
  */
-export class LangChainProvider {
+export class LangChainProvider extends AIProvider {
+  private _llm: BaseChatModel;
+
+  constructor(llm: BaseChatModel) {
+    super();
+    this._llm = llm;
+  }
+
+  // =============================================================================
+  // MAIN FACTORY METHOD
+  // =============================================================================
+
+  /**
+   * Static factory method to create a LangChain AIProvider from an AI configuration.
+   */
+  static async create(aiConfig: LDAIConfig): Promise<LangChainProvider> {
+    const llm = await LangChainProvider.createLangChainModel(aiConfig);
+    return new LangChainProvider(llm);
+  }
+
+  // =============================================================================
+  // INSTANCE METHODS (AIProvider Implementation)
+  // =============================================================================
+
+  /**
+   * Invoke the LangChain model with an array of messages.
+   */
+  async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
+    // Convert LDMessage[] to LangChain messages
+    const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages);
+
+    // Get the LangChain response
+    const response: AIMessage = await this._llm.invoke(langchainMessages);
+
+    // Handle different content types from LangChain
+    let content: string;
+    if (typeof response.content === 'string') {
+      content = response.content;
+    } else if (Array.isArray(response.content)) {
+      // Handle complex content (e.g., with images)
+      content = response.content
+        .map((item: any) => {
+          if (typeof item === 'string') return item;
+          if (item.type === 'text') return item.text;
+          return '';
+        })
+        .join('');
+    } else {
+      content = String(response.content);
+    }
+
+    // Create the assistant message
+    const assistantMessage: LDMessage = {
+      role: 'assistant',
+      content,
+    };
+
+    // Extract metrics including token usage and success status
+    const metrics = LangChainProvider.createAIMetrics(response);
+
+    return {
+      message: assistantMessage,
+      metrics,
+    };
+  }
+
+  /**
+   * Get the underlying LangChain model instance.
+   */
+  getChatModel(): BaseChatModel {
+    return this._llm;
+  }
+
+  // =============================================================================
+  // STATIC UTILITY METHODS
+  // =============================================================================
+
   /**
    * Map LaunchDarkly provider names to LangChain provider names.
    * This method enables seamless integration between LaunchDarkly's standardized
@@ -29,21 +108,35 @@ }
 
   /**
-   * Create token usage information from a LangChain provider response.
-   * This method extracts token usage information from LangChain responses
-   * and returns a LaunchDarkly TokenUsage object.
+   * Create AI metrics information from a LangChain provider response.
+   * This method extracts token usage information and success status from LangChain responses
+   * and returns a LaunchDarkly AIMetrics object.
+   *
+   * @example
+   * ```typescript
+   * // Use with tracker.trackMetricsOf for automatic tracking
+   * const response = await tracker.trackMetricsOf(
+   *   (result: AIMessage) => LangChainProvider.createAIMetrics(result),
+   *   () => llm.invoke(messages)
+   * );
+   * ```
    */
-  static createTokenUsage(langChainResponse: AIMessage): LDTokenUsage | undefined {
-    if (!langChainResponse?.response_metadata?.tokenUsage) {
-      return undefined;
+  static createAIMetrics(langChainResponse: AIMessage): LDAIMetrics {
+    // Extract token usage if available
+    let usage: LDTokenUsage | undefined;
+    if (langChainResponse?.response_metadata?.tokenUsage) {
+      const { tokenUsage } = langChainResponse.response_metadata;
+      usage = {
+        total: tokenUsage.totalTokens || 0,
+        input: tokenUsage.promptTokens || 0,
+        output: tokenUsage.completionTokens || 0,
+      };
     }
-    const { tokenUsage } = langChainResponse.response_metadata;
-
+    // LangChain responses that complete successfully are considered successful
     return {
-      total: tokenUsage.totalTokens || 0,
-      input: tokenUsage.promptTokens || 0,
-      output: tokenUsage.completionTokens || 0,
+      success: true,
+      usage,
     };
   }
@@ -69,38 +162,6 @@
     });
   }
 
-  /**
-   * Track metrics for a LangChain callable execution.
-   * This helper method enables developers to work directly with LangChain callables
-   * while ensuring consistent tracking behavior.
-   */
-  static async trackMetricsOf(
-    tracker: LDAIConfigTracker,
-    callable: () => Promise<AIMessage>,
-  ): Promise<AIMessage> {
-    return tracker.trackDurationOf(async () => {
-      try {
-        const result = await callable();
-
-        // Extract and track token usage if available
-        const tokenUsage = this.createTokenUsage(result);
-        if (tokenUsage) {
-          tracker.trackTokens({
-            total: tokenUsage.total,
-            input: tokenUsage.input,
-            output: tokenUsage.output,
-          });
-        }
-
-        tracker.trackSuccess();
-        return result;
-      } catch (error) {
-        tracker.trackError();
-        throw error;
-      }
-    });
-  }
-
   /**
    * Create a LangChain model from an AI configuration.
   * This public helper method enables developers to initialize their own LangChain models
   * and use them directly while still benefiting from LaunchDarkly configuration.
   */
@@ -116,7 +177,7 @@
 
     // Use LangChain's universal initChatModel to support multiple providers
     return initChatModel(modelName, {
-      modelProvider: this.mapProvider(provider),
+      modelProvider: LangChainProvider.mapProvider(provider),
       ...parameters,
     });
   }
diff --git a/packages/ai-providers/server-ai-langchain/src/LangChainTrackedChat.ts b/packages/ai-providers/server-ai-langchain/src/LangChainTrackedChat.ts
deleted file mode 100644
index b0dd093dfb..0000000000
--- a/packages/ai-providers/server-ai-langchain/src/LangChainTrackedChat.ts
+++ /dev/null
@@ -1,86 +0,0 @@
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
-
-import {
-  BaseTrackedChat,
-  ChatResponse,
-  LDAIConfig,
-  LDAIConfigTracker,
-  LDMessage,
-} from '@launchdarkly/server-sdk-ai';
-
-import { LangChainProvider } from './LangChainProvider';
-
-/**
- * LangChain-specific implementation of TrackedChat.
- * This implementation integrates LangChain models with LaunchDarkly's tracking capabilities.
- */
-export class LangChainTrackedChat extends BaseTrackedChat {
-  private _llm: BaseChatModel;
-
-  constructor(aiConfig: LDAIConfig, tracker: LDAIConfigTracker, llm: BaseChatModel) {
-    super(aiConfig, tracker);
-    this._llm = llm;
-  }
-
-  /**
-   * Provider-specific implementation that converts LDMessage[] to LangChain format,
-   * invokes the model, and returns a ChatResponse.
-   */
-  protected async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
-    // Convert LDMessage[] to LangChain messages
-    const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages);
-
-    // Get the LangChain response
-    const response = await this._llm.invoke(langchainMessages);
-
-    // Extract token usage if available using the helper method
-    const usage = LangChainProvider.createTokenUsage(response);
-
-    // Handle different content types from LangChain
-    let content: string;
-    if (typeof response.content === 'string') {
-      content = response.content;
-    } else if (Array.isArray(response.content)) {
-      // Handle complex content (e.g., with images)
-      content = response.content
-        .map((item: any) => {
-          if (typeof item === 'string') return item;
-          if (item.type === 'text') return item.text;
-          return '';
-        })
-        .join('');
-    } else {
-      content = String(response.content);
-    }
-
-    // Create the assistant message
-    const assistantMessage: LDMessage = {
-      role: 'assistant',
-      content,
-    };
-
-    return {
-      message: assistantMessage,
-      usage,
-    };
-  }
-
-  /**
-   * LangChain-specific invoke method that accepts LangChain-native message types.
-   * This is the main implementation that does all the tracking and LangChain logic.
-   */
-  async trackLangChainInvoke(
-    messages: (HumanMessage | SystemMessage | AIMessage)[],
-  ): Promise<AIMessage> {
-    // Use the trackMetricsOf helper to handle all tracking automatically
-    return LangChainProvider.trackMetricsOf(this.tracker, () => this._llm.invoke(messages));
-  }
-
-  /**
-   * Get the underlying LangChain model instance.
-   */
-  async getChatModel(): Promise<BaseChatModel> {
-    return this._llm;
-  }
-}
diff --git a/packages/ai-providers/server-ai-langchain/src/index.ts b/packages/ai-providers/server-ai-langchain/src/index.ts
index ea8dcd6bf4..63c20c4154 100644
--- a/packages/ai-providers/server-ai-langchain/src/index.ts
+++ b/packages/ai-providers/server-ai-langchain/src/index.ts
@@ -7,5 +7,4 @@
  * @packageDocumentation
  */
 
-export * from './LangChainTrackedChat';
 export * from './LangChainProvider';
diff --git a/packages/ai-providers/server-ai-langchain/tsconfig.eslint.json b/packages/ai-providers/server-ai-langchain/tsconfig.eslint.json
index 67f3670709..56c9b38305 100644
--- a/packages/ai-providers/server-ai-langchain/tsconfig.eslint.json
+++ b/packages/ai-providers/server-ai-langchain/tsconfig.eslint.json
@@ -1,4 +1,5 @@
 {
   "extends": "./tsconfig.json",
-  "include": ["src/**/*", "**/*.test.ts", "**/*.spec.ts"]
+  "include": ["/**/*.ts"],
+  "exclude": ["node_modules"]
 }
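Usage after this change, as a minimal sketch: `LangChainProvider.create` builds a model through LangChain's `initChatModel`, `invokeModel` returns the assistant message plus `LDAIMetrics`, and LangChain-native calls can still be tracked by pairing `tracker.trackMetricsOf` with `createAIMetrics` (the pattern that replaces the deleted `LangChainProvider.trackMetricsOf`). The package import path and the `tracker` property on the resolved `LDAIConfig` are assumptions for illustration, not guaranteed by this patch.

```typescript
import { AIMessage } from '@langchain/core/messages';
import type { LDAIConfig, LDMessage } from '@launchdarkly/server-sdk-ai';
// Assumed npm name for this workspace package.
import { LangChainProvider } from '@launchdarkly/server-sdk-ai-langchain';

async function example(aiConfig: LDAIConfig) {
  // Factory from this patch: resolves the underlying model via initChatModel.
  const provider = await LangChainProvider.create(aiConfig);

  const messages: LDMessage[] = [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'Hello!' },
  ];

  // invokeModel converts LDMessage[] to LangChain messages, invokes the model,
  // and reports success plus token usage in the returned metrics.
  const { message, metrics } = await provider.invokeModel(messages);
  console.log(message.content, metrics?.usage);

  // For custom LangChain calls, drop down to the raw model and track metrics
  // as in the createAIMetrics JSDoc example (assumes aiConfig exposes tracker).
  const llm = provider.getChatModel();
  return aiConfig.tracker.trackMetricsOf(
    (result: AIMessage) => LangChainProvider.createAIMetrics(result),
    () => llm.invoke(LangChainProvider.convertMessagesToLangChain(messages)),
  );
}
```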