@@ -1,7 +1,12 @@
import { AIMessage, HumanMessage, SystemMessage } from 'langchain/schema';
import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';

import { LangChainProvider } from '../src/LangChainProvider';

// Mock LangChain dependencies
jest.mock('langchain/chat_models/universal', () => ({
initChatModel: jest.fn(),
}));

describe('LangChainProvider', () => {
describe('convertMessagesToLangChain', () => {
it('converts system messages to SystemMessage', () => {
@@ -49,7 +54,7 @@ describe('LangChainProvider', () => {
const messages = [{ role: 'unknown' as any, content: 'Test message' }];

expect(() => LangChainProvider.convertMessagesToLangChain(messages)).toThrow(
'Unsupported message role: unknown'
'Unsupported message role: unknown',
);
});

@@ -59,4 +64,53 @@ describe('LangChainProvider', () => {
expect(result).toHaveLength(0);
});
});

describe('createAIMetrics', () => {
it('creates metrics with success=true and token usage', () => {
const mockResponse = new AIMessage('Test response');
mockResponse.response_metadata = {
tokenUsage: {
totalTokens: 100,
promptTokens: 50,
completionTokens: 50,
},
};

const result = LangChainProvider.createAIMetrics(mockResponse);

expect(result).toEqual({
success: true,
usage: {
total: 100,
input: 50,
output: 50,
},
});
});

it('creates metrics with success=true and no usage when metadata is missing', () => {
const mockResponse = new AIMessage('Test response');

const result = LangChainProvider.createAIMetrics(mockResponse);

expect(result).toEqual({
success: true,
usage: undefined,
});
});
});

describe('mapProvider', () => {
it('maps gemini to google-genai', () => {
expect(LangChainProvider.mapProvider('gemini')).toBe('google-genai');
expect(LangChainProvider.mapProvider('Gemini')).toBe('google-genai');
expect(LangChainProvider.mapProvider('GEMINI')).toBe('google-genai');
});

it('returns provider name unchanged for unmapped providers', () => {
expect(LangChainProvider.mapProvider('openai')).toBe('openai');
expect(LangChainProvider.mapProvider('anthropic')).toBe('anthropic');
expect(LangChainProvider.mapProvider('unknown')).toBe('unknown');
});
});
});
10 changes: 4 additions & 6 deletions packages/ai-providers/server-ai-langchain/jest.config.js
@@ -1,9 +1,7 @@
module.exports = {
preset: 'ts-jest',
transform: { '^.+\\.ts?$': 'ts-jest' },
testMatch: ['**/__tests__/**/*test.ts?(x)'],
testEnvironment: 'node',
roots: ['<rootDir>/src'],
testMatch: ['**/__tests__/**/*.test.ts'],
collectCoverageFrom: ['src/**/*.ts', '!src/**/*.d.ts'],
coverageDirectory: 'coverage',
coverageReporters: ['text', 'lcov', 'html'],
moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'],
collectCoverageFrom: ['src/**/*.ts'],
};
155 changes: 108 additions & 47 deletions packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts
@@ -3,16 +3,95 @@ import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages
import { initChatModel } from 'langchain/chat_models/universal';

import {
AIProvider,
ChatResponse,
LDAIConfig,
LDAIConfigTracker,
LDAIMetrics,
LDMessage,
LDTokenUsage,
} from '@launchdarkly/server-sdk-ai';

/**
* LangChain provider utilities and helper functions.
* LangChain implementation of AIProvider.
* This provider integrates LangChain models with LaunchDarkly's tracking capabilities.
*/
export class LangChainProvider {
export class LangChainProvider extends AIProvider {
private _llm: BaseChatModel;

constructor(llm: BaseChatModel) {
super();
this._llm = llm;
}

// =============================================================================
// MAIN FACTORY METHOD
// =============================================================================

/**
* Static factory method to create a LangChain AIProvider from an AI configuration.
*/
static async create(aiConfig: LDAIConfig): Promise<LangChainProvider> {
const llm = await LangChainProvider.createLangChainModel(aiConfig);
return new LangChainProvider(llm);
}

// =============================================================================
// INSTANCE METHODS (AIProvider Implementation)
// =============================================================================

/**
* Invoke the LangChain model with an array of messages.
*/
async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
// Convert LDMessage[] to LangChain messages
const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages);

// Get the LangChain response
const response: AIMessage = await this._llm.invoke(langchainMessages);

// Handle different content types from LangChain
let content: string;
if (typeof response.content === 'string') {
content = response.content;
} else if (Array.isArray(response.content)) {
// Handle complex content (e.g., with images)
content = response.content
.map((item: any) => {
if (typeof item === 'string') return item;
if (item.type === 'text') return item.text;
return '';
})
.join('');
} else {
content = String(response.content);
}

// Create the assistant message
const assistantMessage: LDMessage = {
role: 'assistant',
content,
};

// Extract metrics including token usage and success status
const metrics = LangChainProvider.createAIMetrics(response);

return {
message: assistantMessage,
metrics,
};
}

/**
* Get the underlying LangChain model instance.
*/
getChatModel(): BaseChatModel {
return this._llm;
}

// =============================================================================
// STATIC UTILITY METHODS
// =============================================================================

/**
* Map LaunchDarkly provider names to LangChain provider names.
* This method enables seamless integration between LaunchDarkly's standardized
@@ -29,21 +108,35 @@ export class LangChainProvider {
}

/**
* Create token usage information from a LangChain provider response.
* This method extracts token usage information from LangChain responses
* and returns a LaunchDarkly TokenUsage object.
* Create AI metrics information from a LangChain provider response.
* This method extracts token usage information and success status from LangChain responses
* and returns a LaunchDarkly AIMetrics object.
*
* @example
* ```typescript
* // Use with tracker.trackMetricsOf for automatic tracking
* const response = await tracker.trackMetricsOf(
* (result: AIMessage) => LangChainProvider.createAIMetrics(result),
* () => llm.invoke(messages)
* );
* ```
*/
static createTokenUsage(langChainResponse: AIMessage): LDTokenUsage | undefined {
if (!langChainResponse?.response_metadata?.tokenUsage) {
return undefined;
static createAIMetrics(langChainResponse: AIMessage): LDAIMetrics {
// Extract token usage if available
let usage: LDTokenUsage | undefined;
if (langChainResponse?.response_metadata?.tokenUsage) {
const { tokenUsage } = langChainResponse.response_metadata;
usage = {
total: tokenUsage.totalTokens || 0,
Review discussion on this line:

Contributor: Just a thought: should this be promptTokens + completionTokens instead? I don't know if totalTokens would ever be different from prompt + completion, but if it ever was I could see users being confused.

Contributor (author): I am trying to find the conversation where this came up before. I believe @kinyoklion mentioned it. We used total tokens because it is possible some models may use additional tokens not accounted for in just the prompt / completion tokens.

Member (@kinyoklion, Oct 7, 2025): Yeah, that is the thought. We originally didn't know that prompt + completion = total would be a guarantee, which seems more likely if we eventually care about thinking tokens. For example, total may become prompt + thinking + completion, in which case our total would be incorrect if it were just prompt + completion.

Member: Though some providers currently aren't charging for thinking, so they are excluding it from their total calculation. But I could see it change either way.
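
A minimal illustrative sketch of the case discussed above, using hypothetical token counts rather than any real provider response:

```typescript
// Hypothetical usage metadata in which the provider counts reasoning
// ("thinking") tokens toward the total.
const tokenUsage = {
  promptTokens: 50,
  completionTokens: 50,
  totalTokens: 120, // 50 prompt + 50 completion + 20 hypothetical thinking tokens
};

// Reading totalTokens directly (as this diff does) preserves the provider's own
// accounting; summing promptTokens + completionTokens would report 100 instead.
const usage = {
  total: tokenUsage.totalTokens || 0, // 120
  input: tokenUsage.promptTokens || 0, // 50
  output: tokenUsage.completionTokens || 0, // 50
};
```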

input: tokenUsage.promptTokens || 0,
output: tokenUsage.completionTokens || 0,
};
}

const { tokenUsage } = langChainResponse.response_metadata;

// LangChain responses that complete successfully are considered successful
return {
total: tokenUsage.totalTokens || 0,
input: tokenUsage.promptTokens || 0,
output: tokenUsage.completionTokens || 0,
success: true,
usage,
};
}

Expand All @@ -69,38 +162,6 @@ export class LangChainProvider {
});
}

/**
* Track metrics for a LangChain callable execution.
* This helper method enables developers to work directly with LangChain callables
* while ensuring consistent tracking behavior.
*/
static async trackMetricsOf(
tracker: LDAIConfigTracker,
callable: () => Promise<AIMessage>,
): Promise<AIMessage> {
return tracker.trackDurationOf(async () => {
try {
const result = await callable();

// Extract and track token usage if available
const tokenUsage = this.createTokenUsage(result);
if (tokenUsage) {
tracker.trackTokens({
total: tokenUsage.total,
input: tokenUsage.input,
output: tokenUsage.output,
});
}

tracker.trackSuccess();
return result;
} catch (error) {
tracker.trackError();
throw error;
}
});
}

/**
* Create a LangChain model from an AI configuration.
* This public helper method enables developers to initialize their own LangChain models
Expand All @@ -116,7 +177,7 @@ export class LangChainProvider {

// Use LangChain's universal initChatModel to support multiple providers
return initChatModel(modelName, {
modelProvider: this.mapProvider(provider),
modelProvider: LangChainProvider.mapProvider(provider),
...parameters,
});
}
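
For context, a minimal usage sketch of the new provider surface as it appears in this diff. The function name and the relative import path are illustrative assumptions (the test file above imports from '../src/LangChainProvider'), and `aiConfig` is assumed to be obtained from the LaunchDarkly server AI SDK at runtime:

```typescript
import type { LDAIConfig } from '@launchdarkly/server-sdk-ai';

import { LangChainProvider } from '../src/LangChainProvider';

// Sketch only: create the provider from an AI config, then invoke the model once.
async function chatOnce(aiConfig: LDAIConfig): Promise<void> {
  const provider = await LangChainProvider.create(aiConfig);

  const { message, metrics } = await provider.invokeModel([
    { role: 'user', content: 'Hello' },
  ]);

  // `message` is the assistant reply; `metrics` carries success and token usage.
  console.log(message.content, metrics?.usage?.total);
}
```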

packages/ai-providers/server-ai-langchain/src/LangChainTrackedChat.ts
This file was deleted.

1 change: 0 additions & 1 deletion packages/ai-providers/server-ai-langchain/src/index.ts
@@ -7,5 +7,4 @@
* @packageDocumentation
*/

export * from './LangChainTrackedChat';
export * from './LangChainProvider';
packages/ai-providers/server-ai-langchain/tsconfig.eslint.json
@@ -1,4 +1,5 @@
{
"extends": "./tsconfig.json",
"include": ["src/**/*", "**/*.test.ts", "**/*.spec.ts"]
"include": ["/**/*.ts"],
Review comment on this line:

Bug: ESLint Include Pattern Uses Absolute Path

The include pattern ["/**/*.ts"] in tsconfig.eslint.json uses a leading slash, which makes it an absolute path from the filesystem root. This prevents ESLint from finding TypeScript files within the package, as it expects a path relative to the project directory.

"exclude": ["node_modules"]
}