From 6090da78647bcf459fa3b2df641e88d04389931f Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Thu, 2 Oct 2025 17:01:50 +0000 Subject: [PATCH 01/10] feat: Add LangChain Provider for AI SDK --- .release-please-manifest.json | 29 +++-- package.json | 1 + .../__tests__/LangChainProvider.test.ts | 62 +++++++++ .../server-ai-langchain/jest.config.js | 9 ++ .../server-ai-langchain/package.json | 55 ++++++++ .../src/LangChainProvider.ts | 123 ++++++++++++++++++ .../src/LangChainTrackedChat.ts | 86 ++++++++++++ .../server-ai-langchain/src/index.ts | 11 ++ .../server-ai-langchain/tsconfig.eslint.json | 4 + .../server-ai-langchain/tsconfig.json | 20 +++ 10 files changed, 387 insertions(+), 13 deletions(-) create mode 100644 packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts create mode 100644 packages/ai-providers/server-ai-langchain/jest.config.js create mode 100644 packages/ai-providers/server-ai-langchain/package.json create mode 100644 packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts create mode 100644 packages/ai-providers/server-ai-langchain/src/LangChainTrackedChat.ts create mode 100644 packages/ai-providers/server-ai-langchain/src/index.ts create mode 100644 packages/ai-providers/server-ai-langchain/tsconfig.eslint.json create mode 100644 packages/ai-providers/server-ai-langchain/tsconfig.json diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d5001d1e2f..7d51191fcb 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,22 +1,25 @@ { - "packages/shared/common": "2.19.0", - "packages/shared/sdk-server": "2.16.2", - "packages/sdk/server-node": "9.10.2", + "packages/ai-providers/server-ai-langchain": "0.1.0-alpha.0", + "packages/sdk/akamai-base": "3.0.10", + "packages/sdk/akamai-edgekv": "1.4.12", + "packages/sdk/browser": "0.8.1", "packages/sdk/cloudflare": "2.7.10", + "packages/sdk/combined-browser": "0.0.0", "packages/sdk/fastly": "0.2.1", - "packages/shared/sdk-server-edge": "2.6.9", + "packages/sdk/react-native": "10.11.0", + "packages/sdk/react-universal": "0.0.1", + "packages/sdk/server-ai": "0.11.4", + "packages/sdk/server-node": "9.10.2", + "packages/sdk/svelte": "0.1.0", "packages/sdk/vercel": "1.3.34", - "packages/sdk/akamai-base": "3.0.10", - "packages/sdk/akamai-edgekv": "1.4.12", "packages/shared/akamai-edgeworker-sdk": "2.0.10", + "packages/shared/common": "2.19.0", + "packages/shared/sdk-client": "1.15.1", + "packages/shared/sdk-server": "2.16.2", + "packages/shared/sdk-server-edge": "2.6.9", "packages/store/node-server-sdk-dynamodb": "6.2.14", "packages/store/node-server-sdk-redis": "4.2.14", - "packages/shared/sdk-client": "1.15.1", - "packages/sdk/react-native": "10.11.0", - "packages/telemetry/node-server-sdk-otel": "1.3.2", - "packages/sdk/browser": "0.8.1", - "packages/sdk/server-ai": "0.11.4", "packages/telemetry/browser-telemetry": "1.0.11", - "packages/tooling/jest": "0.1.11", - "packages/sdk/combined-browser": "0.0.0" + "packages/telemetry/node-server-sdk-otel": "1.3.2", + "packages/tooling/jest": "0.1.11" } diff --git a/package.json b/package.json index 313d3cf351..e05ef41e6b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,7 @@ { "name": "@launchdarkly/js-core", "workspaces": [ + "packages/ai-providers/server-ai-langchain", "packages/shared/common", "packages/shared/sdk-client", "packages/shared/sdk-server", diff --git a/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts 
b/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts new file mode 100644 index 0000000000..0c1cd38f23 --- /dev/null +++ b/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts @@ -0,0 +1,62 @@ +import { AIMessage, HumanMessage, SystemMessage } from 'langchain/schema'; + +import { LangChainProvider } from '../src/LangChainProvider'; + +describe('LangChainProvider', () => { + describe('convertMessagesToLangChain', () => { + it('converts system messages to SystemMessage', () => { + const messages = [{ role: 'system' as const, content: 'You are a helpful assistant.' }]; + const result = LangChainProvider.convertMessagesToLangChain(messages); + + expect(result).toHaveLength(1); + expect(result[0]).toBeInstanceOf(SystemMessage); + expect(result[0].content).toBe('You are a helpful assistant.'); + }); + + it('converts user messages to HumanMessage', () => { + const messages = [{ role: 'user' as const, content: 'Hello, how are you?' }]; + const result = LangChainProvider.convertMessagesToLangChain(messages); + + expect(result).toHaveLength(1); + expect(result[0]).toBeInstanceOf(HumanMessage); + expect(result[0].content).toBe('Hello, how are you?'); + }); + + it('converts assistant messages to AIMessage', () => { + const messages = [{ role: 'assistant' as const, content: 'I am doing well, thank you!' }]; + const result = LangChainProvider.convertMessagesToLangChain(messages); + + expect(result).toHaveLength(1); + expect(result[0]).toBeInstanceOf(AIMessage); + expect(result[0].content).toBe('I am doing well, thank you!'); + }); + + it('converts multiple messages in order', () => { + const messages = [ + { role: 'system' as const, content: 'You are a helpful assistant.' }, + { role: 'user' as const, content: 'What is the weather like?' }, + { role: 'assistant' as const, content: 'I cannot check the weather.' 
}, + ]; + const result = LangChainProvider.convertMessagesToLangChain(messages); + + expect(result).toHaveLength(3); + expect(result[0]).toBeInstanceOf(SystemMessage); + expect(result[1]).toBeInstanceOf(HumanMessage); + expect(result[2]).toBeInstanceOf(AIMessage); + }); + + it('throws error for unsupported message role', () => { + const messages = [{ role: 'unknown' as any, content: 'Test message' }]; + + expect(() => LangChainProvider.convertMessagesToLangChain(messages)).toThrow( + 'Unsupported message role: unknown' + ); + }); + + it('handles empty message array', () => { + const result = LangChainProvider.convertMessagesToLangChain([]); + + expect(result).toHaveLength(0); + }); + }); +}); diff --git a/packages/ai-providers/server-ai-langchain/jest.config.js b/packages/ai-providers/server-ai-langchain/jest.config.js new file mode 100644 index 0000000000..9e3ea08f04 --- /dev/null +++ b/packages/ai-providers/server-ai-langchain/jest.config.js @@ -0,0 +1,9 @@ +module.exports = { + preset: 'ts-jest', + testEnvironment: 'node', + roots: ['<rootDir>/src'], + testMatch: ['**/__tests__/**/*.test.ts'], + collectCoverageFrom: ['src/**/*.ts', '!src/**/*.d.ts'], + coverageDirectory: 'coverage', + coverageReporters: ['text', 'lcov', 'html'], +}; diff --git a/packages/ai-providers/server-ai-langchain/package.json b/packages/ai-providers/server-ai-langchain/package.json new file mode 100644 index 0000000000..a97c2dd95a --- /dev/null +++ b/packages/ai-providers/server-ai-langchain/package.json @@ -0,0 +1,55 @@ +{ + "name": "@launchdarkly/server-sdk-ai-langchain", + "version": "0.1.0-alpha.0", + "description": "LaunchDarkly AI SDK LangChain Provider for Server-Side JavaScript", + "homepage": "https://github.com/launchdarkly/js-core/tree/main/packages/ai-providers/server-ai-langchain", + "repository": { + "type": "git", + "url": "https://github.com/launchdarkly/js-core.git" + }, + "main": "dist/index.js", + "types": "dist/index.d.ts", + "type": "commonjs", + "scripts": { + "build": "npx tsc", + "lint": "npx eslint . 
--ext .ts", + "prettier": "prettier --write '**/*.@(js|ts|tsx|json|css)' --ignore-path ../../../.prettierignore", + "lint:fix": "yarn run lint --fix", + "check": "yarn prettier && yarn lint && yarn build && yarn test", + "test": "jest" + }, + "keywords": [ + "launchdarkly", + "ai", + "llm", + "langchain" + ], + "author": "LaunchDarkly", + "license": "Apache-2.0", + "dependencies": { + "@langchain/core": ">=0.2.21 <0.3.0", + "@launchdarkly/server-sdk-ai": "0.11.4", + "langchain": "^0.2.11" + }, + "devDependencies": { + "@launchdarkly/js-server-sdk-common": "2.16.2", + "@trivago/prettier-plugin-sort-imports": "^4.1.1", + "@types/jest": "^29.5.3", + "@typescript-eslint/eslint-plugin": "^6.20.0", + "@typescript-eslint/parser": "^6.20.0", + "eslint": "^8.45.0", + "eslint-config-airbnb-base": "^15.0.0", + "eslint-config-airbnb-typescript": "^17.1.0", + "eslint-config-prettier": "^8.8.0", + "eslint-plugin-import": "^2.27.5", + "eslint-plugin-jest": "^27.6.3", + "eslint-plugin-prettier": "^5.0.0", + "jest": "^29.6.1", + "prettier": "^3.0.0", + "ts-jest": "^29.1.1", + "typescript": "5.1.6" + }, + "peerDependencies": { + "@launchdarkly/js-server-sdk-common": "2.x" + } +} diff --git a/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts new file mode 100644 index 0000000000..b80134dd7c --- /dev/null +++ b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts @@ -0,0 +1,123 @@ +import { BaseChatModel } from '@langchain/core/language_models/chat_models'; +import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages'; +import { initChatModel } from 'langchain/chat_models/universal'; + +import { + LDAIConfig, + LDAIConfigTracker, + LDMessage, + LDTokenUsage, +} from '@launchdarkly/server-sdk-ai'; + +/** + * LangChain provider utilities and helper functions. + */ +export class LangChainProvider { + /** + * Map LaunchDarkly provider names to LangChain provider names. + * This method enables seamless integration between LaunchDarkly's standardized + * provider naming and LangChain's naming conventions. + */ + static mapProvider(ldProviderName: string): string { + const lowercasedName = ldProviderName.toLowerCase(); + + const mapping: Record<string, string> = { + gemini: 'google-genai', + }; + + return mapping[lowercasedName] || lowercasedName; + } + + /** + * Create token usage information from a LangChain provider response. + * This method extracts token usage information from LangChain responses + * and returns a LaunchDarkly TokenUsage object. + */ + static createTokenUsage(langChainResponse: AIMessage): LDTokenUsage | undefined { + if (!langChainResponse?.response_metadata?.tokenUsage) { + return undefined; + } + + const { tokenUsage } = langChainResponse.response_metadata; + + return { + total: tokenUsage.totalTokens || 0, + input: tokenUsage.promptTokens || 0, + output: tokenUsage.completionTokens || 0, + }; + } + + /** + * Convert LaunchDarkly messages to LangChain messages. + * This helper method enables developers to work directly with LangChain message types + * while maintaining compatibility with LaunchDarkly's standardized message format. 
+ */ + static convertMessagesToLangChain( + messages: LDMessage[], + ): (HumanMessage | SystemMessage | AIMessage)[] { + return messages.map((msg) => { + switch (msg.role) { + case 'system': + return new SystemMessage(msg.content); + case 'user': + return new HumanMessage(msg.content); + case 'assistant': + return new AIMessage(msg.content); + default: + throw new Error(`Unsupported message role: ${msg.role}`); + } + }); + } + + /** + * Track metrics for a LangChain callable execution. + * This helper method enables developers to work directly with LangChain callables + * while ensuring consistent tracking behavior. + */ + static async trackMetricsOf( + tracker: LDAIConfigTracker, + callable: () => Promise<AIMessage>, + ): Promise<AIMessage> { + return tracker.trackDurationOf(async () => { + try { + const result = await callable(); + + // Extract and track token usage if available + const tokenUsage = this.createTokenUsage(result); + if (tokenUsage) { + tracker.trackTokens({ + total: tokenUsage.total, + input: tokenUsage.input, + output: tokenUsage.output, + }); + } + + tracker.trackSuccess(); + return result; + } catch (error) { + tracker.trackError(); + throw error; + } + }); + } + + /** + * Create a LangChain model from an AI configuration. + * This public helper method enables developers to initialize their own LangChain models + * using LaunchDarkly AI configurations. + * + * @param aiConfig The LaunchDarkly AI configuration + * @returns A Promise that resolves to a configured LangChain BaseChatModel + */ + static async createLangChainModel(aiConfig: LDAIConfig): Promise<BaseChatModel> { + const modelName = aiConfig.model?.name || ''; + const provider = aiConfig.provider?.name || ''; + const parameters = aiConfig.model?.parameters || {}; + + // Use LangChain's universal initChatModel to support multiple providers + return initChatModel(modelName, { + modelProvider: this.mapProvider(provider), + ...parameters, + }); + } +} diff --git a/packages/ai-providers/server-ai-langchain/src/LangChainTrackedChat.ts b/packages/ai-providers/server-ai-langchain/src/LangChainTrackedChat.ts new file mode 100644 index 0000000000..b0dd093dfb --- /dev/null +++ b/packages/ai-providers/server-ai-langchain/src/LangChainTrackedChat.ts @@ -0,0 +1,86 @@ +import { BaseChatModel } from '@langchain/core/language_models/chat_models'; +import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages'; + +import { + BaseTrackedChat, + ChatResponse, + LDAIConfig, + LDAIConfigTracker, + LDMessage, +} from '@launchdarkly/server-sdk-ai'; + +import { LangChainProvider } from './LangChainProvider'; + +/** + * LangChain-specific implementation of TrackedChat. + * This implementation integrates LangChain models with LaunchDarkly's tracking capabilities. + */ +export class LangChainTrackedChat extends BaseTrackedChat { + private _llm: BaseChatModel; + + constructor(aiConfig: LDAIConfig, tracker: LDAIConfigTracker, llm: BaseChatModel) { + super(aiConfig, tracker); + this._llm = llm; + } + + /** + * Provider-specific implementation that converts LDMessage[] to LangChain format, + * invokes the model, and returns a ChatResponse. 
+ */ + protected async invokeModel(messages: LDMessage[]): Promise<ChatResponse> { + // Convert LDMessage[] to LangChain messages + const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages); + + // Get the LangChain response + const response = await this._llm.invoke(langchainMessages); + + // Extract token usage if available using the helper method + const usage = LangChainProvider.createTokenUsage(response); + + // Handle different content types from LangChain + let content: string; + if (typeof response.content === 'string') { + content = response.content; + } else if (Array.isArray(response.content)) { + // Handle complex content (e.g., with images) + content = response.content + .map((item: any) => { + if (typeof item === 'string') return item; + if (item.type === 'text') return item.text; + return ''; + }) + .join(''); + } else { + content = String(response.content); + } + + // Create the assistant message + const assistantMessage: LDMessage = { + role: 'assistant', + content, + }; + + return { + message: assistantMessage, + usage, + }; + } + + /** + * LangChain-specific invoke method that accepts LangChain-native message types. + * This is the main implementation that does all the tracking and LangChain logic. + */ + async trackLangChainInvoke( + messages: (HumanMessage | SystemMessage | AIMessage)[], + ): Promise<AIMessage> { + // Use the trackMetricsOf helper to handle all tracking automatically + return LangChainProvider.trackMetricsOf(this.tracker, () => this._llm.invoke(messages)); + } + + /** + * Get the underlying LangChain model instance. + */ + async getChatModel(): Promise<BaseChatModel> { + return this._llm; + } +} diff --git a/packages/ai-providers/server-ai-langchain/src/index.ts b/packages/ai-providers/server-ai-langchain/src/index.ts new file mode 100644 index 0000000000..ea8dcd6bf4 --- /dev/null +++ b/packages/ai-providers/server-ai-langchain/src/index.ts @@ -0,0 +1,11 @@ +/** + * This is the API reference for the LaunchDarkly AI SDK LangChain Provider for Server-Side JavaScript. + * + * This package provides LangChain integration for the LaunchDarkly AI SDK, allowing you to use + * LangChain models and chains with LaunchDarkly's tracking and configuration capabilities. 
+ * + * @packageDocumentation + */ + +export * from './LangChainTrackedChat'; +export * from './LangChainProvider'; diff --git a/packages/ai-providers/server-ai-langchain/tsconfig.eslint.json b/packages/ai-providers/server-ai-langchain/tsconfig.eslint.json new file mode 100644 index 0000000000..67f3670709 --- /dev/null +++ b/packages/ai-providers/server-ai-langchain/tsconfig.eslint.json @@ -0,0 +1,4 @@ +{ + "extends": "./tsconfig.json", + "include": ["src/**/*", "**/*.test.ts", "**/*.spec.ts"] +} diff --git a/packages/ai-providers/server-ai-langchain/tsconfig.json b/packages/ai-providers/server-ai-langchain/tsconfig.json new file mode 100644 index 0000000000..6238d6a0f5 --- /dev/null +++ b/packages/ai-providers/server-ai-langchain/tsconfig.json @@ -0,0 +1,20 @@ +{ + "compilerOptions": { + "target": "ES2020", + "module": "CommonJS", + "lib": ["ES2020"], + "moduleResolution": "node", + "esModuleInterop": true, + "allowSyntheticDefaultImports": true, + "strict": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "outDir": "./dist", + "rootDir": "./src", + "declaration": true, + "declarationMap": true, + "sourceMap": true + }, + "include": ["src/**/*"], + "exclude": ["dist", "node_modules", "**/*.test.ts", "**/*.spec.ts"] +} From a08bfa0788fdd1ca6cac76f7d0ab471ed5d355ea Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Thu, 2 Oct 2025 17:07:56 +0000 Subject: [PATCH 02/10] Add release please configuration --- release-please-config.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/release-please-config.json b/release-please-config.json index 9fc35f4bed..69da9f84fb 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -1,5 +1,10 @@ { "packages": { + "packages/ai-providers/server-ai-langchain": { + "bump-minor-pre-major": true, + "release-as": "0.1.0-alpha.0", + "prerelease": true + }, "packages/shared/common": {}, "packages/shared/sdk-client": {}, "packages/shared/sdk-server": {}, From 214321983884ebf94fdf247cf4852980242712f4 Mon Sep 17 00:00:00 2001 From: Jason Bailey Date: Tue, 7 Oct 2025 12:35:38 -0500 Subject: [PATCH 03/10] feat: Convert LangChain implementation to new AIProvider interface (#942) --- .../__tests__/LangChainProvider.test.ts | 58 ++++++- .../server-ai-langchain/jest.config.js | 10 +- .../src/LangChainProvider.ts | 155 ++++++++++++------ .../src/LangChainTrackedChat.ts | 86 ---------- .../server-ai-langchain/src/index.ts | 1 - .../server-ai-langchain/tsconfig.eslint.json | 3 +- 6 files changed, 170 insertions(+), 143 deletions(-) delete mode 100644 packages/ai-providers/server-ai-langchain/src/LangChainTrackedChat.ts diff --git a/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts b/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts index 0c1cd38f23..f4bab13bf7 100644 --- a/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts +++ b/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts @@ -1,7 +1,12 @@ -import { AIMessage, HumanMessage, SystemMessage } from 'langchain/schema'; +import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages'; import { LangChainProvider } from '../src/LangChainProvider'; +// Mock LangChain dependencies +jest.mock('langchain/chat_models/universal', () => ({ + initChatModel: jest.fn(), +})); + describe('LangChainProvider', () => { describe('convertMessagesToLangChain', () => { it('converts system messages to SystemMessage', () => { @@ -49,7 +54,7 @@ describe('LangChainProvider', 
() => { const messages = [{ role: 'unknown' as any, content: 'Test message' }]; expect(() => LangChainProvider.convertMessagesToLangChain(messages)).toThrow( - 'Unsupported message role: unknown' + 'Unsupported message role: unknown', ); }); @@ -59,4 +64,53 @@ describe('LangChainProvider', () => { expect(result).toHaveLength(0); }); }); + + describe('createAIMetrics', () => { + it('creates metrics with success=true and token usage', () => { + const mockResponse = new AIMessage('Test response'); + mockResponse.response_metadata = { + tokenUsage: { + totalTokens: 100, + promptTokens: 50, + completionTokens: 50, + }, + }; + + const result = LangChainProvider.createAIMetrics(mockResponse); + + expect(result).toEqual({ + success: true, + usage: { + total: 100, + input: 50, + output: 50, + }, + }); + }); + + it('creates metrics with success=true and no usage when metadata is missing', () => { + const mockResponse = new AIMessage('Test response'); + + const result = LangChainProvider.createAIMetrics(mockResponse); + + expect(result).toEqual({ + success: true, + usage: undefined, + }); + }); + }); + + describe('mapProvider', () => { + it('maps gemini to google-genai', () => { + expect(LangChainProvider.mapProvider('gemini')).toBe('google-genai'); + expect(LangChainProvider.mapProvider('Gemini')).toBe('google-genai'); + expect(LangChainProvider.mapProvider('GEMINI')).toBe('google-genai'); + }); + + it('returns provider name unchanged for unmapped providers', () => { + expect(LangChainProvider.mapProvider('openai')).toBe('openai'); + expect(LangChainProvider.mapProvider('anthropic')).toBe('anthropic'); + expect(LangChainProvider.mapProvider('unknown')).toBe('unknown'); + }); + }); }); diff --git a/packages/ai-providers/server-ai-langchain/jest.config.js b/packages/ai-providers/server-ai-langchain/jest.config.js index 9e3ea08f04..f106eb3bc9 100644 --- a/packages/ai-providers/server-ai-langchain/jest.config.js +++ b/packages/ai-providers/server-ai-langchain/jest.config.js @@ -1,9 +1,7 @@ module.exports = { - preset: 'ts-jest', + transform: { '^.+\\.ts?$': 'ts-jest' }, + testMatch: ['**/__tests__/**/*test.ts?(x)'], testEnvironment: 'node', - roots: ['/src'], - testMatch: ['**/__tests__/**/*.test.ts'], - collectCoverageFrom: ['src/**/*.ts', '!src/**/*.d.ts'], - coverageDirectory: 'coverage', - coverageReporters: ['text', 'lcov', 'html'], + moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'], + collectCoverageFrom: ['src/**/*.ts'], }; diff --git a/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts index b80134dd7c..bdd76273fd 100644 --- a/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts +++ b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts @@ -3,16 +3,95 @@ import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages import { initChatModel } from 'langchain/chat_models/universal'; import { + AIProvider, + ChatResponse, LDAIConfig, - LDAIConfigTracker, + LDAIMetrics, LDMessage, LDTokenUsage, } from '@launchdarkly/server-sdk-ai'; /** - * LangChain provider utilities and helper functions. + * LangChain implementation of AIProvider. + * This provider integrates LangChain models with LaunchDarkly's tracking capabilities. 
*/ +export class LangChainProvider extends AIProvider { + private _llm: BaseChatModel; + + constructor(llm: BaseChatModel) { + super(); + this._llm = llm; + } + + // ============================================================================= + // MAIN FACTORY METHOD + // ============================================================================= + + /** + * Static factory method to create a LangChain AIProvider from an AI configuration. + */ + static async create(aiConfig: LDAIConfig): Promise<LangChainProvider> { + const llm = await LangChainProvider.createLangChainModel(aiConfig); + return new LangChainProvider(llm); + } + + // ============================================================================= + // INSTANCE METHODS (AIProvider Implementation) + // ============================================================================= + + /** + * Invoke the LangChain model with an array of messages. + */ + async invokeModel(messages: LDMessage[]): Promise<ChatResponse> { + // Convert LDMessage[] to LangChain messages + const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages); + + // Get the LangChain response + const response: AIMessage = await this._llm.invoke(langchainMessages); + + // Handle different content types from LangChain + let content: string; + if (typeof response.content === 'string') { + content = response.content; + } else if (Array.isArray(response.content)) { + // Handle complex content (e.g., with images) + content = response.content + .map((item: any) => { + if (typeof item === 'string') return item; + if (item.type === 'text') return item.text; + return ''; + }) + .join(''); + } else { + content = String(response.content); + } + + // Create the assistant message + const assistantMessage: LDMessage = { + role: 'assistant', + content, + }; + + // Extract metrics including token usage and success status + const metrics = LangChainProvider.createAIMetrics(response); + + return { + message: assistantMessage, + metrics, + }; + } + + /** + * Get the underlying LangChain model instance. + */ + getChatModel(): BaseChatModel { + return this._llm; + } + + // ============================================================================= + // STATIC UTILITY METHODS + // ============================================================================= + /** * Map LaunchDarkly provider names to LangChain provider names. * This method enables seamless integration between LaunchDarkly's standardized * provider naming and LangChain's naming conventions. @@ -29,21 +108,35 @@ export class LangChainProvider { } /** - * Create token usage information from a LangChain provider response. - * This method extracts token usage information from LangChain responses - * and returns a LaunchDarkly TokenUsage object. + * Create AI metrics information from a LangChain provider response. + * This method extracts token usage information and success status from LangChain responses + * and returns a LaunchDarkly AIMetrics object. 
+ * + * @example + * ```typescript + * // Use with tracker.trackMetricsOf for automatic tracking + * const response = await tracker.trackMetricsOf( + * (result: AIMessage) => LangChainProvider.createAIMetrics(result), + * () => llm.invoke(messages) + * ); + * ``` + */ - static createTokenUsage(langChainResponse: AIMessage): LDTokenUsage | undefined { - if (!langChainResponse?.response_metadata?.tokenUsage) { - return undefined; + static createAIMetrics(langChainResponse: AIMessage): LDAIMetrics { + // Extract token usage if available + let usage: LDTokenUsage | undefined; + if (langChainResponse?.response_metadata?.tokenUsage) { + const { tokenUsage } = langChainResponse.response_metadata; + usage = { + total: tokenUsage.totalTokens || 0, + input: tokenUsage.promptTokens || 0, + output: tokenUsage.completionTokens || 0, + }; } - const { tokenUsage } = langChainResponse.response_metadata; - + // LangChain responses that complete successfully are considered successful return { - total: tokenUsage.totalTokens || 0, - input: tokenUsage.promptTokens || 0, - output: tokenUsage.completionTokens || 0, + success: true, + usage, }; } @@ -69,38 +162,6 @@ export class LangChainProvider { }); } - /** - * Track metrics for a LangChain callable execution. - * This helper method enables developers to work directly with LangChain callables - * while ensuring consistent tracking behavior. - */ - static async trackMetricsOf( - tracker: LDAIConfigTracker, - callable: () => Promise<AIMessage>, - ): Promise<AIMessage> { - return tracker.trackDurationOf(async () => { - try { - const result = await callable(); - - // Extract and track token usage if available - const tokenUsage = this.createTokenUsage(result); - if (tokenUsage) { - tracker.trackTokens({ - total: tokenUsage.total, - input: tokenUsage.input, - output: tokenUsage.output, - }); - } - - tracker.trackSuccess(); - return result; - } catch (error) { - tracker.trackError(); - throw error; - } - }); - } - /** * Create a LangChain model from an AI configuration. * This public helper method enables developers to initialize their own LangChain models * using LaunchDarkly AI configurations. @@ -116,7 +177,7 @@ export class LangChainProvider { // Use LangChain's universal initChatModel to support multiple providers return initChatModel(modelName, { - modelProvider: this.mapProvider(provider), + modelProvider: LangChainProvider.mapProvider(provider), ...parameters, }); } diff --git a/packages/ai-providers/server-ai-langchain/src/LangChainTrackedChat.ts b/packages/ai-providers/server-ai-langchain/src/LangChainTrackedChat.ts deleted file mode 100644 index b0dd093dfb..0000000000 --- a/packages/ai-providers/server-ai-langchain/src/LangChainTrackedChat.ts +++ /dev/null @@ -1,86 +0,0 @@ -import { BaseChatModel } from '@langchain/core/language_models/chat_models'; -import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages'; - -import { - BaseTrackedChat, - ChatResponse, - LDAIConfig, - LDAIConfigTracker, - LDMessage, -} from '@launchdarkly/server-sdk-ai'; - -import { LangChainProvider } from './LangChainProvider'; - -/** - * LangChain-specific implementation of TrackedChat. - * This implementation integrates LangChain models with LaunchDarkly's tracking capabilities. 
- */ -export class LangChainTrackedChat extends BaseTrackedChat { - private _llm: BaseChatModel; - - constructor(aiConfig: LDAIConfig, tracker: LDAIConfigTracker, llm: BaseChatModel) { - super(aiConfig, tracker); - this._llm = llm; - } - - /** - * Provider-specific implementation that converts LDMessage[] to LangChain format, - * invokes the model, and returns a ChatResponse. - */ - protected async invokeModel(messages: LDMessage[]): Promise<ChatResponse> { - // Convert LDMessage[] to LangChain messages - const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages); - - // Get the LangChain response - const response = await this._llm.invoke(langchainMessages); - - // Extract token usage if available using the helper method - const usage = LangChainProvider.createTokenUsage(response); - - // Handle different content types from LangChain - let content: string; - if (typeof response.content === 'string') { - content = response.content; - } else if (Array.isArray(response.content)) { - // Handle complex content (e.g., with images) - content = response.content - .map((item: any) => { - if (typeof item === 'string') return item; - if (item.type === 'text') return item.text; - return ''; - }) - .join(''); - } else { - content = String(response.content); - } - - // Create the assistant message - const assistantMessage: LDMessage = { - role: 'assistant', - content, - }; - - return { - message: assistantMessage, - usage, - }; - } - - /** - * LangChain-specific invoke method that accepts LangChain-native message types. - * This is the main implementation that does all the tracking and LangChain logic. - */ - async trackLangChainInvoke( - messages: (HumanMessage | SystemMessage | AIMessage)[], - ): Promise<AIMessage> { - // Use the trackMetricsOf helper to handle all tracking automatically - return LangChainProvider.trackMetricsOf(this.tracker, () => this._llm.invoke(messages)); - } - - /** - * Get the underlying LangChain model instance. 
- */ - async getChatModel(): Promise<BaseChatModel> { - return this._llm; - } -} diff --git a/packages/ai-providers/server-ai-langchain/src/index.ts b/packages/ai-providers/server-ai-langchain/src/index.ts index ea8dcd6bf4..63c20c4154 100644 --- a/packages/ai-providers/server-ai-langchain/src/index.ts +++ b/packages/ai-providers/server-ai-langchain/src/index.ts @@ -7,5 +7,4 @@ * @packageDocumentation */ -export * from './LangChainTrackedChat'; export * from './LangChainProvider'; diff --git a/packages/ai-providers/server-ai-langchain/tsconfig.eslint.json b/packages/ai-providers/server-ai-langchain/tsconfig.eslint.json index 67f3670709..56c9b38305 100644 --- a/packages/ai-providers/server-ai-langchain/tsconfig.eslint.json +++ b/packages/ai-providers/server-ai-langchain/tsconfig.eslint.json @@ -1,4 +1,5 @@ { "extends": "./tsconfig.json", - "include": ["src/**/*", "**/*.test.ts", "**/*.spec.ts"] + "include": ["/**/*.ts"], + "exclude": ["node_modules"] } From fd44c92bb2f987106eeb94a2927a3b0742c5445c Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 8 Oct 2025 14:37:19 +0000 Subject: [PATCH 04/10] add logger to ai provider --- .../src/LangChainProvider.ts | 30 ++++++++----------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts index bdd76273fd..47911486e8 100644 --- a/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts +++ b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts @@ -2,6 +2,7 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models'; import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages'; import { initChatModel } from 'langchain/chat_models/universal'; +import { LDLogger } from '@launchdarkly/js-server-sdk-common'; import { AIProvider, ChatResponse, @@ -18,8 +19,8 @@ import { export class LangChainProvider extends AIProvider { private _llm: BaseChatModel; - constructor(llm: BaseChatModel) { - super(); + constructor(llm: BaseChatModel, logger?: LDLogger) { + super(logger); this._llm = llm; } @@ -30,9 +31,9 @@ export class LangChainProvider extends AIProvider { /** * Static factory method to create a LangChain AIProvider from an AI configuration. 
*/ - static async create(aiConfig: LDAIConfig): Promise<LangChainProvider> { + static async create(aiConfig: LDAIConfig, logger?: LDLogger): Promise<LangChainProvider> { const llm = await LangChainProvider.createLangChainModel(aiConfig); - return new LangChainProvider(llm); + return new LangChainProvider(llm, logger); } // ============================================================================= @@ -44,26 +45,21 @@ export class LangChainProvider extends AIProvider { */ async invokeModel(messages: LDMessage[]): Promise<ChatResponse> { // Convert LDMessage[] to LangChain messages - const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages); + const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages, this.logger); // Get the LangChain response const response: AIMessage = await this._llm.invoke(langchainMessages); - // Handle different content types from LangChain - let content: string; + // Extract text content from the response + let content: string = ''; if (typeof response.content === 'string') { content = response.content; - } else if (Array.isArray(response.content)) { - // Handle complex content (e.g., with images) - content = response.content - .map((item: any) => { - if (typeof item === 'string') return item; - if (item.type === 'text') return item.text; - return ''; - }) - .join(''); } else { - content = String(response.content); + // Log warning for non-string content (likely multimodal) + this.logger?.warn( + `Multimodal response not supported, expecting a string. Content type: ${typeof response.content}, Content:`, + JSON.stringify(response.content, null, 2), + ); } // Create the assistant message From 7758ae142a85410a7116688134c29a83e26672e3 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Fri, 10 Oct 2025 21:33:04 +0000 Subject: [PATCH 05/10] remove logger from method call and adjust initial version --- packages/ai-providers/server-ai-langchain/package.json | 2 +- .../ai-providers/server-ai-langchain/src/LangChainProvider.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/package.json b/packages/ai-providers/server-ai-langchain/package.json index a97c2dd95a..5631e9ee76 100644 --- a/packages/ai-providers/server-ai-langchain/package.json +++ b/packages/ai-providers/server-ai-langchain/package.json @@ -1,6 +1,6 @@ { "name": "@launchdarkly/server-sdk-ai-langchain", - "version": "0.1.0-alpha.0", + "version": "0.0.0", "description": "LaunchDarkly AI SDK LangChain Provider for Server-Side JavaScript", "homepage": "https://github.com/launchdarkly/js-core/tree/main/packages/ai-providers/server-ai-langchain", "repository": { diff --git a/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts index 47911486e8..795777719d 100644 --- a/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts +++ b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts @@ -45,7 +45,7 @@ export class LangChainProvider extends AIProvider { */ async invokeModel(messages: LDMessage[]): Promise<ChatResponse> { // Convert LDMessage[] to LangChain messages - const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages, this.logger); + const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages); From e2ab19a4c50b0063945758d846cb9312ed52b8e2 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Fri, 10 Oct 2025 21:34:49 +0000 Subject: 
[PATCH 06/10] fix release-please initial version --- .release-please-manifest.json | 2 +- release-please-config.json | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 7d51191fcb..a661b913b1 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,5 +1,5 @@ { - "packages/ai-providers/server-ai-langchain": "0.1.0-alpha.0", + "packages/ai-providers/server-ai-langchain": "0.0.0", "packages/sdk/akamai-base": "3.0.10", "packages/sdk/akamai-edgekv": "1.4.12", "packages/sdk/browser": "0.8.1", diff --git a/release-please-config.json b/release-please-config.json index 69da9f84fb..77afac3eec 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -2,7 +2,6 @@ "packages": { "packages/ai-providers/server-ai-langchain": { "bump-minor-pre-major": true, - "release-as": "0.1.0-alpha.0", "prerelease": true }, "packages/shared/common": {}, From a1d52fb50f02d592dff8ccbf23f3261ab1482d41 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Mon, 13 Oct 2025 15:06:52 +0000 Subject: [PATCH 07/10] fix: Target proper version of AI SDK for langchain --- packages/ai-providers/server-ai-langchain/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/ai-providers/server-ai-langchain/package.json b/packages/ai-providers/server-ai-langchain/package.json index 5631e9ee76..4642fc5e75 100644 --- a/packages/ai-providers/server-ai-langchain/package.json +++ b/packages/ai-providers/server-ai-langchain/package.json @@ -28,7 +28,7 @@ "license": "Apache-2.0", "dependencies": { "@langchain/core": ">=0.2.21 <0.3.0", - "@launchdarkly/server-sdk-ai": "0.11.4", + "@launchdarkly/server-sdk-ai": "^0.12.0", "langchain": "^0.2.11" }, "devDependencies": { From 2334e669d91e07e746cff321aa0487fcdfa3fe0e Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Mon, 13 Oct 2025 20:17:21 +0000 Subject: [PATCH 08/10] add release-please for langchain package --- .github/workflows/release-please.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml index d8be2d4ae6..358a6caa92 100644 --- a/.github/workflows/release-please.yml +++ b/.github/workflows/release-please.yml @@ -27,6 +27,7 @@ jobs: package-react-universal-release: ${{ steps.release.outputs['packages/sdk/react-universal--release_created'] }} package-browser-released: ${{ steps.release.outputs['packages/sdk/browser--release_created'] }} package-server-ai-released: ${{ steps.release.outputs['packages/sdk/server-ai--release_created'] }} + package-server-ai-langchain-released: ${{ steps.release.outputs['packages/ai-providers/server-ai-langchain--release_created'] }} package-browser-telemetry-released: ${{ steps.release.outputs['packages/telemetry/browser-telemetry--release_created'] }} package-combined-browser-released: ${{ steps.release.outputs['packages/sdk/combined-browser--release_created'] }} steps: @@ -460,3 +461,23 @@ jobs: with: workspace_path: packages/sdk/combined-browser aws_assume_role: ${{ vars.AWS_ROLE_ARN }} + + release-server-ai-langchain: + runs-on: ubuntu-latest + needs: ['release-please', 'release-server-ai'] + permissions: + id-token: write + contents: write + if: ${{ always() && !failure() && !cancelled() && needs.release-please.outputs.package-server-ai-langchain-released == 'true'}} + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: 22.x + registry-url: 'https://registry.npmjs.org' 
+ - id: release-server-ai-langchain + name: Full release of packages/ai-providers/server-ai-langchain + uses: ./actions/full-release + with: + workspace_path: packages/ai-providers/server-ai-langchain + aws_assume_role: ${{ vars.AWS_ROLE_ARN }} From 6df89f7024a669af8b5957e05eccb679264ddf26 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Mon, 13 Oct 2025 20:32:21 +0000 Subject: [PATCH 09/10] Remove experimental packages We may include these in the future, but they do not need to be added now, and certainly not in the LangChain PR. --- .release-please-manifest.json | 2 -- 1 file changed, 2 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 9692f475e0..1dea9a82d7 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -7,10 +7,8 @@ "packages/sdk/combined-browser": "0.0.0", "packages/sdk/fastly": "0.2.1", "packages/sdk/react-native": "10.11.0", - "packages/sdk/react-universal": "0.0.1", "packages/sdk/server-ai": "0.12.0", "packages/sdk/server-node": "9.10.2", - "packages/sdk/svelte": "0.1.0", "packages/sdk/vercel": "1.3.34", "packages/shared/akamai-edgeworker-sdk": "2.0.10", "packages/shared/common": "2.19.0", From ea659d4e1fc8e76c568399f6de202ca7c57c43d1 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Mon, 13 Oct 2025 20:53:04 +0000 Subject: [PATCH 10/10] return success=false if we do not support the result This is debatable, since the model did produce a result. Reporting a failure allows users to roll back an invalid config that generates an unsupported response, so we are taking the more cautious approach. --- .../__tests__/LangChainProvider.test.ts | 63 +++++++++++++++++++ .../src/LangChainProvider.ts | 11 ++-- 2 files changed, 70 insertions(+), 4 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts b/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts index f4bab13bf7..5cd7b5879d 100644 --- a/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts +++ b/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts @@ -7,6 +7,14 @@ jest.mock('langchain/chat_models/universal', () => ({ initChatModel: jest.fn(), })); +// Mock logger +const mockLogger = { + warn: jest.fn(), + info: jest.fn(), + error: jest.fn(), + debug: jest.fn(), +}; + describe('LangChainProvider', () => { describe('convertMessagesToLangChain', () => { it('converts system messages to SystemMessage', () => { @@ -100,6 +108,61 @@ describe('LangChainProvider', () => { }); }); + describe('invokeModel', () => { + let mockLLM: any; + let provider: LangChainProvider; + + beforeEach(() => { + mockLLM = { + invoke: jest.fn(), + }; + provider = new LangChainProvider(mockLLM, mockLogger); + jest.clearAllMocks(); + }); + + it('returns success=true for string content', async () => { + const mockResponse = new AIMessage('Test response'); + mockLLM.invoke.mockResolvedValue(mockResponse); + + const messages = [{ role: 'user' as const, content: 'Hello' }]; + const result = await provider.invokeModel(messages); + + expect(result.metrics.success).toBe(true); + expect(result.message.content).toBe('Test response'); + expect(mockLogger.warn).not.toHaveBeenCalled(); + }); + + it('returns success=false for non-string content and logs warning', async () => { + const mockResponse = new AIMessage({ type: 'image', data: 'base64data' } as any); + mockLLM.invoke.mockResolvedValue(mockResponse); + + const messages = [{ role: 'user' as const, content: 'Hello' }]; + const result = await 
provider.invokeModel(messages); + + expect(result.metrics.success).toBe(false); + expect(result.message.content).toBe(''); + expect(mockLogger.warn).toHaveBeenCalledWith( + 'Multimodal response not supported, expecting a string. Content type: object, Content:', + JSON.stringify({ type: 'image', data: 'base64data' }, null, 2), + ); + }); + + it('returns success=false for array content and logs warning', async () => { + const mockResponse = new AIMessage(['text', { type: 'image', data: 'base64data' }] as any); + mockLLM.invoke.mockResolvedValue(mockResponse); + + const messages = [{ role: 'user' as const, content: 'Hello' }]; + const result = await provider.invokeModel(messages); + + expect(result.metrics.success).toBe(false); + expect(result.message.content).toBe(''); + expect(mockLogger.warn).toHaveBeenCalledWith( + 'Multimodal response not supported, expecting a string. Content type: object, Content:', + JSON.stringify(['text', { type: 'image', data: 'base64data' }], null, 2), + ); + }); + }); + describe('mapProvider', () => { it('maps gemini to google-genai', () => { expect(LangChainProvider.mapProvider('gemini')).toBe('google-genai'); diff --git a/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts index 795777719d..b7981abac3 100644 --- a/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts +++ b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts @@ -50,6 +50,9 @@ export class LangChainProvider extends AIProvider { // Get the LangChain response const response: AIMessage = await this._llm.invoke(langchainMessages); + // Generate metrics early (assumes success by default) + const metrics = LangChainProvider.createAIMetrics(response); + // Extract text content from the response let content: string = ''; if (typeof response.content === 'string') { @@ -60,6 +63,8 @@ export class LangChainProvider extends AIProvider { `Multimodal response not supported, expecting a string. Content type: ${typeof response.content}, Content:`, JSON.stringify(response.content, null, 2), ); + // Update metrics to reflect content loss + metrics.success = false; } // Create the assistant message @@ -68,9 +73,6 @@ export class LangChainProvider extends AIProvider { content, }; - // Extract metrics including token usage and success status - const metrics = LangChainProvider.createAIMetrics(response); - return { message: assistantMessage, metrics, @@ -108,6 +110,7 @@ export class LangChainProvider extends AIProvider { * This method extracts token usage information and success status from LangChain responses * and returns a LaunchDarkly AIMetrics object. * + * @param langChainResponse The response from the LangChain model * @example * ```typescript * // Use with tracker.trackMetricsOf for automatic tracking @@ -129,7 +132,7 @@ export class LangChainProvider extends AIProvider { }; } - // LangChain responses that complete successfully are considered successful + // LangChain responses that complete successfully are considered successful by default return { success: true, usage,