diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml
index d8be2d4ae6..358a6caa92 100644
--- a/.github/workflows/release-please.yml
+++ b/.github/workflows/release-please.yml
@@ -27,6 +27,7 @@ jobs:
       package-react-universal-release: ${{ steps.release.outputs['packages/sdk/react-universal--release_created'] }}
       package-browser-released: ${{ steps.release.outputs['packages/sdk/browser--release_created'] }}
       package-server-ai-released: ${{ steps.release.outputs['packages/sdk/server-ai--release_created'] }}
+      package-server-ai-langchain-released: ${{ steps.release.outputs['packages/ai-providers/server-ai-langchain--release_created'] }}
       package-browser-telemetry-released: ${{ steps.release.outputs['packages/telemetry/browser-telemetry--release_created'] }}
       package-combined-browser-released: ${{ steps.release.outputs['packages/sdk/combined-browser--release_created'] }}
     steps:
@@ -460,3 +461,23 @@ jobs:
         with:
           workspace_path: packages/sdk/combined-browser
           aws_assume_role: ${{ vars.AWS_ROLE_ARN }}
+
+  release-server-ai-langchain:
+    runs-on: ubuntu-latest
+    needs: ['release-please', 'release-server-ai']
+    permissions:
+      id-token: write
+      contents: write
+    if: ${{ always() && !failure() && !cancelled() && needs.release-please.outputs.package-server-ai-langchain-released == 'true'}}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-node@v4
+        with:
+          node-version: 22.x
+          registry-url: 'https://registry.npmjs.org'
+      - id: release-server-ai-langchain
+        name: Full release of packages/ai-providers/server-ai-langchain
+        uses: ./actions/full-release
+        with:
+          workspace_path: packages/ai-providers/server-ai-langchain
+          aws_assume_role: ${{ vars.AWS_ROLE_ARN }}
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index b19297a489..1dea9a82d7 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,22 +1,23 @@
 {
-  "packages/shared/common": "2.19.0",
-  "packages/shared/sdk-server": "2.16.2",
-  "packages/sdk/server-node": "9.10.2",
+  "packages/ai-providers/server-ai-langchain": "0.0.0",
+  "packages/sdk/akamai-base": "3.0.10",
+  "packages/sdk/akamai-edgekv": "1.4.12",
+  "packages/sdk/browser": "0.8.1",
   "packages/sdk/cloudflare": "2.7.10",
+  "packages/sdk/combined-browser": "0.0.0",
   "packages/sdk/fastly": "0.2.1",
-  "packages/shared/sdk-server-edge": "2.6.9",
+  "packages/sdk/react-native": "10.11.0",
+  "packages/sdk/server-ai": "0.12.0",
+  "packages/sdk/server-node": "9.10.2",
   "packages/sdk/vercel": "1.3.34",
-  "packages/sdk/akamai-base": "3.0.10",
-  "packages/sdk/akamai-edgekv": "1.4.12",
   "packages/shared/akamai-edgeworker-sdk": "2.0.10",
+  "packages/shared/common": "2.19.0",
+  "packages/shared/sdk-client": "1.15.1",
+  "packages/shared/sdk-server": "2.16.2",
+  "packages/shared/sdk-server-edge": "2.6.9",
   "packages/store/node-server-sdk-dynamodb": "6.2.14",
   "packages/store/node-server-sdk-redis": "4.2.14",
-  "packages/shared/sdk-client": "1.15.1",
-  "packages/sdk/react-native": "10.11.0",
-  "packages/telemetry/node-server-sdk-otel": "1.3.2",
-  "packages/sdk/browser": "0.8.1",
-  "packages/sdk/server-ai": "0.12.0",
   "packages/telemetry/browser-telemetry": "1.0.11",
-  "packages/tooling/jest": "0.1.11",
-  "packages/sdk/combined-browser": "0.0.0"
+  "packages/telemetry/node-server-sdk-otel": "1.3.2",
+  "packages/tooling/jest": "0.1.11"
 }
diff --git a/package.json b/package.json
index 313d3cf351..e05ef41e6b 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,7 @@
 {
   "name": "@launchdarkly/js-core",
   "workspaces": [
+    "packages/ai-providers/server-ai-langchain",
"packages/ai-providers/server-ai-langchain", "packages/shared/common", "packages/shared/sdk-client", "packages/shared/sdk-server", diff --git a/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts b/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts new file mode 100644 index 0000000000..5cd7b5879d --- /dev/null +++ b/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts @@ -0,0 +1,179 @@ +import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages'; + +import { LangChainProvider } from '../src/LangChainProvider'; + +// Mock LangChain dependencies +jest.mock('langchain/chat_models/universal', () => ({ + initChatModel: jest.fn(), +})); + +// Mock logger +const mockLogger = { + warn: jest.fn(), + info: jest.fn(), + error: jest.fn(), + debug: jest.fn(), +}; + +describe('LangChainProvider', () => { + describe('convertMessagesToLangChain', () => { + it('converts system messages to SystemMessage', () => { + const messages = [{ role: 'system' as const, content: 'You are a helpful assistant.' }]; + const result = LangChainProvider.convertMessagesToLangChain(messages); + + expect(result).toHaveLength(1); + expect(result[0]).toBeInstanceOf(SystemMessage); + expect(result[0].content).toBe('You are a helpful assistant.'); + }); + + it('converts user messages to HumanMessage', () => { + const messages = [{ role: 'user' as const, content: 'Hello, how are you?' }]; + const result = LangChainProvider.convertMessagesToLangChain(messages); + + expect(result).toHaveLength(1); + expect(result[0]).toBeInstanceOf(HumanMessage); + expect(result[0].content).toBe('Hello, how are you?'); + }); + + it('converts assistant messages to AIMessage', () => { + const messages = [{ role: 'assistant' as const, content: 'I am doing well, thank you!' }]; + const result = LangChainProvider.convertMessagesToLangChain(messages); + + expect(result).toHaveLength(1); + expect(result[0]).toBeInstanceOf(AIMessage); + expect(result[0].content).toBe('I am doing well, thank you!'); + }); + + it('converts multiple messages in order', () => { + const messages = [ + { role: 'system' as const, content: 'You are a helpful assistant.' }, + { role: 'user' as const, content: 'What is the weather like?' }, + { role: 'assistant' as const, content: 'I cannot check the weather.' 
diff --git a/packages/ai-providers/server-ai-langchain/jest.config.js b/packages/ai-providers/server-ai-langchain/jest.config.js
new file mode 100644
index 0000000000..f106eb3bc9
--- /dev/null
+++ b/packages/ai-providers/server-ai-langchain/jest.config.js
@@ -0,0 +1,7 @@
+module.exports = {
+  transform: { '^.+\\.ts?$': 'ts-jest' },
+  testMatch: ['**/__tests__/**/*test.ts?(x)'],
+  testEnvironment: 'node',
+  moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'],
+  collectCoverageFrom: ['src/**/*.ts'],
+};
diff --git a/packages/ai-providers/server-ai-langchain/package.json b/packages/ai-providers/server-ai-langchain/package.json
new file mode 100644
index 0000000000..4642fc5e75
--- /dev/null
+++ b/packages/ai-providers/server-ai-langchain/package.json
@@ -0,0 +1,55 @@
+{
+  "name": "@launchdarkly/server-sdk-ai-langchain",
+  "version": "0.0.0",
+  "description": "LaunchDarkly AI SDK LangChain Provider for Server-Side JavaScript",
+  "homepage": "https://github.com/launchdarkly/js-core/tree/main/packages/ai-providers/server-ai-langchain",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/launchdarkly/js-core.git"
+  },
+  "main": "dist/index.js",
+  "types": "dist/index.d.ts",
+  "type": "commonjs",
+  "scripts": {
+    "build": "npx tsc",
+    "lint": "npx eslint . --ext .ts",
+    "prettier": "prettier --write '**/*.@(js|ts|tsx|json|css)' --ignore-path ../../../.prettierignore",
+    "lint:fix": "yarn run lint --fix",
+    "check": "yarn prettier && yarn lint && yarn build && yarn test",
+    "test": "jest"
+  },
+  "keywords": [
+    "launchdarkly",
+    "ai",
+    "llm",
+    "langchain"
+  ],
+  "author": "LaunchDarkly",
+  "license": "Apache-2.0",
+  "dependencies": {
+    "@langchain/core": ">=0.2.21 <0.3.0",
+    "@launchdarkly/server-sdk-ai": "^0.12.0",
+    "langchain": "^0.2.11"
+  },
+  "devDependencies": {
+    "@launchdarkly/js-server-sdk-common": "2.16.2",
+    "@trivago/prettier-plugin-sort-imports": "^4.1.1",
+    "@types/jest": "^29.5.3",
+    "@typescript-eslint/eslint-plugin": "^6.20.0",
+    "@typescript-eslint/parser": "^6.20.0",
+    "eslint": "^8.45.0",
+    "eslint-config-airbnb-base": "^15.0.0",
+    "eslint-config-airbnb-typescript": "^17.1.0",
+    "eslint-config-prettier": "^8.8.0",
+    "eslint-plugin-import": "^2.27.5",
+    "eslint-plugin-jest": "^27.6.3",
+    "eslint-plugin-prettier": "^5.0.0",
+    "jest": "^29.6.1",
+    "prettier": "^3.0.0",
+    "ts-jest": "^29.1.1",
+    "typescript": "5.1.6"
+  },
+  "peerDependencies": {
+    "@launchdarkly/js-server-sdk-common": "2.x"
+  }
+}
diff --git a/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts
new file mode 100644
index 0000000000..b7981abac3
--- /dev/null
+++ b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts
@@ -0,0 +1,183 @@
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
+import { initChatModel } from 'langchain/chat_models/universal';
+
+import { LDLogger } from '@launchdarkly/js-server-sdk-common';
+import {
+  AIProvider,
+  ChatResponse,
+  LDAIConfig,
+  LDAIMetrics,
+  LDMessage,
+  LDTokenUsage,
+} from '@launchdarkly/server-sdk-ai';
+
+/**
+ * LangChain implementation of AIProvider.
+ * This provider integrates LangChain models with LaunchDarkly's tracking capabilities.
+ */
+export class LangChainProvider extends AIProvider {
+  private _llm: BaseChatModel;
+
+  constructor(llm: BaseChatModel, logger?: LDLogger) {
+    super(logger);
+    this._llm = llm;
+  }
+
+  // =============================================================================
+  // MAIN FACTORY METHOD
+  // =============================================================================
+
+  /**
+   * Static factory method to create a LangChain AIProvider from an AI configuration.
+   */
+  static async create(aiConfig: LDAIConfig, logger?: LDLogger): Promise<LangChainProvider> {
+    const llm = await LangChainProvider.createLangChainModel(aiConfig);
+    return new LangChainProvider(llm, logger);
+  }
+
+  // =============================================================================
+  // INSTANCE METHODS (AIProvider Implementation)
+  // =============================================================================
+
+  /**
+   * Invoke the LangChain model with an array of messages.
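+   *
+   * @example Illustrative usage (not part of the original patch; `provider` is a
+   * previously constructed instance and the message content is hypothetical):
+   * ```typescript
+   * const response = await provider.invokeModel([{ role: 'user', content: 'Hello!' }]);
+   * console.log(response.message.content);
+   * ```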
--ext .ts", + "prettier": "prettier --write '**/*.@(js|ts|tsx|json|css)' --ignore-path ../../../.prettierignore", + "lint:fix": "yarn run lint --fix", + "check": "yarn prettier && yarn lint && yarn build && yarn test", + "test": "jest" + }, + "keywords": [ + "launchdarkly", + "ai", + "llm", + "langchain" + ], + "author": "LaunchDarkly", + "license": "Apache-2.0", + "dependencies": { + "@langchain/core": ">=0.2.21 <0.3.0", + "@launchdarkly/server-sdk-ai": "^0.12.0", + "langchain": "^0.2.11" + }, + "devDependencies": { + "@launchdarkly/js-server-sdk-common": "2.16.2", + "@trivago/prettier-plugin-sort-imports": "^4.1.1", + "@types/jest": "^29.5.3", + "@typescript-eslint/eslint-plugin": "^6.20.0", + "@typescript-eslint/parser": "^6.20.0", + "eslint": "^8.45.0", + "eslint-config-airbnb-base": "^15.0.0", + "eslint-config-airbnb-typescript": "^17.1.0", + "eslint-config-prettier": "^8.8.0", + "eslint-plugin-import": "^2.27.5", + "eslint-plugin-jest": "^27.6.3", + "eslint-plugin-prettier": "^5.0.0", + "jest": "^29.6.1", + "prettier": "^3.0.0", + "ts-jest": "^29.1.1", + "typescript": "5.1.6" + }, + "peerDependencies": { + "@launchdarkly/js-server-sdk-common": "2.x" + } +} diff --git a/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts new file mode 100644 index 0000000000..b7981abac3 --- /dev/null +++ b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts @@ -0,0 +1,183 @@ +import { BaseChatModel } from '@langchain/core/language_models/chat_models'; +import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages'; +import { initChatModel } from 'langchain/chat_models/universal'; + +import { LDLogger } from '@launchdarkly/js-server-sdk-common'; +import { + AIProvider, + ChatResponse, + LDAIConfig, + LDAIMetrics, + LDMessage, + LDTokenUsage, +} from '@launchdarkly/server-sdk-ai'; + +/** + * LangChain implementation of AIProvider. + * This provider integrates LangChain models with LaunchDarkly's tracking capabilities. + */ +export class LangChainProvider extends AIProvider { + private _llm: BaseChatModel; + + constructor(llm: BaseChatModel, logger?: LDLogger) { + super(logger); + this._llm = llm; + } + + // ============================================================================= + // MAIN FACTORY METHOD + // ============================================================================= + + /** + * Static factory method to create a LangChain AIProvider from an AI configuration. + */ + static async create(aiConfig: LDAIConfig, logger?: LDLogger): Promise { + const llm = await LangChainProvider.createLangChainModel(aiConfig); + return new LangChainProvider(llm, logger); + } + + // ============================================================================= + // INSTANCE METHODS (AIProvider Implementation) + // ============================================================================= + + /** + * Invoke the LangChain model with an array of messages. 
+   */
+  static convertMessagesToLangChain(
+    messages: LDMessage[],
+  ): (HumanMessage | SystemMessage | AIMessage)[] {
+    return messages.map((msg) => {
+      switch (msg.role) {
+        case 'system':
+          return new SystemMessage(msg.content);
+        case 'user':
+          return new HumanMessage(msg.content);
+        case 'assistant':
+          return new AIMessage(msg.content);
+        default:
+          throw new Error(`Unsupported message role: ${msg.role}`);
+      }
+    });
+  }
+
+  /**
+   * Create a LangChain model from an AI configuration.
+   * This public helper method enables developers to initialize their own LangChain models
+   * using LaunchDarkly AI configurations.
+   *
+   * @param aiConfig The LaunchDarkly AI configuration
+   * @returns A Promise that resolves to a configured LangChain BaseChatModel
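+   *
+   * @example Illustrative usage (not part of the original patch; assumes `aiConfig`
+   * resolves to a provider/model pair supported by LangChain's initChatModel):
+   * ```typescript
+   * const llm = await LangChainProvider.createLangChainModel(aiConfig);
+   * const result = await llm.invoke([new HumanMessage('Hello!')]);
+   * ```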
+   */
+  static async createLangChainModel(aiConfig: LDAIConfig): Promise<BaseChatModel> {
+    const modelName = aiConfig.model?.name || '';
+    const provider = aiConfig.provider?.name || '';
+    const parameters = aiConfig.model?.parameters || {};
+
+    // Use LangChain's universal initChatModel to support multiple providers
+    return initChatModel(modelName, {
+      modelProvider: LangChainProvider.mapProvider(provider),
+      ...parameters,
+    });
+  }
+}
diff --git a/packages/ai-providers/server-ai-langchain/src/index.ts b/packages/ai-providers/server-ai-langchain/src/index.ts
new file mode 100644
index 0000000000..63c20c4154
--- /dev/null
+++ b/packages/ai-providers/server-ai-langchain/src/index.ts
@@ -0,0 +1,10 @@
+/**
+ * This is the API reference for the LaunchDarkly AI SDK LangChain Provider for Server-Side JavaScript.
+ *
+ * This package provides LangChain integration for the LaunchDarkly AI SDK, allowing you to use
+ * LangChain models and chains with LaunchDarkly's tracking and configuration capabilities.
+ *
+ * @packageDocumentation
+ */
+
+export * from './LangChainProvider';
diff --git a/packages/ai-providers/server-ai-langchain/tsconfig.eslint.json b/packages/ai-providers/server-ai-langchain/tsconfig.eslint.json
new file mode 100644
index 0000000000..56c9b38305
--- /dev/null
+++ b/packages/ai-providers/server-ai-langchain/tsconfig.eslint.json
@@ -0,0 +1,5 @@
+{
+  "extends": "./tsconfig.json",
+  "include": ["/**/*.ts"],
+  "exclude": ["node_modules"]
+}
diff --git a/packages/ai-providers/server-ai-langchain/tsconfig.json b/packages/ai-providers/server-ai-langchain/tsconfig.json
new file mode 100644
index 0000000000..6238d6a0f5
--- /dev/null
+++ b/packages/ai-providers/server-ai-langchain/tsconfig.json
@@ -0,0 +1,20 @@
+{
+  "compilerOptions": {
+    "target": "ES2020",
+    "module": "CommonJS",
+    "lib": ["ES2020"],
+    "moduleResolution": "node",
+    "esModuleInterop": true,
+    "allowSyntheticDefaultImports": true,
+    "strict": true,
+    "skipLibCheck": true,
+    "forceConsistentCasingInFileNames": true,
+    "outDir": "./dist",
+    "rootDir": "./src",
+    "declaration": true,
+    "declarationMap": true,
+    "sourceMap": true
+  },
+  "include": ["src/**/*"],
+  "exclude": ["dist", "node_modules", "**/*.test.ts", "**/*.spec.ts"]
+}
diff --git a/release-please-config.json b/release-please-config.json
index 5f903b99e6..0a0725f91b 100644
--- a/release-please-config.json
+++ b/release-please-config.json
@@ -1,5 +1,9 @@
 {
   "packages": {
+    "packages/ai-providers/server-ai-langchain": {
+      "bump-minor-pre-major": true,
+      "prerelease": true
+    },
     "packages/shared/common": {},
     "packages/shared/sdk-client": {},
     "packages/shared/sdk-server": {},
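
Illustrative end-to-end usage of the new package (a sketch, not part of this diff; the config key 'my-ai-config', the context, and the default value are hypothetical, and the initAi/config calls assume the @launchdarkly/server-sdk-ai API that this provider builds on):

    import { init } from '@launchdarkly/node-server-sdk';
    import { initAi } from '@launchdarkly/server-sdk-ai';
    import { LangChainProvider } from '@launchdarkly/server-sdk-ai-langchain';

    const ldClient = init(process.env.LAUNCHDARKLY_SDK_KEY ?? '');
    const aiClient = initAi(ldClient);

    async function main() {
      const context = { kind: 'user', key: 'example-user-key' };
      // Retrieve the AI config served by LaunchDarkly, falling back to a disabled default.
      const aiConfig = await aiClient.config('my-ai-config', context, { enabled: false });
      // Build a LangChain-backed provider from the config and invoke it.
      const provider = await LangChainProvider.create(aiConfig);
      const response = await provider.invokeModel([{ role: 'user', content: 'Hello!' }]);
      console.log(response.message.content);
    }

    main();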