diff --git a/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts b/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts
index 7568eff456..b6a6a5f8e7 100644
--- a/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts
+++ b/packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts
@@ -155,6 +155,66 @@ describe('LangChainProvider', () => {
       expect(result.message.content).toBe('');
       expect(mockLogger.warn).toHaveBeenCalledTimes(1);
     });
+
+    it('returns success=false when model invocation throws an error', async () => {
+      const error = new Error('Model invocation failed');
+      mockLLM.invoke.mockRejectedValue(error);
+
+      const messages = [{ role: 'user' as const, content: 'Hello' }];
+      const result = await provider.invokeModel(messages);
+
+      expect(result.metrics.success).toBe(false);
+      expect(result.message.content).toBe('');
+      expect(result.message.role).toBe('assistant');
+      expect(mockLogger.error).toHaveBeenCalledWith('LangChain model invocation failed:', error);
+    });
+  });
+
+  describe('invokeStructuredModel', () => {
+    let mockLLM: any;
+    let provider: LangChainProvider;
+
+    beforeEach(() => {
+      mockLLM = {
+        withStructuredOutput: jest.fn(),
+      };
+      provider = new LangChainProvider(mockLLM, mockLogger);
+      jest.clearAllMocks();
+    });
+
+    it('returns success=true for successful invocation', async () => {
+      const mockResponse = { result: 'structured data' };
+      const mockInvoke = jest.fn().mockResolvedValue(mockResponse);
+      mockLLM.withStructuredOutput.mockReturnValue({ invoke: mockInvoke });
+
+      const messages = [{ role: 'user' as const, content: 'Hello' }];
+      const responseStructure = { type: 'object', properties: {} };
+      const result = await provider.invokeStructuredModel(messages, responseStructure);
+
+      expect(result.metrics.success).toBe(true);
+      expect(result.data).toEqual(mockResponse);
+      expect(result.rawResponse).toBe(JSON.stringify(mockResponse));
+      expect(mockLogger.error).not.toHaveBeenCalled();
+    });
+
+    it('returns success=false when structured model invocation throws an error', async () => {
+      const error = new Error('Structured invocation failed');
+      const mockInvoke = jest.fn().mockRejectedValue(error);
+      mockLLM.withStructuredOutput.mockReturnValue({ invoke: mockInvoke });
+
+      const messages = [{ role: 'user' as const, content: 'Hello' }];
+      const responseStructure = { type: 'object', properties: {} };
+      const result = await provider.invokeStructuredModel(messages, responseStructure);
+
+      expect(result.metrics.success).toBe(false);
+      expect(result.data).toEqual({});
+      expect(result.rawResponse).toBe('');
+      expect(result.metrics.usage).toEqual({ total: 0, input: 0, output: 0 });
+      expect(mockLogger.error).toHaveBeenCalledWith(
+        'LangChain structured model invocation failed:',
+        error,
+      );
+    });
   });
 
   describe('mapProvider', () => {
diff --git a/packages/ai-providers/server-ai-langchain/package.json b/packages/ai-providers/server-ai-langchain/package.json
index 9c7c975e47..5cb5425c8e 100644
--- a/packages/ai-providers/server-ai-langchain/package.json
+++ b/packages/ai-providers/server-ai-langchain/package.json
@@ -28,7 +28,7 @@
   "license": "Apache-2.0",
   "devDependencies": {
     "@langchain/core": "^0.3.0",
-    "@launchdarkly/server-sdk-ai": "^0.12.3",
+    "@launchdarkly/server-sdk-ai": "^0.12.0",
     "@trivago/prettier-plugin-sort-imports": "^4.1.1",
     "@types/jest": "^29.5.3",
     "@typescript-eslint/eslint-plugin": "^6.20.0",
@@ -48,7 +48,7 @@
   },
   "peerDependencies": {
     "@langchain/core": "^0.2.0 || ^0.3.0",
-    "@launchdarkly/server-sdk-ai": "^0.12.2",
+    "@launchdarkly/server-sdk-ai": "^0.12.0",
     "langchain": "^0.2.0 || ^0.3.0"
   }
 }
diff --git a/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts
index 01df295384..20f7a929e1 100644
--- a/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts
+++ b/packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts
@@ -10,6 +10,7 @@ import type {
   LDLogger,
   LDMessage,
   LDTokenUsage,
+  StructuredResponse,
 } from '@launchdarkly/server-sdk-ai';
 
 /**
@@ -44,39 +45,102 @@ export class LangChainProvider extends AIProvider {
    * Invoke the LangChain model with an array of messages.
    */
   async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
-    // Convert LDMessage[] to LangChain messages
-    const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages);
-
-    // Get the LangChain response
-    const response: AIMessage = await this._llm.invoke(langchainMessages);
-
-    // Generate metrics early (assumes success by default)
-    const metrics = LangChainProvider.createAIMetrics(response);
-
-    // Extract text content from the response
-    let content: string = '';
-    if (typeof response.content === 'string') {
-      content = response.content;
-    } else {
-      // Log warning for non-string content (likely multimodal)
-      this.logger?.warn(
-        `Multimodal response not supported, expecting a string. Content type: ${typeof response.content}, Content:`,
-        JSON.stringify(response.content, null, 2),
-      );
-      // Update metrics to reflect content loss
-      metrics.success = false;
-    }
-
-    // Create the assistant message
-    const assistantMessage: LDMessage = {
-      role: 'assistant',
-      content,
-    };
-
-    return {
-      message: assistantMessage,
-      metrics,
-    };
-  }
+    try {
+      // Convert LDMessage[] to LangChain messages
+      const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages);
+
+      // Get the LangChain response
+      const response: AIMessage = await this._llm.invoke(langchainMessages);
+
+      // Generate metrics early (assumes success by default)
+      const metrics = LangChainProvider.createAIMetrics(response);
+
+      // Extract text content from the response
+      let content: string = '';
+      if (typeof response.content === 'string') {
+        content = response.content;
+      } else {
+        // Log warning for non-string content (likely multimodal)
+        this.logger?.warn(
+          `Multimodal response not supported, expecting a string. Content type: ${typeof response.content}, Content:`,
+          JSON.stringify(response.content, null, 2),
+        );
+        // Update metrics to reflect content loss
+        metrics.success = false;
+      }
+
+      // Create the assistant message
+      const assistantMessage: LDMessage = {
+        role: 'assistant',
+        content,
+      };
+
+      return {
+        message: assistantMessage,
+        metrics,
+      };
+    } catch (error) {
+      this.logger?.error('LangChain model invocation failed:', error);
+
+      return {
+        message: {
+          role: 'assistant',
+          content: '',
+        },
+        metrics: {
+          success: false,
+        },
+      };
+    }
+  }
+
+  /**
+   * Invoke the LangChain model with structured output support.
+   */
+  async invokeStructuredModel(
+    messages: LDMessage[],
+    responseStructure: Record<string, unknown>,
+  ): Promise<StructuredResponse> {
+    try {
+      // Convert LDMessage[] to LangChain messages
+      const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages);
+
+      // Get the LangChain response
+      const response = await this._llm
+        .withStructuredOutput(responseStructure)
+        .invoke(langchainMessages);
+
+      // Structured output does not expose token usage, so metrics are zeroed
+      const metrics = {
+        success: true,
+        usage: {
+          total: 0,
+          input: 0,
+          output: 0,
+        },
+      };
+
+      return {
+        data: response,
+        rawResponse: JSON.stringify(response),
+        metrics,
+      };
+    } catch (error) {
+      this.logger?.error('LangChain structured model invocation failed:', error);
+
+      return {
+        data: {},
+        rawResponse: '',
+        metrics: {
+          success: false,
+          usage: {
+            total: 0,
+            input: 0,
+            output: 0,
+          },
+        },
+      };
+    }
+  }
 
   /**
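Usage note (not part of the patch): a minimal sketch of how the new `invokeStructuredModel` entry point might be exercised, grounded in the tests above. The package name `@launchdarkly/server-sdk-ai-langchain` is assumed from the repo path, and `ChatOpenAI` is used purely for illustration; any LangChain chat model that implements `withStructuredOutput()` should work.

```ts
// Assumed imports: ChatOpenAI stands in for any LangChain BaseChatModel,
// and the provider package name is inferred from the repository layout.
import { ChatOpenAI } from '@langchain/openai';
import { LangChainProvider } from '@launchdarkly/server-sdk-ai-langchain';

async function main(): Promise<void> {
  const llm = new ChatOpenAI({ model: 'gpt-4o-mini' });
  // Constructor shape (llm, logger) matches the tests; console satisfies
  // the LDLogger surface (debug/info/warn/error).
  const provider = new LangChainProvider(llm, console);

  const result = await provider.invokeStructuredModel(
    [{ role: 'user', content: 'Summarize LaunchDarkly in one sentence.' }],
    {
      type: 'object',
      properties: { summary: { type: 'string' } },
      required: ['summary'],
    },
  );

  // Per the new catch paths, failures no longer throw: they surface as
  // success=false with empty data/rawResponse and zeroed token usage.
  if (result.metrics.success) {
    console.log('structured data:', result.data);
  } else {
    console.error('structured invocation failed; see provider logs');
  }
}

main().catch(console.error);
```

The zeroed `usage` object mirrors the implementation's comment that structured-output mode does not expose token counts, so callers can rely on `metrics.usage` always being present without branching on `undefined`.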