diff --git a/.github/workflows/manual-publish.yml b/.github/workflows/manual-publish.yml
index 09c66d0f5f..17771edb76 100644
--- a/.github/workflows/manual-publish.yml
+++ b/.github/workflows/manual-publish.yml
@@ -35,6 +35,7 @@ on:
         - packages/tooling/jest
         - packages/sdk/browser
         - packages/sdk/server-ai
+        - packages/ai-providers/server-ai-openai
         - packages/ai-providers/server-ai-vercel
         - packages/ai-providers/server-ai-langchain
        - packages/telemetry/browser-telemetry
diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml
index 358a6caa92..5027cde82c 100644
--- a/.github/workflows/release-please.yml
+++ b/.github/workflows/release-please.yml
@@ -28,6 +28,7 @@ jobs:
       package-browser-released: ${{ steps.release.outputs['packages/sdk/browser--release_created'] }}
       package-server-ai-released: ${{ steps.release.outputs['packages/sdk/server-ai--release_created'] }}
       package-server-ai-langchain-released: ${{ steps.release.outputs['packages/ai-providers/server-ai-langchain--release_created'] }}
+      package-server-ai-openai-released: ${{ steps.release.outputs['packages/ai-providers/server-ai-openai--release_created'] }}
       package-browser-telemetry-released: ${{ steps.release.outputs['packages/telemetry/browser-telemetry--release_created'] }}
       package-combined-browser-released: ${{ steps.release.outputs['packages/sdk/combined-browser--release_created'] }}
     steps:
@@ -481,3 +482,23 @@
         with:
           workspace_path: packages/ai-providers/server-ai-langchain
           aws_assume_role: ${{ vars.AWS_ROLE_ARN }}
+
+  release-server-ai-openai:
+    runs-on: ubuntu-latest
+    needs: ['release-please', 'release-server-ai']
+    permissions:
+      id-token: write
+      contents: write
+    if: ${{ always() && !failure() && !cancelled() && needs.release-please.outputs.package-server-ai-openai-released == 'true' }}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-node@v4
+        with:
+          node-version: 22.x
+          registry-url: 'https://registry.npmjs.org'
+      - id: release-server-ai-openai
+        name: Full release of packages/ai-providers/server-ai-openai
+        uses: ./actions/full-release
+        with:
+          workspace_path: packages/ai-providers/server-ai-openai
+          aws_assume_role: ${{ vars.AWS_ROLE_ARN }}
diff --git a/.github/workflows/server-ai-openai.yml b/.github/workflows/server-ai-openai.yml
new file mode 100644
index 0000000000..36d7020543
--- /dev/null
+++ b/.github/workflows/server-ai-openai.yml
@@ -0,0 +1,27 @@
+name: ai-providers/server-ai-openai
+
+on:
+  push:
+    branches: [main, 'feat/**']
+    paths-ignore:
+      - '**.md' # Do not need to run CI for markdown changes.
+  pull_request:
+    branches: [main, 'feat/**']
+    paths-ignore:
+      - '**.md'
+
+jobs:
+  build-test-openai-provider:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
+        with:
+          node-version: 22.x
+          registry-url: 'https://registry.npmjs.org'
+      - id: shared
+        name: Shared CI Steps
+        uses: ./actions/ci
+        with:
+          workspace_name: '@launchdarkly/server-sdk-ai-openai'
+          workspace_path: packages/ai-providers/server-ai-openai
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 8adc53c3ab..93d29fdb3b 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,5 +1,6 @@
 {
   "packages/ai-providers/server-ai-langchain": "0.1.0",
+  "packages/ai-providers/server-ai-openai": "0.0.0",
   "packages/ai-providers/server-ai-vercel": "0.0.0",
   "packages/sdk/akamai-base": "3.0.10",
   "packages/sdk/akamai-edgekv": "1.4.12",
diff --git a/README.md b/README.md
index 6d26cac6af..44e17805bc 100644
--- a/README.md
+++ b/README.md
@@ -38,6 +38,7 @@ This includes shared libraries, used by SDKs and other tools, as well as SDKs.
 | AI Providers | npm | issues | tests |
 | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------- | ------------------------------------------- | ------------------------------------------------------------------- |
 | [@launchdarkly/server-sdk-ai-langchain](packages/ai-providers/server-ai-langchain/README.md) | [![NPM][server-ai-langchain-npm-badge]][server-ai-langchain-npm-link] | [server-ai-langchain][package-ai-providers-server-ai-langchain-issues] | [![Actions Status][server-ai-langchain-ci-badge]][server-ai-langchain-ci] |
+| [@launchdarkly/server-sdk-ai-openai](packages/ai-providers/server-ai-openai/README.md) | [![NPM][server-ai-openai-npm-badge]][server-ai-openai-npm-link] | [server-ai-openai][package-ai-providers-server-ai-openai-issues] | [![Actions Status][server-ai-openai-ci-badge]][server-ai-openai-ci] |
 | [@launchdarkly/server-sdk-ai-vercel](packages/ai-providers/server-ai-vercel/README.md) | [![NPM][server-ai-vercel-npm-badge]][server-ai-vercel-npm-link] | [server-ai-vercel][package-ai-providers-server-ai-vercel-issues] | [![Actions Status][server-ai-vercel-ci-badge]][server-ai-vercel-ci] |
 
 ## Organization
@@ -231,6 +232,12 @@ We encourage pull requests and other contributions from the community. Check out
 [server-ai-langchain-npm-badge]: https://img.shields.io/npm/v/@launchdarkly/server-sdk-ai-langchain.svg?style=flat-square
 [server-ai-langchain-npm-link]: https://www.npmjs.com/package/@launchdarkly/server-sdk-ai-langchain
 [package-ai-providers-server-ai-langchain-issues]: https://github.com/launchdarkly/js-core/issues?q=is%3Aissue+is%3Aopen+label%3A%22package%3A+ai-providers%2Fserver-ai-langchain%22+
+[//]: # 'ai-providers/server-ai-openai'
+[server-ai-openai-ci-badge]: https://github.com/launchdarkly/js-core/actions/workflows/server-ai-openai.yml/badge.svg
+[server-ai-openai-ci]: https://github.com/launchdarkly/js-core/actions/workflows/server-ai-openai.yml
+[server-ai-openai-npm-badge]: https://img.shields.io/npm/v/@launchdarkly/server-sdk-ai-openai.svg?style=flat-square
+[server-ai-openai-npm-link]: https://www.npmjs.com/package/@launchdarkly/server-sdk-ai-openai
+[package-ai-providers-server-ai-openai-issues]: https://github.com/launchdarkly/js-core/issues?q=is%3Aissue+is%3Aopen+label%3A%22package%3A+ai-providers%2Fserver-ai-openai%22+
 [//]: # 'ai-providers/server-ai-vercel'
 [server-ai-vercel-ci-badge]: https://github.com/launchdarkly/js-core/actions/workflows/server-ai-vercel.yml/badge.svg
 [server-ai-vercel-ci]: https://github.com/launchdarkly/js-core/actions/workflows/server-ai-vercel.yml
diff --git a/package.json b/package.json
index 6ecf0bb14f..701eb9a81a 100644
--- a/package.json
+++ b/package.json
@@ -2,6 +2,7 @@
   "name": "@launchdarkly/js-core",
   "workspaces": [
     "packages/ai-providers/server-ai-langchain",
+    "packages/ai-providers/server-ai-openai",
     "packages/ai-providers/server-ai-vercel",
     "packages/shared/common",
     "packages/shared/sdk-client",
diff --git a/packages/ai-providers/server-ai-openai/README.md b/packages/ai-providers/server-ai-openai/README.md
new file mode 100644
index 0000000000..be61c32bd0
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/README.md
@@ -0,0 +1,118 @@
+# LaunchDarkly AI SDK OpenAI Provider for Server-Side JavaScript
+
+[![NPM][server-ai-openai-npm-badge]][server-ai-openai-npm-link]
+[![Actions Status][server-ai-openai-ci-badge]][server-ai-openai-ci]
+[![Documentation][server-ai-openai-ghp-badge]][server-ai-openai-ghp-link]
+[![NPM][server-ai-openai-dm-badge]][server-ai-openai-npm-link]
+[![NPM][server-ai-openai-dt-badge]][server-ai-openai-npm-link]
+
+# ⛔️⛔️⛔️⛔️
+
+> [!CAUTION]
+> This library is an alpha version and should not be considered ready for production use while this message is visible.
+
+> [!NOTE]
+> This provider currently uses OpenAI's Chat Completions API. We plan to migrate to the Responses API in a future release to take advantage of improved functionality and performance.
+
+# ☝️☝️☝️☝️☝️☝️
+
+## LaunchDarkly overview
+
+[LaunchDarkly](https://www.launchdarkly.com) is a feature management platform that serves over 100 billion feature flags daily to help teams build better software, faster. [Get started](https://docs.launchdarkly.com/home/getting-started) using LaunchDarkly today!
+
+[![Twitter Follow](https://img.shields.io/twitter/follow/launchdarkly.svg?style=social&label=Follow&maxAge=2592000)](https://twitter.com/intent/follow?screen_name=launchdarkly)
+
+## Quick Setup
+
+This package provides OpenAI integration for the LaunchDarkly AI SDK. The simplest way to use it is with the LaunchDarkly AI SDK's `initChat` method:
+
+1. Install the required packages:
+
+```shell
+npm install @launchdarkly/server-sdk-ai @launchdarkly/server-sdk-ai-openai --save
+```
+
+2. Create a chat session and use it:
+
+```typescript
+import { init } from '@launchdarkly/node-server-sdk';
+import { initAi } from '@launchdarkly/server-sdk-ai';
+
+// Initialize LaunchDarkly client
+const ldClient = init(sdkKey);
+const aiClient = initAi(ldClient);
+
+// Create a chat session
+const defaultConfig = {
+  enabled: true,
+  model: { name: 'gpt-4' },
+  provider: { name: 'openai' },
+};
+const chat = await aiClient.initChat('my-chat-config', context, defaultConfig);
+
+if (chat) {
+  const response = await chat.invoke('What is the capital of France?');
+  console.log(response.message.content);
+}
+```
+
+For more information about using the LaunchDarkly AI SDK, see the [LaunchDarkly AI SDK documentation](https://github.com/launchdarkly/js-core/tree/main/packages/sdk/server-ai/README.md).
+
+## Advanced Usage
+
+For more control, you can use the OpenAI provider package directly with LaunchDarkly configurations:
+
+```typescript
+import { OpenAIProvider } from '@launchdarkly/server-sdk-ai-openai';
+import { OpenAI } from 'openai';
+
+// Create an OpenAI client
+const client = new OpenAI({
+  apiKey: process.env.OPENAI_API_KEY,
+});
+
+// Combine LaunchDarkly AI Config messages (aiConfig is an AI Config
+// retrieved elsewhere from the AI client) with the user message
+const configMessages = aiConfig.messages || [];
+const userMessage = { role: 'user', content: 'What is the capital of France?' };
+const allMessages = [...configMessages, userMessage];
+
+// Track the model call with LaunchDarkly tracking
+const response = await aiConfig.tracker.trackMetricsOf(
+  (result) => OpenAIProvider.createAIMetrics(result),
+  () =>
+    client.chat.completions.create({
+      model: 'gpt-4',
+      messages: allMessages,
+      temperature: 0.7,
+    }),
+);
+
+console.log('AI Response:', response.choices[0].message.content);
+```
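+
+Rather than hardcoding the model name and parameters, you can drive them from the AI Config itself. A minimal sketch, assuming `aiConfig` was retrieved from the AI client and its `model.parameters` map directly onto chat-completion options:
+
+```typescript
+const response = await aiConfig.tracker.trackMetricsOf(
+  (result) => OpenAIProvider.createAIMetrics(result),
+  () =>
+    client.chat.completions.create({
+      // Fall back to a default model if the config does not name one.
+      model: aiConfig.model?.name ?? 'gpt-4',
+      messages: allMessages,
+      // Spread config-managed parameters (temperature, max_tokens, ...).
+      ...(aiConfig.model?.parameters ?? {}),
+    }),
+);
+```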
+
+## Contributing
+
+We encourage pull requests and other contributions from the community. Check out our [contributing guidelines](CONTRIBUTING.md) for instructions on how to contribute to this SDK.
+
+## About LaunchDarkly
+
+- LaunchDarkly is a continuous delivery platform that provides feature flags as a service and allows developers to iterate quickly and safely. We allow you to easily flag your features and manage them from the LaunchDarkly dashboard. With LaunchDarkly, you can:
+  - Roll out a new feature to a subset of your users (like a group of users who opt-in to a beta tester group), gathering feedback and bug reports from real-world use cases.
+  - Gradually roll out a feature to an increasing percentage of users, and track the effect that the feature has on key metrics (for instance, how likely is a user to complete a purchase if they have feature A versus feature B?).
+  - Turn off a feature that you realize is causing performance problems in production, without needing to re-deploy, or even restart the application with a changed configuration file.
+  - Grant access to certain features based on user attributes, like payment plan (e.g., users on the 'gold' plan get access to more features than users on the 'silver' plan).
+  - Disable parts of your application to facilitate maintenance, without taking everything offline.
+- LaunchDarkly provides feature flag SDKs for a wide variety of languages and technologies. Check out [our documentation](https://docs.launchdarkly.com/sdk) for a complete list.
+- Explore LaunchDarkly
+  - [launchdarkly.com](https://www.launchdarkly.com/ 'LaunchDarkly Main Website') for more information
+  - [docs.launchdarkly.com](https://docs.launchdarkly.com/ 'LaunchDarkly Documentation') for our documentation and SDK reference guides
+  - [apidocs.launchdarkly.com](https://apidocs.launchdarkly.com/ 'LaunchDarkly API Documentation') for our API documentation
+  - [blog.launchdarkly.com](https://blog.launchdarkly.com/ 'LaunchDarkly Blog Documentation') for the latest product updates
+
+[server-ai-openai-ci-badge]: https://github.com/launchdarkly/js-core/actions/workflows/server-ai-openai.yml/badge.svg
+[server-ai-openai-ci]: https://github.com/launchdarkly/js-core/actions/workflows/server-ai-openai.yml
+[server-ai-openai-npm-badge]: https://img.shields.io/npm/v/@launchdarkly/server-sdk-ai-openai.svg?style=flat-square
+[server-ai-openai-npm-link]: https://www.npmjs.com/package/@launchdarkly/server-sdk-ai-openai
+[server-ai-openai-ghp-badge]: https://img.shields.io/static/v1?label=GitHub+Pages&message=API+reference&color=00add8
+[server-ai-openai-ghp-link]: https://launchdarkly.github.io/js-core/packages/ai-providers/server-ai-openai/docs/
+[server-ai-openai-dm-badge]: https://img.shields.io/npm/dm/@launchdarkly/server-sdk-ai-openai.svg?style=flat-square
+[server-ai-openai-dt-badge]: https://img.shields.io/npm/dt/@launchdarkly/server-sdk-ai-openai.svg?style=flat-square
diff --git a/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts b/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts
new file mode 100644
index 0000000000..50bc4b9cde
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts
@@ -0,0 +1,231 @@
+import { OpenAI } from 'openai';
+
+import { OpenAIProvider } from '../src/OpenAIProvider';
+
+// Mock OpenAI
+jest.mock('openai', () => ({
+  OpenAI: jest.fn().mockImplementation(() => ({
+    chat: {
+      completions: {
+        create: jest.fn().mockResolvedValue({
+          choices: [{ message: { content: 'Test response' } }],
+          usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
+        }),
+      },
+    },
+  })),
+}));
+
+describe('OpenAIProvider', () => {
+  let mockOpenAI: jest.Mocked<OpenAI>;
+  let provider: OpenAIProvider;
+
+  beforeEach(() => {
+    mockOpenAI = new OpenAI() as jest.Mocked<OpenAI>;
+    provider = new OpenAIProvider(mockOpenAI, 'gpt-3.5-turbo', {});
+  });
+
+  describe('createAIMetrics', () => {
+    it('creates metrics with success=true and token usage', () => {
+      const mockResponse = {
+        usage: {
+          prompt_tokens: 50,
+          completion_tokens: 50,
+          total_tokens: 100,
+        },
+      };
+
+      const result = OpenAIProvider.createAIMetrics(mockResponse);
+
+      expect(result).toEqual({
+        success: true,
+        usage: {
+          total: 100,
+          input: 50,
+          output: 50,
+        },
+      });
+    });
+
+    it('creates metrics with success=true and no usage when usage is missing', () => {
+      const mockResponse = {};
+
+      const result = OpenAIProvider.createAIMetrics(mockResponse);
+
+      expect(result).toEqual({
+        success: true,
+        usage: undefined,
+      });
+    });
+
+    it('handles partial usage data', () => {
+      const mockResponse = {
+        usage: {
+          prompt_tokens: 30,
+          // completion_tokens and total_tokens missing
+        },
+      };
+
+      const result = OpenAIProvider.createAIMetrics(mockResponse);
+
+      expect(result).toEqual({
+        success: true,
+        usage: {
+          total: 0,
+          input: 30,
+          output: 0,
+        },
+      });
+    });
+  });
+
+  describe('invokeModel', () => {
+    it('invokes OpenAI chat completions and returns response', async () => {
+      const mockResponse = {
+        choices: [
+          {
+            message: {
+              content: 'Hello! How can I help you today?',
+            },
+          },
+        ],
+        usage: {
+          prompt_tokens: 10,
+          completion_tokens: 15,
+          total_tokens: 25,
+        },
+      };
+
+      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
+
+      const messages = [{ role: 'user' as const, content: 'Hello!' }];
+
+      const result = await provider.invokeModel(messages);
+
+      expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({
+        model: 'gpt-3.5-turbo',
+        messages: [{ role: 'user', content: 'Hello!' }],
+      });
+
+      expect(result).toEqual({
+        message: {
+          role: 'assistant',
+          content: 'Hello! How can I help you today?',
+        },
+        metrics: {
+          success: true,
+          usage: {
+            total: 25,
+            input: 10,
+            output: 15,
+          },
+        },
+      });
+    });
+
+    it('returns unsuccessful response when no content in response', async () => {
+      const mockResponse = {
+        choices: [
+          {
+            message: {
+              // content is missing
+            },
+          },
+        ],
+      };
+
+      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
+
+      const messages = [{ role: 'user' as const, content: 'Hello!' }];
+
+      const result = await provider.invokeModel(messages);
+
+      expect(result).toEqual({
+        message: {
+          role: 'assistant',
+          content: '',
+        },
+        metrics: {
+          success: false,
+          usage: undefined,
+        },
+      });
+    });
+
+    it('returns unsuccessful response when choices array is empty', async () => {
+      const mockResponse = {
+        choices: [],
+      };
+
+      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
+
+      const messages = [{ role: 'user' as const, content: 'Hello!' }];
+
+      const result = await provider.invokeModel(messages);
+
+      expect(result).toEqual({
+        message: {
+          role: 'assistant',
+          content: '',
+        },
+        metrics: {
+          success: false,
+          usage: undefined,
+        },
+      });
+    });
+
+    it('returns unsuccessful response when choices is undefined', async () => {
+      const mockResponse = {
+        // choices is missing entirely
+      };
+
+      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
+
+      const messages = [{ role: 'user' as const, content: 'Hello!' }];
+
+      const result = await provider.invokeModel(messages);
+
+      expect(result).toEqual({
+        message: {
+          role: 'assistant',
+          content: '',
+        },
+        metrics: {
+          success: false,
+          usage: undefined,
+        },
+      });
+    });
+  });
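+
+  describe('invokeModel with parameters', () => {
+    // Illustrative extra check (a suggested sketch, not part of the original
+    // suite): parameters supplied at construction should be spread into the
+    // completions request alongside the model and messages.
+    it('spreads configured model parameters into the request', async () => {
+      const parameterized = new OpenAIProvider(mockOpenAI, 'gpt-3.5-turbo', {
+        temperature: 0.7,
+        max_tokens: 1000,
+      });
+
+      await parameterized.invokeModel([{ role: 'user' as const, content: 'Hello!' }]);
+
+      expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({
+        model: 'gpt-3.5-turbo',
+        messages: [{ role: 'user', content: 'Hello!' }],
+        temperature: 0.7,
+        max_tokens: 1000,
+      });
+    });
+  });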
+
+  describe('getClient', () => {
+    it('returns the underlying OpenAI client', () => {
+      const client = provider.getClient();
+      expect(client).toBe(mockOpenAI);
+    });
+  });
+
+  describe('create', () => {
+    it('creates OpenAIProvider with correct model and parameters', async () => {
+      const mockAiConfig = {
+        model: {
+          name: 'gpt-4',
+          parameters: {
+            temperature: 0.7,
+            max_tokens: 1000,
+          },
+        },
+        provider: { name: 'openai' },
+        enabled: true,
+        tracker: {} as any,
+        toVercelAISDK: jest.fn(),
+      };
+
+      const result = await OpenAIProvider.create(mockAiConfig);
+
+      expect(result).toBeInstanceOf(OpenAIProvider);
+      expect(result.getClient()).toBeDefined();
+    });
+  });
+});
diff --git a/packages/ai-providers/server-ai-openai/jest.config.js b/packages/ai-providers/server-ai-openai/jest.config.js
new file mode 100644
index 0000000000..f106eb3bc9
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/jest.config.js
@@ -0,0 +1,7 @@
+module.exports = {
+  transform: { '^.+\\.ts?$': 'ts-jest' },
+  testMatch: ['**/__tests__/**/*test.ts?(x)'],
+  testEnvironment: 'node',
+  moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'],
+  collectCoverageFrom: ['src/**/*.ts'],
+};
diff --git a/packages/ai-providers/server-ai-openai/package.json b/packages/ai-providers/server-ai-openai/package.json
new file mode 100644
index 0000000000..45b3790601
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/package.json
@@ -0,0 +1,54 @@
+{
+  "name": "@launchdarkly/server-sdk-ai-openai",
+  "version": "0.0.0",
+  "description": "LaunchDarkly AI SDK OpenAI Provider for Server-Side JavaScript",
+  "homepage": "https://github.com/launchdarkly/js-core/tree/main/packages/ai-providers/server-ai-openai",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/launchdarkly/js-core.git"
+  },
+  "main": "dist/index.js",
+  "types": "dist/index.d.ts",
+  "type": "commonjs",
+  "scripts": {
+    "build": "npx tsc",
--ext .ts", + "prettier": "prettier --write '**/*.@(js|ts|tsx|json|css)' --ignore-path ../../../.prettierignore", + "lint:fix": "yarn run lint --fix", + "check": "yarn prettier && yarn lint && yarn build && yarn test", + "test": "jest" + }, + "keywords": [ + "launchdarkly", + "ai", + "llm", + "openai" + ], + "author": "LaunchDarkly", + "license": "Apache-2.0", + "dependencies": { + "@launchdarkly/server-sdk-ai": "^0.12.0", + "openai": "^4.0.0" + }, + "devDependencies": { + "@launchdarkly/js-server-sdk-common": "2.16.2", + "@trivago/prettier-plugin-sort-imports": "^4.1.1", + "@types/jest": "^29.5.3", + "@typescript-eslint/eslint-plugin": "^6.20.0", + "@typescript-eslint/parser": "^6.20.0", + "eslint": "^8.45.0", + "eslint-config-airbnb-base": "^15.0.0", + "eslint-config-airbnb-typescript": "^17.1.0", + "eslint-config-prettier": "^8.8.0", + "eslint-plugin-import": "^2.27.5", + "eslint-plugin-jest": "^27.6.3", + "eslint-plugin-prettier": "^5.0.0", + "jest": "^29.6.1", + "prettier": "^3.0.0", + "ts-jest": "^29.1.1", + "typescript": "5.1.6" + }, + "peerDependencies": { + "@launchdarkly/js-server-sdk-common": "2.x" + } +} diff --git a/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts new file mode 100644 index 0000000000..065d7e0a78 --- /dev/null +++ b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts @@ -0,0 +1,122 @@ +import { OpenAI } from 'openai'; + +import { LDLogger } from '@launchdarkly/js-server-sdk-common'; +import { + AIProvider, + ChatResponse, + LDAIConfig, + LDAIMetrics, + LDMessage, + LDTokenUsage, +} from '@launchdarkly/server-sdk-ai'; + +/** + * OpenAI implementation of AIProvider. + * This provider integrates OpenAI's chat completions API with LaunchDarkly's tracking capabilities. + */ +export class OpenAIProvider extends AIProvider { + private _client: OpenAI; + private _modelName: string; + private _parameters: Record; + + constructor( + client: OpenAI, + modelName: string, + parameters: Record, + logger?: LDLogger, + ) { + super(logger); + this._client = client; + this._modelName = modelName; + this._parameters = parameters; + } + + // ============================================================================= + // MAIN FACTORY METHOD + // ============================================================================= + + /** + * Static factory method to create an OpenAI AIProvider from an AI configuration. + */ + static async create(aiConfig: LDAIConfig, logger?: LDLogger): Promise { + const client = new OpenAI({ + apiKey: process.env.OPENAI_API_KEY, + }); + const modelName = aiConfig.model?.name || ''; + const parameters = aiConfig.model?.parameters || {}; + return new OpenAIProvider(client, modelName, parameters, logger); + } + + // ============================================================================= + // INSTANCE METHODS (AIProvider Implementation) + // ============================================================================= + + /** + * Invoke the OpenAI model with an array of messages. 
+   */
+  async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
+    // Call OpenAI chat completions API
+    const response = await this._client.chat.completions.create({
+      model: this._modelName,
+      messages,
+      ...this._parameters,
+    });
+
+    // Generate metrics early (assumes success by default)
+    const metrics = OpenAIProvider.createAIMetrics(response);
+
+    // Safely extract the first choice content using optional chaining
+    const content = response?.choices?.[0]?.message?.content || '';
+
+    if (!content) {
+      this.logger?.warn('OpenAI response has no content available');
+      metrics.success = false;
+    }
+
+    // Create the assistant message
+    const assistantMessage: LDMessage = {
+      role: 'assistant',
+      content,
+    };
+
+    return {
+      message: assistantMessage,
+      metrics,
+    };
+  }
+
+  /**
+   * Get the underlying OpenAI client instance.
+   */
+  getClient(): OpenAI {
+    return this._client;
+  }
+
+  // =============================================================================
+  // STATIC UTILITY METHODS
+  // =============================================================================
+
+  /**
+   * Create AI metrics information from an OpenAI response.
+   * This method extracts token usage information and success status from OpenAI responses
+   * and returns a LaunchDarkly AIMetrics object.
+   */
+  static createAIMetrics(openaiResponse: any): LDAIMetrics {
+    // Extract token usage if available
+    let usage: LDTokenUsage | undefined;
+    if (openaiResponse?.usage) {
+      const { prompt_tokens, completion_tokens, total_tokens } = openaiResponse.usage;
+      usage = {
+        total: total_tokens || 0,
+        input: prompt_tokens || 0,
+        output: completion_tokens || 0,
+      };
+    }
+
+    // OpenAI responses that complete successfully are considered successful by default
+    return {
+      success: true,
+      usage,
+    };
+  }
+}
diff --git a/packages/ai-providers/server-ai-openai/src/index.ts b/packages/ai-providers/server-ai-openai/src/index.ts
new file mode 100644
index 0000000000..bfdeac9b4b
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/src/index.ts
@@ -0,0 +1 @@
+export { OpenAIProvider } from './OpenAIProvider';
diff --git a/packages/ai-providers/server-ai-openai/tsconfig.eslint.json b/packages/ai-providers/server-ai-openai/tsconfig.eslint.json
new file mode 100644
index 0000000000..56c9b38305
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/tsconfig.eslint.json
@@ -0,0 +1,5 @@
+{
+  "extends": "./tsconfig.json",
+  "include": ["/**/*.ts"],
+  "exclude": ["node_modules"]
+}
diff --git a/packages/ai-providers/server-ai-openai/tsconfig.json b/packages/ai-providers/server-ai-openai/tsconfig.json
new file mode 100644
index 0000000000..6238d6a0f5
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/tsconfig.json
@@ -0,0 +1,20 @@
+{
+  "compilerOptions": {
+    "target": "ES2020",
+    "module": "CommonJS",
+    "lib": ["ES2020"],
+    "moduleResolution": "node",
+    "esModuleInterop": true,
+    "allowSyntheticDefaultImports": true,
+    "strict": true,
+    "skipLibCheck": true,
+    "forceConsistentCasingInFileNames": true,
+    "outDir": "./dist",
+    "rootDir": "./src",
+    "declaration": true,
+    "declarationMap": true,
+    "sourceMap": true
+  },
+  "include": ["src/**/*"],
+  "exclude": ["dist", "node_modules", "**/*.test.ts", "**/*.spec.ts"]
+}
["src/**/*"], + "compilerOptions": { + "composite": true + } +} diff --git a/packages/ai-providers/server-ai-openai/typedoc.json b/packages/ai-providers/server-ai-openai/typedoc.json new file mode 100644 index 0000000000..7ac616b544 --- /dev/null +++ b/packages/ai-providers/server-ai-openai/typedoc.json @@ -0,0 +1,5 @@ +{ + "extends": ["../../../typedoc.base.json"], + "entryPoints": ["src/index.ts"], + "out": "docs" +} diff --git a/release-please-config.json b/release-please-config.json index efd01f11cc..446afbcc86 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -8,6 +8,10 @@ "bump-minor-pre-major": true, "prerelease": true }, + "packages/ai-providers/server-ai-openai": { + "bump-minor-pre-major": true, + "prerelease": true + }, "packages/shared/common": {}, "packages/shared/sdk-client": {}, "packages/shared/sdk-server": {}, diff --git a/tsconfig.json b/tsconfig.json index 183b6ba379..edc4dd75e0 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -82,6 +82,9 @@ { "path": "./packages/ai-providers/server-ai-langchain/tsconfig.ref.json" }, + { + "path": "./packages/ai-providers/server-ai-openai/tsconfig.ref.json" + }, { "path": "./packages/ai-providers/server-ai-vercel/tsconfig.ref.json" }