From c71f2eef7bf81f90377689275b472e7741db7ee8 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Fri, 10 Oct 2025 21:38:43 +0000
Subject: [PATCH 1/7] feat: Add OpenAI Provider for AI SDK

---
 package.json                                  |   1 +
 .../__tests__/OpenAIProvider.test.ts          | 195 ++++++++++++++++++
 .../server-ai-openai/jest.config.js           |  14 ++
 .../server-ai-openai/package.json             |  54 +++++
 .../server-ai-openai/src/OpenAIProvider.ts    | 117 +++++++++++
 .../server-ai-openai/src/index.ts             |   1 +
 .../server-ai-openai/tsconfig.eslint.json     |   5 +
 .../server-ai-openai/tsconfig.json            |  20 ++
 8 files changed, 407 insertions(+)
 create mode 100644 packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts
 create mode 100644 packages/ai-providers/server-ai-openai/jest.config.js
 create mode 100644 packages/ai-providers/server-ai-openai/package.json
 create mode 100644 packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
 create mode 100644 packages/ai-providers/server-ai-openai/src/index.ts
 create mode 100644 packages/ai-providers/server-ai-openai/tsconfig.eslint.json
 create mode 100644 packages/ai-providers/server-ai-openai/tsconfig.json

diff --git a/package.json b/package.json
index 313d3cf351..882ff4e455 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,7 @@
 {
   "name": "@launchdarkly/js-core",
   "workspaces": [
+    "packages/ai-providers/server-ai-openai",
     "packages/shared/common",
     "packages/shared/sdk-client",
     "packages/shared/sdk-server",
diff --git a/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts b/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts
new file mode 100644
index 0000000000..14958c3c50
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts
@@ -0,0 +1,195 @@
+import { OpenAI } from 'openai';
+
+import { OpenAIProvider } from '../src/OpenAIProvider';
+
+// Mock OpenAI
+jest.mock('openai', () => {
+  return {
+    OpenAI: jest.fn().mockImplementation(() => ({
+      chat: {
+        completions: {
+          create: jest.fn().mockResolvedValue({
+            choices: [{ message: { content: 'Test response' } }],
+            usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
+          }),
+        },
+      },
+    })),
+  };
+});
+
+describe('OpenAIProvider', () => {
+  let mockOpenAI: jest.Mocked<OpenAI>;
+  let provider: OpenAIProvider;
+
+  beforeEach(() => {
+    mockOpenAI = new OpenAI() as jest.Mocked<OpenAI>;
+    provider = new OpenAIProvider(mockOpenAI, 'gpt-3.5-turbo', {});
+  });
+
+
+  describe('createAIMetrics', () => {
+    it('creates metrics with success=true and token usage', () => {
+      const mockResponse = {
+        usage: {
+          prompt_tokens: 50,
+          completion_tokens: 50,
+          total_tokens: 100,
+        },
+      };
+
+      const result = OpenAIProvider.createAIMetrics(mockResponse);
+
+      expect(result).toEqual({
+        success: true,
+        usage: {
+          total: 100,
+          input: 50,
+          output: 50,
+        },
+      });
+    });
+
+    it('creates metrics with success=true and no usage when usage is missing', () => {
+      const mockResponse = {};
+
+      const result = OpenAIProvider.createAIMetrics(mockResponse);
+
+      expect(result).toEqual({
+        success: true,
+        usage: undefined,
+      });
+    });
+
+    it('handles partial usage data', () => {
+      const mockResponse = {
+        usage: {
+          prompt_tokens: 30,
+          // completion_tokens and total_tokens missing
+        },
+      };
+
+      const result = OpenAIProvider.createAIMetrics(mockResponse);
+
+      expect(result).toEqual({
+        success: true,
+        usage: {
+          total: 0,
+          input: 30,
+          output: 0,
+        },
+      });
+    });
+  });
+
+  describe('invokeModel', () => {
+    it('invokes OpenAI chat completions and returns response', async () => {
+      const mockResponse = {
+        choices: [
+          {
+            message: {
+              content: 'Hello! How can I help you today?',
+            },
+          },
+        ],
+        usage: {
+          prompt_tokens: 10,
+          completion_tokens: 15,
+          total_tokens: 25,
+        },
+      };
+
+      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
+
+      const messages = [
+        { role: 'user' as const, content: 'Hello!' },
+      ];
+
+      const result = await provider.invokeModel(messages);
+
+      expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({
+        model: 'gpt-3.5-turbo',
+        messages: [{ role: 'user', content: 'Hello!' }],
+      });
+
+      expect(result).toEqual({
+        message: {
+          role: 'assistant',
+          content: 'Hello! How can I help you today?',
+        },
+        metrics: {
+          success: true,
+          usage: {
+            total: 25,
+            input: 10,
+            output: 15,
+          },
+        },
+      });
+    });
+
+    it('throws error when no content in response', async () => {
+      const mockResponse = {
+        choices: [
+          {
+            message: {
+              // content is missing
+            },
+          },
+        ],
+      };
+
+      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
+
+      const messages = [
+        { role: 'user' as const, content: 'Hello!' },
+      ];
+
+      await expect(provider.invokeModel(messages)).rejects.toThrow('No content in OpenAI response');
+    });
+
+    it('handles empty choices array', async () => {
+      const mockResponse = {
+        choices: [],
+      };
+
+      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
+
+      const messages = [
+        { role: 'user' as const, content: 'Hello!' },
+      ];
+
+      await expect(provider.invokeModel(messages)).rejects.toThrow('No content in OpenAI response');
+    });
+  });
+
+  describe('getClient', () => {
+    it('returns the underlying OpenAI client', () => {
+      const client = provider.getClient();
+      expect(client).toBe(mockOpenAI);
+    });
+  });
+
+  describe('create', () => {
+    it('creates OpenAIProvider with correct model and parameters', async () => {
+      const mockAiConfig = {
+        model: {
+          name: 'gpt-4',
+          parameters: {
+            temperature: 0.7,
+            max_tokens: 1000,
+          },
+        },
+        provider: { name: 'openai' },
+        enabled: true,
+        tracker: {} as any,
+        toVercelAISDK: jest.fn(),
+      };
+
+      const result = await OpenAIProvider.create(mockAiConfig);
+
+      expect(result).toBeInstanceOf(OpenAIProvider);
+      expect(result.getClient()).toBeDefined();
+    });
+  });
+});
diff --git a/packages/ai-providers/server-ai-openai/jest.config.js b/packages/ai-providers/server-ai-openai/jest.config.js
new file mode 100644
index 0000000000..fea9c8773a
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/jest.config.js
@@ -0,0 +1,14 @@
+module.exports = {
+  preset: 'ts-jest',
+  testEnvironment: 'node',
+  roots: [''],
+  testMatch: ['**/__tests__/**/*.test.ts'],
+  collectCoverageFrom: [
+    'src/**/*.ts',
+    '!src/**/*.d.ts',
+    '!src/**/*.test.ts',
+    '!src/**/*.spec.ts',
+  ],
+  coverageDirectory: 'coverage',
+  coverageReporters: ['text', 'lcov', 'html'],
+};
diff --git a/packages/ai-providers/server-ai-openai/package.json b/packages/ai-providers/server-ai-openai/package.json
new file mode 100644
index 0000000000..f24a0dec06
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/package.json
@@ -0,0 +1,54 @@
+{
+  "name": "@launchdarkly/server-sdk-ai-openai",
+  "version": "0.0.0",
+  "description": "LaunchDarkly AI SDK OpenAI Provider for Server-Side JavaScript",
+  "homepage": "https://github.com/launchdarkly/js-core/tree/main/packages/ai-providers/server-ai-openai",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/launchdarkly/js-core.git"
+  },
+  "main": "dist/index.js",
+  "types": "dist/index.d.ts",
+  "type": "commonjs",
"commonjs", + "scripts": { + "build": "npx tsc", + "lint": "npx eslint . --ext .ts", + "prettier": "prettier --write '**/*.@(js|ts|tsx|json|css)' --ignore-path ../../../.prettierignore", + "lint:fix": "yarn run lint --fix", + "check": "yarn prettier && yarn lint && yarn build && yarn test", + "test": "jest" + }, + "keywords": [ + "launchdarkly", + "ai", + "llm", + "openai" + ], + "author": "LaunchDarkly", + "license": "Apache-2.0", + "dependencies": { + "@launchdarkly/server-sdk-ai": "0.11.4", + "openai": "^4.0.0" + }, + "devDependencies": { + "@launchdarkly/js-server-sdk-common": "2.16.2", + "@trivago/prettier-plugin-sort-imports": "^4.1.1", + "@types/jest": "^29.5.3", + "@typescript-eslint/eslint-plugin": "^6.20.0", + "@typescript-eslint/parser": "^6.20.0", + "eslint": "^8.45.0", + "eslint-config-airbnb-base": "^15.0.0", + "eslint-config-airbnb-typescript": "^17.1.0", + "eslint-config-prettier": "^8.8.0", + "eslint-plugin-import": "^2.27.5", + "eslint-plugin-jest": "^27.6.3", + "eslint-plugin-prettier": "^5.0.0", + "jest": "^29.6.1", + "prettier": "^3.0.0", + "ts-jest": "^29.1.1", + "typescript": "5.1.6" + }, + "peerDependencies": { + "@launchdarkly/js-server-sdk-common": "2.x" + } +} diff --git a/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts new file mode 100644 index 0000000000..145b162d12 --- /dev/null +++ b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts @@ -0,0 +1,117 @@ +import { OpenAI } from 'openai'; + +import { LDLogger } from '@launchdarkly/js-server-sdk-common'; +import { + AIProvider, + ChatResponse, + LDAIConfig, + LDAIMetrics, + LDMessage, + LDTokenUsage, +} from '@launchdarkly/server-sdk-ai'; + +/** + * OpenAI implementation of AIProvider. + * This provider integrates OpenAI's chat completions API with LaunchDarkly's tracking capabilities. + */ +export class OpenAIProvider extends AIProvider { + private _client: OpenAI; + private _modelName: string; + private _parameters: Record; + + constructor(client: OpenAI, modelName: string, parameters: Record, logger?: LDLogger) { + super(logger); + this._client = client; + this._modelName = modelName; + this._parameters = parameters; + } + + // ============================================================================= + // MAIN FACTORY METHOD + // ============================================================================= + + /** + * Static factory method to create an OpenAI AIProvider from an AI configuration. + */ + static async create(aiConfig: LDAIConfig, logger?: LDLogger): Promise { + const client = new OpenAI({ + apiKey: process.env.OPENAI_API_KEY, + }); + const modelName = aiConfig.model?.name || ''; + const parameters = aiConfig.model?.parameters || {}; + return new OpenAIProvider(client, modelName, parameters, logger); + } + + // ============================================================================= + // INSTANCE METHODS (AIProvider Implementation) + // ============================================================================= + + /** + * Invoke the OpenAI model with an array of messages. 
+  async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
+    // Call OpenAI chat completions API
+    const response = await this._client.chat.completions.create({
+      model: this._modelName,
+      messages: messages,
+      ...this._parameters,
+    });
+
+    // Extract the first choice content
+    const choice = response.choices[0];
+    if (!choice?.message?.content) {
+      throw new Error('No content in OpenAI response');
+    }
+
+    // Create the assistant message
+    const assistantMessage: LDMessage = {
+      role: 'assistant',
+      content: choice.message.content,
+    };
+
+    // Extract metrics including token usage and success status
+    const metrics = OpenAIProvider.createAIMetrics(response);
+
+    return {
+      message: assistantMessage,
+      metrics,
+    };
+  }
+
+  /**
+   * Get the underlying OpenAI client instance.
+   */
+  getClient(): OpenAI {
+    return this._client;
+  }
+
+
+  // =============================================================================
+  // STATIC UTILITY METHODS
+  // =============================================================================
+
+  /**
+   * Create AI metrics information from an OpenAI response.
+   * This method extracts token usage information and success status from OpenAI responses
+   * and returns a LaunchDarkly AIMetrics object.
+   */
+  static createAIMetrics(openaiResponse: any): LDAIMetrics {
+    // Extract token usage if available
+    let usage: LDTokenUsage | undefined;
+    if (openaiResponse?.usage) {
+      const { prompt_tokens, completion_tokens, total_tokens } = openaiResponse.usage;
+      usage = {
+        total: total_tokens || 0,
+        input: prompt_tokens || 0,
+        output: completion_tokens || 0,
+      };
+    }
+
+    // OpenAI responses that complete successfully are considered successful
+    return {
+      success: true,
+      usage,
+    };
+  }
+
+}
diff --git a/packages/ai-providers/server-ai-openai/src/index.ts b/packages/ai-providers/server-ai-openai/src/index.ts
new file mode 100644
index 0000000000..bfdeac9b4b
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/src/index.ts
@@ -0,0 +1 @@
+export { OpenAIProvider } from './OpenAIProvider';
diff --git a/packages/ai-providers/server-ai-openai/tsconfig.eslint.json b/packages/ai-providers/server-ai-openai/tsconfig.eslint.json
new file mode 100644
index 0000000000..56c9b38305
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/tsconfig.eslint.json
@@ -0,0 +1,5 @@
+{
+  "extends": "./tsconfig.json",
+  "include": ["/**/*.ts"],
+  "exclude": ["node_modules"]
+}
diff --git a/packages/ai-providers/server-ai-openai/tsconfig.json b/packages/ai-providers/server-ai-openai/tsconfig.json
new file mode 100644
index 0000000000..6238d6a0f5
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/tsconfig.json
@@ -0,0 +1,20 @@
+{
+  "compilerOptions": {
+    "target": "ES2020",
+    "module": "CommonJS",
+    "lib": ["ES2020"],
+    "moduleResolution": "node",
+    "esModuleInterop": true,
+    "allowSyntheticDefaultImports": true,
+    "strict": true,
+    "skipLibCheck": true,
+    "forceConsistentCasingInFileNames": true,
+    "outDir": "./dist",
+    "rootDir": "./src",
+    "declaration": true,
+    "declarationMap": true,
+    "sourceMap": true
+  },
+  "include": ["src/**/*"],
+  "exclude": ["dist", "node_modules", "**/*.test.ts", "**/*.spec.ts"]
+}
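
Before the follow-up patches, a minimal sketch of how the provider introduced in PATCH 1 is meant to be driven directly. Everything specific in it is illustrative: the config object is hand-rolled the way the unit tests build `mockAiConfig` (in production it comes from the LaunchDarkly AI client), the model name and temperature are placeholders, and `OPENAI_API_KEY` is assumed to be set in the environment.

```typescript
import type { LDAIConfig } from '@launchdarkly/server-sdk-ai';
import { OpenAIProvider } from '@launchdarkly/server-sdk-ai-openai';

async function demo() {
  // Hand-rolled AI Config mirroring the shape used in the unit tests;
  // a real one comes from the LaunchDarkly AI client.
  const aiConfig = {
    enabled: true,
    model: { name: 'gpt-4', parameters: { temperature: 0.7 } },
    provider: { name: 'openai' },
    tracker: {} as any,
    toVercelAISDK: () => {
      throw new Error('not exercised in this sketch');
    },
  } as unknown as LDAIConfig;

  // create() instantiates an OpenAI client using OPENAI_API_KEY.
  const provider = await OpenAIProvider.create(aiConfig);

  const response = await provider.invokeModel([{ role: 'user', content: 'Hello!' }]);
  console.log(response.message.content, response.metrics);
}

demo().catch(console.error);
```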

From 3455f18507d9fe62d70dcb0255eee10db4898f98 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Mon, 13 Oct 2025 15:07:01 +0000
Subject: [PATCH 2/7] fix: Target proper version of AI SDK for openai

---
 packages/ai-providers/server-ai-openai/package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packages/ai-providers/server-ai-openai/package.json b/packages/ai-providers/server-ai-openai/package.json
index f24a0dec06..45b3790601 100644
--- a/packages/ai-providers/server-ai-openai/package.json
+++ b/packages/ai-providers/server-ai-openai/package.json
@@ -27,7 +27,7 @@
   "author": "LaunchDarkly",
   "license": "Apache-2.0",
   "dependencies": {
-    "@launchdarkly/server-sdk-ai": "0.11.4",
+    "@launchdarkly/server-sdk-ai": "^0.12.0",
     "openai": "^4.0.0"
   },
   "devDependencies": {

From 21fa0abeb3cd0cdc0b96383e2ea285fa0e05bb56 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Mon, 13 Oct 2025 21:00:35 +0000
Subject: [PATCH 3/7] add release-please for openai package

---
 .github/workflows/release-please.yml | 21 +++++++++++++++++++++
 .release-please-manifest.json        |  1 +
 release-please-config.json           |  4 ++++
 3 files changed, 26 insertions(+)

diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml
index 358a6caa92..5027cde82c 100644
--- a/.github/workflows/release-please.yml
+++ b/.github/workflows/release-please.yml
@@ -28,6 +28,7 @@ jobs:
       package-browser-released: ${{ steps.release.outputs['packages/sdk/browser--release_created'] }}
       package-server-ai-released: ${{ steps.release.outputs['packages/sdk/server-ai--release_created'] }}
       package-server-ai-langchain-released: ${{ steps.release.outputs['packages/ai-providers/server-ai-langchain--release_created'] }}
+      package-server-ai-openai-released: ${{ steps.release.outputs['packages/ai-providers/server-ai-openai--release_created'] }}
       package-browser-telemetry-released: ${{ steps.release.outputs['packages/telemetry/browser-telemetry--release_created'] }}
       package-combined-browser-released: ${{ steps.release.outputs['packages/sdk/combined-browser--release_created'] }}
     steps:
@@ -481,3 +482,23 @@ jobs:
     with:
       workspace_path: packages/ai-providers/server-ai-langchain
       aws_assume_role: ${{ vars.AWS_ROLE_ARN }}
+
+  release-server-ai-openai:
+    runs-on: ubuntu-latest
+    needs: ['release-please', 'release-server-ai']
+    permissions:
+      id-token: write
+      contents: write
+    if: ${{ always() && !failure() && !cancelled() && needs.release-please.outputs.package-server-ai-openai-released == 'true'}}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-node@v4
+        with:
+          node-version: 22.x
+          registry-url: 'https://registry.npmjs.org'
+      - id: release-server-ai-openai
+        name: Full release of packages/ai-providers/server-ai-openai
+        uses: ./actions/full-release
+        with:
+          workspace_path: packages/ai-providers/server-ai-openai
+          aws_assume_role: ${{ vars.AWS_ROLE_ARN }}
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 1dea9a82d7..d97c63c26d 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,5 +1,6 @@
 {
   "packages/ai-providers/server-ai-langchain": "0.0.0",
+  "packages/ai-providers/server-ai-openai": "0.0.0",
   "packages/sdk/akamai-base": "3.0.10",
   "packages/sdk/akamai-edgekv": "1.4.12",
   "packages/sdk/browser": "0.8.1",
diff --git a/release-please-config.json b/release-please-config.json
index 0a0725f91b..f29b244035 100644
--- a/release-please-config.json
+++ b/release-please-config.json
@@ -4,6 +4,10 @@
     "bump-minor-pre-major": true,
     "prerelease": true
   },
+  "packages/ai-providers/server-ai-openai": {
+    "bump-minor-pre-major": true,
+    "prerelease": true
+  },
   "packages/shared/common": {},
   "packages/shared/sdk-client": {},
   "packages/shared/sdk-server": {},

From 20e5c15f7e3377ad1fb2ab6dd6e693a03f6cba0d Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Mon, 13 Oct 2025 22:15:27 +0000
Subject: [PATCH 4/7] don't throw exception when the response is empty.

---
 .../__tests__/OpenAIProvider.test.ts          | 90 +++++++++++++------
 .../server-ai-openai/src/OpenAIProvider.ts    | 31 ++++---
 2 files changed, 81 insertions(+), 40 deletions(-)

diff --git a/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts b/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts
index 14958c3c50..50bc4b9cde 100644
--- a/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts
+++ b/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts
@@ -3,20 +3,18 @@ import { OpenAI } from 'openai';
 import { OpenAIProvider } from '../src/OpenAIProvider';
 
 // Mock OpenAI
-jest.mock('openai', () => {
-  return {
-    OpenAI: jest.fn().mockImplementation(() => ({
-      chat: {
-        completions: {
-          create: jest.fn().mockResolvedValue({
-            choices: [{ message: { content: 'Test response' } }],
-            usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
-          }),
-        },
+jest.mock('openai', () => ({
+  OpenAI: jest.fn().mockImplementation(() => ({
+    chat: {
+      completions: {
+        create: jest.fn().mockResolvedValue({
+          choices: [{ message: { content: 'Test response' } }],
+          usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
+        }),
       },
-    })),
-  };
-});
+    },
+  })),
+}));
 
 describe('OpenAIProvider', () => {
   let mockOpenAI: jest.Mocked<OpenAI>;
   let provider: OpenAIProvider;
@@ -27,7 +25,6 @@ describe('OpenAIProvider', () => {
     provider = new OpenAIProvider(mockOpenAI, 'gpt-3.5-turbo', {});
   });
 
-
   describe('createAIMetrics', () => {
     it('creates metrics with success=true and token usage', () => {
       const mockResponse = {
@@ -101,9 +98,7 @@ describe('OpenAIProvider', () => {
 
       (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
 
-      const messages = [
-        { role: 'user' as const, content: 'Hello!' },
-      ];
+      const messages = [{ role: 'user' as const, content: 'Hello!' }];
 
       const result = await provider.invokeModel(messages);
 
@@ -128,7 +123,7 @@ describe('OpenAIProvider', () => {
       });
     });
 
-    it('throws error when no content in response', async () => {
+    it('returns unsuccessful response when no content in response', async () => {
       const mockResponse = {
         choices: [
           {
@@ -141,25 +136,66 @@ describe('OpenAIProvider', () => {
 
       (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
 
-      const messages = [
-        { role: 'user' as const, content: 'Hello!' },
-      ];
+      const messages = [{ role: 'user' as const, content: 'Hello!' }];
+
+      const result = await provider.invokeModel(messages);
 
-      await expect(provider.invokeModel(messages)).rejects.toThrow('No content in OpenAI response');
+      expect(result).toEqual({
+        message: {
+          role: 'assistant',
+          content: '',
+        },
+        metrics: {
+          success: false,
+          usage: undefined,
+        },
+      });
     });
 
-    it('handles empty choices array', async () => {
+    it('returns unsuccessful response when choices array is empty', async () => {
      const mockResponse = {
        choices: [],
      };

      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);

-      const messages = [
-        { role: 'user' as const, content: 'Hello!' },
-      ];
+      const messages = [{ role: 'user' as const, content: 'Hello!' }];
+
+      const result = await provider.invokeModel(messages);
+
+      expect(result).toEqual({
+        message: {
+          role: 'assistant',
+          content: '',
+        },
+        metrics: {
+          success: false,
+          usage: undefined,
+        },
+      });
+    });
+
+    it('returns unsuccessful response when choices is undefined', async () => {
+      const mockResponse = {
+        // choices is missing entirely
+      };
+
+      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
+
+      const messages = [{ role: 'user' as const, content: 'Hello!' }];
 
-      await expect(provider.invokeModel(messages)).rejects.toThrow('No content in OpenAI response');
+      const result = await provider.invokeModel(messages);
+
+      expect(result).toEqual({
+        message: {
+          role: 'assistant',
+          content: '',
+        },
+        metrics: {
+          success: false,
+          usage: undefined,
+        },
+      });
     });
   });
 
diff --git a/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
index 145b162d12..065d7e0a78 100644
--- a/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
+++ b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
@@ -19,7 +19,12 @@ export class OpenAIProvider extends AIProvider {
   private _modelName: string;
   private _parameters: Record<string, unknown>;
 
-  constructor(client: OpenAI, modelName: string, parameters: Record<string, unknown>, logger?: LDLogger) {
+  constructor(
+    client: OpenAI,
+    modelName: string,
+    parameters: Record<string, unknown>,
+    logger?: LDLogger,
+  ) {
     super(logger);
     this._client = client;
     this._modelName = modelName;
@@ -53,25 +58,27 @@ export class OpenAIProvider extends AIProvider {
     // Call OpenAI chat completions API
     const response = await this._client.chat.completions.create({
       model: this._modelName,
-      messages: messages,
+      messages,
       ...this._parameters,
     });
 
-    // Extract the first choice content
-    const choice = response.choices[0];
-    if (!choice?.message?.content) {
-      throw new Error('No content in OpenAI response');
+    // Generate metrics early (assumes success by default)
+    const metrics = OpenAIProvider.createAIMetrics(response);
+
+    // Safely extract the first choice content using optional chaining
+    const content = response?.choices?.[0]?.message?.content || '';
+
+    if (!content) {
+      this.logger?.warn('OpenAI response has no content available');
+      metrics.success = false;
     }
 
     // Create the assistant message
     const assistantMessage: LDMessage = {
       role: 'assistant',
-      content: choice.message.content,
+      content,
     };
 
-    // Extract metrics including token usage and success status
-    const metrics = OpenAIProvider.createAIMetrics(response);
-
     return {
       message: assistantMessage,
       metrics,
@@ -85,7 +92,6 @@ export class OpenAIProvider extends AIProvider {
     return this._client;
   }
 
-
   // =============================================================================
   // STATIC UTILITY METHODS
   // =============================================================================
@@ -107,11 +113,10 @@ export class OpenAIProvider extends AIProvider {
       };
     }
 
-    // OpenAI responses that complete successfully are considered successful
+    // OpenAI responses that complete successfully are considered successful by default
     return {
       success: true,
       usage,
     };
   }
-
 }
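
The observable effect of PATCH 4, sketched under the same assumptions as the earlier example (hand-rolled config, `OPENAI_API_KEY` set): an empty completion no longer rejects the promise; it surfaces as an empty message plus `success: false` in the metrics, so callers branch instead of catching. The fallback string below is a hypothetical choice, not part of the SDK.

```typescript
import { OpenAIProvider } from '@launchdarkly/server-sdk-ai-openai';

async function handleChat(provider: OpenAIProvider): Promise<string> {
  const response = await provider.invokeModel([{ role: 'user', content: 'Hello!' }]);

  // Before this patch the empty case threw 'No content in OpenAI response';
  // now content comes back as '' and metrics.success is false.
  if (!response.metrics.success) {
    console.warn('OpenAI returned no content; falling back');
    return 'Sorry, no answer was generated.';
  }
  return response.message.content;
}
```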

From 422f8852a4b1f978e72e00e487c00e3424f646b1 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Tue, 14 Oct 2025 17:38:06 +0000
Subject: [PATCH 5/7] add CI, docs, and missing build files

---
 .github/workflows/manual-publish.yml          |   1 +
 .github/workflows/server-ai-openai.yml        |  27 ++++
 .../ai-providers/server-ai-openai/README.md   | 115 ++++++++++++++++++
 .../server-ai-openai/jest.config.js           |  15 +--
 .../server-ai-openai/tsconfig.ref.json        |   7 ++
 .../server-ai-openai/typedoc.json             |   5 +
 tsconfig.json                                 |   3 +
 7 files changed, 162 insertions(+), 11 deletions(-)
 create mode 100644 .github/workflows/server-ai-openai.yml
 create mode 100644 packages/ai-providers/server-ai-openai/README.md
 create mode 100644 packages/ai-providers/server-ai-openai/tsconfig.ref.json
 create mode 100644 packages/ai-providers/server-ai-openai/typedoc.json

diff --git a/.github/workflows/manual-publish.yml b/.github/workflows/manual-publish.yml
index 9a0406ceaa..82131ef804 100644
--- a/.github/workflows/manual-publish.yml
+++ b/.github/workflows/manual-publish.yml
@@ -35,6 +35,7 @@ on:
           - packages/tooling/jest
           - packages/sdk/browser
           - packages/sdk/server-ai
+          - packages/ai-providers/server-ai-openai
           - packages/telemetry/browser-telemetry
           - packages/sdk/combined-browser
       prerelease:
diff --git a/.github/workflows/server-ai-openai.yml b/.github/workflows/server-ai-openai.yml
new file mode 100644
index 0000000000..36d7020543
--- /dev/null
+++ b/.github/workflows/server-ai-openai.yml
@@ -0,0 +1,27 @@
+name: ai-providers/server-ai-openai
+
+on:
+  push:
+    branches: [main, 'feat/**']
+    paths-ignore:
+      - '**.md' # Do not need to run CI for markdown changes.
+  pull_request:
+    branches: [main, 'feat/**']
+    paths-ignore:
+      - '**.md'
+
+jobs:
+  build-test-openai-provider:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
+        with:
+          node-version: 22.x
+          registry-url: 'https://registry.npmjs.org'
+      - id: shared
+        name: Shared CI Steps
+        uses: ./actions/ci
+        with:
+          workspace_name: '@launchdarkly/server-sdk-ai-openai'
+          workspace_path: packages/ai-providers/server-ai-openai
diff --git a/packages/ai-providers/server-ai-openai/README.md b/packages/ai-providers/server-ai-openai/README.md
new file mode 100644
index 0000000000..766a94839a
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/README.md
@@ -0,0 +1,115 @@
+# LaunchDarkly AI SDK OpenAI Provider for Server-Side JavaScript
+
+[![NPM][server-ai-openai-npm-badge]][server-ai-openai-npm-link]
+[![Actions Status][server-ai-openai-ci-badge]][server-ai-openai-ci]
+[![Documentation][server-ai-openai-ghp-badge]][server-ai-openai-ghp-link]
+[![NPM][server-ai-openai-dm-badge]][server-ai-openai-npm-link]
+[![NPM][server-ai-openai-dt-badge]][server-ai-openai-npm-link]
+
+# ⛔️⛔️⛔️⛔️
+
+> [!CAUTION]
+> This library is an alpha version and should not be considered ready for production use while this message is visible.
+
+# ☝️☝️☝️☝️☝️☝️
+
+## LaunchDarkly overview
+
+[LaunchDarkly](https://www.launchdarkly.com) is a feature management platform that serves over 100 billion feature flags daily to help teams build better software, faster. [Get started](https://docs.launchdarkly.com/home/getting-started) using LaunchDarkly today!
+
+[![Twitter Follow](https://img.shields.io/twitter/follow/launchdarkly.svg?style=social&label=Follow&maxAge=2592000)](https://twitter.com/intent/follow?screen_name=launchdarkly)
+
+## Quick Setup
+
+This package provides OpenAI integration for the LaunchDarkly AI SDK. The simplest way to use it is with the LaunchDarkly AI SDK's `initChat` method:
+
+1. Install the required packages:
+
+```shell
+npm install @launchdarkly/server-sdk-ai @launchdarkly/server-sdk-ai-openai --save
+```
+
+2. Create a chat session and use it:
+
+```typescript
+import { init } from '@launchdarkly/node-server-sdk';
+import { initAi } from '@launchdarkly/server-sdk-ai';
+
+// Initialize LaunchDarkly client
+const ldClient = init(sdkKey);
+const aiClient = initAi(ldClient);
+
+// Create a chat session
+const defaultConfig = {
+  enabled: true,
+  model: { name: 'gpt-4' },
+  provider: { name: 'openai' }
+};
+const chat = await aiClient.initChat('my-chat-config', context, defaultConfig);
+
+if (chat) {
+  const response = await chat.invoke("What is the capital of France?");
+  console.log(response.message.content);
+}
+```
+
+For more information about using the LaunchDarkly AI SDK, see the [LaunchDarkly AI SDK documentation](https://github.com/launchdarkly/js-core/tree/main/packages/sdk/server-ai/README.md).
+
+## Advanced Usage
+
+For more control, you can use the OpenAI provider package directly with LaunchDarkly configurations:
+
+```typescript
+import { OpenAIProvider } from '@launchdarkly/server-sdk-ai-openai';
+import { OpenAI } from 'openai';
+
+// Create an OpenAI client
+const client = new OpenAI({
+  apiKey: process.env.OPENAI_API_KEY,
+});
+
+// Combine LaunchDarkly AI Config messages with user message
+const configMessages = aiConfig.messages || [];
+const userMessage = { role: 'user', content: 'What is the capital of France?' };
+const allMessages = [...configMessages, userMessage];
+
+// Track the model call with LaunchDarkly tracking
+const response = await aiConfig.tracker.trackMetricsOf(
+  (result) => OpenAIProvider.createAIMetrics(result),
+  () => client.chat.completions.create({
+    model: 'gpt-4',
+    messages: allMessages,
+    temperature: 0.7,
+  })
+);
+
+console.log('AI Response:', response.choices[0].message.content);
+```
+
+## Contributing
+
+We encourage pull requests and other contributions from the community. Check out our [contributing guidelines](CONTRIBUTING.md) for instructions on how to contribute to this SDK.
+
+## About LaunchDarkly
+
+- LaunchDarkly is a continuous delivery platform that provides feature flags as a service and allows developers to iterate quickly and safely. We allow you to easily flag your features and manage them from the LaunchDarkly dashboard. With LaunchDarkly, you can:
+  - Roll out a new feature to a subset of your users (like a group of users who opt-in to a beta tester group), gathering feedback and bug reports from real-world use cases.
+  - Gradually roll out a feature to an increasing percentage of users, and track the effect that the feature has on key metrics (for instance, how likely is a user to complete a purchase if they have feature A versus feature B?).
+  - Turn off a feature that you realize is causing performance problems in production, without needing to re-deploy, or even restart the application with a changed configuration file.
+  - Grant access to certain features based on user attributes, like payment plan (e.g., users on the 'gold' plan get access to more features than users in the 'silver' plan).
+  - Disable parts of your application to facilitate maintenance, without taking everything offline.
+- LaunchDarkly provides feature flag SDKs for a wide variety of languages and technologies. Check out [our documentation](https://docs.launchdarkly.com/sdk) for a complete list.
+- Explore LaunchDarkly
+  - [launchdarkly.com](https://www.launchdarkly.com/ 'LaunchDarkly Main Website') for more information
+  - [docs.launchdarkly.com](https://docs.launchdarkly.com/ 'LaunchDarkly Documentation') for our documentation and SDK reference guides
+  - [apidocs.launchdarkly.com](https://apidocs.launchdarkly.com/ 'LaunchDarkly API Documentation') for our API documentation
+  - [blog.launchdarkly.com](https://blog.launchdarkly.com/ 'LaunchDarkly Blog Documentation') for the latest product updates
+
+[server-ai-openai-ci-badge]: https://github.com/launchdarkly/js-core/actions/workflows/server-ai-openai.yml/badge.svg
+[server-ai-openai-ci]: https://github.com/launchdarkly/js-core/actions/workflows/server-ai-openai.yml
+[server-ai-openai-npm-badge]: https://img.shields.io/npm/v/@launchdarkly/server-sdk-ai-openai.svg?style=flat-square
+[server-ai-openai-npm-link]: https://www.npmjs.com/package/@launchdarkly/server-sdk-ai-openai
+[server-ai-openai-ghp-badge]: https://img.shields.io/static/v1?label=GitHub+Pages&message=API+reference&color=00add8
+[server-ai-openai-ghp-link]: https://launchdarkly.github.io/js-core/packages/ai-providers/server-ai-openai/docs/
+[server-ai-openai-dm-badge]: https://img.shields.io/npm/dm/@launchdarkly/server-sdk-ai-openai.svg?style=flat-square
+[server-ai-openai-dt-badge]: https://img.shields.io/npm/dt/@launchdarkly/server-sdk-ai-openai.svg?style=flat-square
diff --git a/packages/ai-providers/server-ai-openai/jest.config.js b/packages/ai-providers/server-ai-openai/jest.config.js
index fea9c8773a..f106eb3bc9 100644
--- a/packages/ai-providers/server-ai-openai/jest.config.js
+++ b/packages/ai-providers/server-ai-openai/jest.config.js
@@ -1,14 +1,7 @@
 module.exports = {
-  preset: 'ts-jest',
+  transform: { '^.+\\.ts?$': 'ts-jest' },
+  testMatch: ['**/__tests__/**/*test.ts?(x)'],
   testEnvironment: 'node',
-  roots: [''],
-  testMatch: ['**/__tests__/**/*.test.ts'],
-  collectCoverageFrom: [
-    'src/**/*.ts',
-    '!src/**/*.d.ts',
-    '!src/**/*.test.ts',
-    '!src/**/*.spec.ts',
-  ],
-  coverageDirectory: 'coverage',
-  coverageReporters: ['text', 'lcov', 'html'],
+  moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'],
+  collectCoverageFrom: ['src/**/*.ts'],
 };
diff --git a/packages/ai-providers/server-ai-openai/tsconfig.ref.json b/packages/ai-providers/server-ai-openai/tsconfig.ref.json
new file mode 100644
index 0000000000..0c86b2c554
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/tsconfig.ref.json
@@ -0,0 +1,7 @@
+{
+  "extends": "./tsconfig.json",
+  "include": ["src/**/*"],
+  "compilerOptions": {
+    "composite": true
+  }
+}
diff --git a/packages/ai-providers/server-ai-openai/typedoc.json b/packages/ai-providers/server-ai-openai/typedoc.json
new file mode 100644
index 0000000000..7ac616b544
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/typedoc.json
@@ -0,0 +1,5 @@
+{
+  "extends": ["../../../typedoc.base.json"],
+  "entryPoints": ["src/index.ts"],
+  "out": "docs"
+}
diff --git a/tsconfig.json b/tsconfig.json
index ccc30f0944..2f90d7130f 100644
--- a/tsconfig.json
+++ b/tsconfig.json
@@ -81,6 +81,9 @@
     },
     {
       "path": "./packages/ai-providers/server-ai-langchain/tsconfig.ref.json"
+    },
+    {
+      "path": "./packages/ai-providers/server-ai-openai/tsconfig.ref.json"
     }
   ]
 }

From 7f7326b209c85370d12448f659cdb3965a5cc350 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Tue, 14 Oct 2025 17:53:25 +0000
Subject: [PATCH 6/7] note that we are using the completion api and will move
 to responses soon

---
 packages/ai-providers/server-ai-openai/README.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/packages/ai-providers/server-ai-openai/README.md b/packages/ai-providers/server-ai-openai/README.md
index 766a94839a..be61c32bd0 100644
--- a/packages/ai-providers/server-ai-openai/README.md
+++ b/packages/ai-providers/server-ai-openai/README.md
@@ -11,6 +11,9 @@
 > [!CAUTION]
 > This library is an alpha version and should not be considered ready for production use while this message is visible.
 
+> [!NOTE]
+> This provider currently uses OpenAI's Chat Completions API. We plan to migrate to the Responses API in a future release to take advantage of improved functionality and performance.
+
 # ☝️☝️☝️☝️☝️☝️
 
 ## LaunchDarkly overview

From 42d12cdf27bdb711f6d6a10825d680df61b99653 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Tue, 14 Oct 2025 20:01:14 +0000
Subject: [PATCH 7/7] Add to base readme

---
 README.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/README.md b/README.md
index 6d26cac6af..44e17805bc 100644
--- a/README.md
+++ b/README.md
@@ -38,6 +38,7 @@ This includes shared libraries, used by SDKs and other tools, as well as SDKs.
 
 | AI Providers | npm | issues | tests |
 | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------- | ------------------------------------------- | ------------------------------------------------------------------- |
 | [@launchdarkly/server-sdk-ai-langchain](packages/ai-providers/server-ai-langchain/README.md) | [![NPM][server-ai-langchain-npm-badge]][server-ai-langchain-npm-link] | [server-ai-langchain][package-ai-providers-server-ai-langchain-issues] | [![Actions Status][server-ai-langchain-ci-badge]][server-ai-langchain-ci] |
+| [@launchdarkly/server-sdk-ai-openai](packages/ai-providers/server-ai-openai/README.md) | [![NPM][server-ai-openai-npm-badge]][server-ai-openai-npm-link] | [server-ai-openai][package-ai-providers-server-ai-openai-issues] | [![Actions Status][server-ai-openai-ci-badge]][server-ai-openai-ci] |
 | [@launchdarkly/server-sdk-ai-vercel](packages/ai-providers/server-ai-vercel/README.md) | [![NPM][server-ai-vercel-npm-badge]][server-ai-vercel-npm-link] | [server-ai-vercel][package-ai-providers-server-ai-vercel-issues] | [![Actions Status][server-ai-vercel-ci-badge]][server-ai-vercel-ci] |
 
 ## Organization
@@ -231,6 +232,12 @@ We encourage pull requests and other contributions from the community. Check out
 [server-ai-langchain-npm-badge]: https://img.shields.io/npm/v/@launchdarkly/server-sdk-ai-langchain.svg?style=flat-square
 [server-ai-langchain-npm-link]: https://www.npmjs.com/package/@launchdarkly/server-sdk-ai-langchain
 [package-ai-providers-server-ai-langchain-issues]: https://github.com/launchdarkly/js-core/issues?q=is%3Aissue+is%3Aopen+label%3A%22package%3A+ai-providers%2Fserver-ai-langchain%22+
+[//]: # 'ai-providers/server-ai-openai'
+[server-ai-openai-ci-badge]: https://github.com/launchdarkly/js-core/actions/workflows/server-ai-openai.yml/badge.svg
+[server-ai-openai-ci]: https://github.com/launchdarkly/js-core/actions/workflows/server-ai-openai.yml
+[server-ai-openai-npm-badge]: https://img.shields.io/npm/v/@launchdarkly/server-sdk-ai-openai.svg?style=flat-square
+[server-ai-openai-npm-link]: https://www.npmjs.com/package/@launchdarkly/server-sdk-ai-openai
+[package-ai-providers-server-ai-openai-issues]: https://github.com/launchdarkly/js-core/issues?q=is%3Aissue+is%3Aopen+label%3A%22package%3A+ai-providers%2Fserver-ai-openai%22+
 [//]: # 'ai-providers/server-ai-vercel'
 [server-ai-vercel-ci-badge]: https://github.com/launchdarkly/js-core/actions/workflows/server-ai-vercel.yml/badge.svg
 [server-ai-vercel-ci]: https://github.com/launchdarkly/js-core/actions/workflows/server-ai-vercel.yml
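
Finally, the token-accounting seam on its own. The payload below is a hand-built stand-in shaped like an OpenAI chat completion (mirroring the test fixtures in PATCH 1); `createAIMetrics` maps OpenAI's `prompt_tokens`/`completion_tokens`/`total_tokens` onto LaunchDarkly's `input`/`output`/`total`, defaulting missing counts to 0 and omitting `usage` entirely when the response has none.

```typescript
import { OpenAIProvider } from '@launchdarkly/server-sdk-ai-openai';

// Stand-in completion payload; in real code this is the value resolved by
// client.chat.completions.create(...).
const completion = {
  choices: [{ message: { role: 'assistant', content: 'Hi!' } }],
  usage: { prompt_tokens: 50, completion_tokens: 25, total_tokens: 75 },
};

const metrics = OpenAIProvider.createAIMetrics(completion);
console.log(metrics);
// => { success: true, usage: { total: 75, input: 50, output: 25 } }
// With no usage block the result is { success: true, usage: undefined }.
```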