diff --git a/.github/workflows/manual-publish-docs.yml b/.github/workflows/manual-publish-docs.yml
index 397bc1a58e..907b270001 100644
--- a/.github/workflows/manual-publish-docs.yml
+++ b/.github/workflows/manual-publish-docs.yml
@@ -20,6 +20,7 @@ on:
           - packages/store/node-server-sdk-dynamodb
           - packages/telemetry/node-server-sdk-otel
           - packages/sdk/browser
+          - packages/sdk/server-ai
 name: Publish Documentation
 jobs:
   build-publish:
diff --git a/.github/workflows/manual-publish.yml b/.github/workflows/manual-publish.yml
index c8ba253ade..c1ca69e3b3 100644
--- a/.github/workflows/manual-publish.yml
+++ b/.github/workflows/manual-publish.yml
@@ -33,6 +33,7 @@ on:
           - packages/telemetry/node-server-sdk-otel
           - packages/tooling/jest
           - packages/sdk/browser
+          - packages/sdk/server-ai
       prerelease:
         description: 'Is this a prerelease. If so, then the latest tag will not be updated in npm.'
         type: boolean
diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml
index 57a260d962..33b7a55f1f 100644
--- a/.github/workflows/release-please.yml
+++ b/.github/workflows/release-please.yml
@@ -25,6 +25,7 @@ jobs:
       package-tooling-jest-release: ${{ steps.release.outputs['packages/tooling/jest--release_created'] }}
       package-react-universal-release: ${{ steps.release.outputs['packages/sdk/react-universal--release_created'] }}
       package-browser-released: ${{ steps.release.outputs['packages/sdk/browser--release_created'] }}
+      package-server-ai-released: ${{ steps.release.outputs['packages/sdk/server-ai--release_created'] }}
     steps:
       - uses: googleapis/release-please-action@v4
         id: release
@@ -377,3 +378,23 @@ jobs:
     with:
       workspace_path: packages/sdk/react-universal
       aws_assume_role: ${{ vars.AWS_ROLE_ARN }}
+
+  release-server-ai:
+    runs-on: ubuntu-latest
+    needs: ['release-please', 'release-sdk-server']
+    permissions:
+      id-token: write
+      contents: write
+    if: ${{ always() && !failure() && !cancelled() && needs.release-please.outputs.package-server-ai-released == 'true' }}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-node@v4
+        with:
+          node-version: 20.x
+          registry-url: 'https://registry.npmjs.org'
+      - id: release-server-ai
+        name: Full release of packages/sdk/server-ai
+        uses: ./actions/full-release
+        with:
+          workspace_path: packages/sdk/server-ai
+          aws_assume_role: ${{ vars.AWS_ROLE_ARN }}
diff --git a/.github/workflows/server-ai.yml b/.github/workflows/server-ai.yml
new file mode 100644
index 0000000000..e403cb52ec
--- /dev/null
+++ b/.github/workflows/server-ai.yml
@@ -0,0 +1,37 @@
+name: sdk/server-ai
+
+on:
+  push:
+    branches: [main, 'feat/**']
+    paths-ignore:
+      - '**.md' # Do not need to run CI for markdown changes.
+  pull_request:
+    branches: [main, 'feat/**']
+    paths-ignore:
+      - '**.md'
+
+jobs:
+  build-test-server-ai:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-node@v4
+        with:
+          node-version: 20.x
+          registry-url: 'https://registry.npmjs.org'
+      - id: shared
+        name: Shared CI Steps
+        uses: ./actions/ci
+        with:
+          workspace_name: '@launchdarkly/server-sdk-ai'
+          workspace_path: packages/sdk/server-ai
+      - name: Build bedrock example
+        run: |
+          yarn workspaces focus @launchdarkly/hello-ai-bedrock
+          yarn workspace @launchdarkly/hello-ai-bedrock lint
+          yarn workspaces foreach -pR --topological-dev --from '@launchdarkly/hello-ai-bedrock' run build
+      - name: Build OpenAI example
+        run: |
+          yarn workspaces focus @launchdarkly/hello-openai
+          yarn workspace @launchdarkly/hello-openai lint
+          yarn workspaces foreach -pR --topological-dev --from '@launchdarkly/hello-openai' run build
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 262b815739..719eb98c30 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -13,5 +13,6 @@
   "packages/shared/sdk-client": "1.12.0",
   "packages/sdk/react-native": "10.9.2",
   "packages/telemetry/node-server-sdk-otel": "1.1.1",
-  "packages/sdk/browser": "0.3.0"
+  "packages/sdk/browser": "0.3.0",
+  "packages/sdk/server-ai": "0.0.0"
 }
diff --git a/package.json b/package.json
index eb764fee94..38052de4db 100644
--- a/package.json
+++ b/package.json
@@ -25,7 +25,10 @@
     "packages/tooling/jest/example/react-native-example",
     "packages/sdk/browser",
     "packages/sdk/browser/contract-tests/entity",
-    "packages/sdk/browser/contract-tests/adapter"
+    "packages/sdk/browser/contract-tests/adapter",
+    "packages/sdk/server-ai",
+    "packages/sdk/server-ai/examples/bedrock",
+    "packages/sdk/server-ai/examples/openai"
   ],
   "private": true,
   "scripts": {
diff --git a/packages/sdk/server-ai/README.md b/packages/sdk/server-ai/README.md
new file mode 100644
index 0000000000..e4ae013f12
--- /dev/null
+++ b/packages/sdk/server-ai/README.md
@@ -0,0 +1,68 @@
+# LaunchDarkly AI SDK for Server-Side JavaScript
+
+# ⛔️⛔️⛔️⛔️
+
+> [!CAUTION]
+> This library is an alpha version and should not be considered ready for production use while this message is visible.
+
+# ☝️☝️☝️☝️☝️☝️
+
+## LaunchDarkly overview
+
+[LaunchDarkly](https://www.launchdarkly.com) is a feature management platform that serves over 100 billion feature flags daily to help teams build better software, faster. [Get started](https://docs.launchdarkly.com/home/getting-started) using LaunchDarkly today!
+
+[![Twitter Follow](https://img.shields.io/twitter/follow/launchdarkly.svg?style=social&label=Follow&maxAge=2592000)](https://twitter.com/intent/follow?screen_name=launchdarkly)
+
+## Quick Setup
+
+This assumes that you have already installed the LaunchDarkly Node.js SDK or a compatible edge SDK.
+
+1. Install this package with `npm` or `yarn`:
+
+```shell
+npm install @launchdarkly/server-sdk-ai --save
+```
+
+2. Create an AI SDK instance:
+
+```typescript
+// The ldClient instance should be created based on the instructions in the relevant SDK.
+const aiClient = initAi(ldClient);
+```
+
+3. Evaluate a model configuration:
+
+```typescript
+const config = await aiClient.modelConfig(
+  aiConfigKey!,
+  context,
+  { enabled: false },
+  { myVariable: 'My User Defined Variable' },
+);
+```
+
+For a complete example of how to use the config, please refer to the examples folder.
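+
+A minimal sketch of what typically follows step 3, assuming an OpenAI client named `openai` and the `config` returned above (see `examples/openai` for a runnable version):
+
+```typescript
+if (config.enabled) {
+  // The tracker reports duration, token usage, and generation success to LaunchDarkly.
+  const completion = await config.tracker.trackOpenAI(async () =>
+    openai.chat.completions.create({
+      messages: config.config.prompt ?? [],
+      model: config.config.model?.modelId ?? 'gpt-4',
+    }),
+  );
+  console.log(completion.choices[0]?.message.content);
+}
+```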
+
+## Contributing
+
+We encourage pull requests and other contributions from the community. Check out our [contributing guidelines](CONTRIBUTING.md) for instructions on how to contribute to this SDK.
+
+## About LaunchDarkly
+
+- LaunchDarkly is a continuous delivery platform that provides feature flags as a service and allows developers to iterate quickly and safely. We allow you to easily flag your features and manage them from the LaunchDarkly dashboard. With LaunchDarkly, you can:
+  - Roll out a new feature to a subset of your users (like a group of users who opt-in to a beta tester group), gathering feedback and bug reports from real-world use cases.
+  - Gradually roll out a feature to an increasing percentage of users, and track the effect that the feature has on key metrics (for instance, how likely is a user to complete a purchase if they have feature A versus feature B?).
+  - Turn off a feature that you realize is causing performance problems in production, without needing to re-deploy, or even restart the application with a changed configuration file.
+  - Grant access to certain features based on user attributes, like payment plan (eg: users on the ‘gold’ plan get access to more features than users in the ‘silver’ plan).
+  - Disable parts of your application to facilitate maintenance, without taking everything offline.
+- LaunchDarkly provides feature flag SDKs for a wide variety of languages and technologies. Check out [our documentation](https://docs.launchdarkly.com/sdk) for a complete list.
+- Explore LaunchDarkly
+  - [launchdarkly.com](https://www.launchdarkly.com/ 'LaunchDarkly Main Website') for more information
+  - [docs.launchdarkly.com](https://docs.launchdarkly.com/ 'LaunchDarkly Documentation') for our documentation and SDK reference guides
+  - [apidocs.launchdarkly.com](https://apidocs.launchdarkly.com/ 'LaunchDarkly API Documentation') for our API documentation
+  - [blog.launchdarkly.com](https://blog.launchdarkly.com/ 'LaunchDarkly Blog Documentation') for the latest product updates
diff --git a/packages/sdk/server-ai/examples/bedrock/README.md b/packages/sdk/server-ai/examples/bedrock/README.md
new file mode 100644
index 0000000000..a997129f82
--- /dev/null
+++ b/packages/sdk/server-ai/examples/bedrock/README.md
@@ -0,0 +1,46 @@
+# LaunchDarkly AI SDK for AWS Bedrock Example
+
+This package demonstrates the integration of LaunchDarkly's AI SDK with AWS Bedrock, allowing you to leverage LaunchDarkly's AI Config capabilities in AI-powered applications using AWS Bedrock.
+
+## Installation and Build
+
+When run as part of the js-core monorepo, the project will use local dependencies. As such, those dependencies need to be built.
+
+In the root of the repository run:
+
+```bash
+yarn
+```
+
+And then:
+
+```bash
+yarn build
+```
+
+## Configuration
+
+Before running the example, make sure to set the following environment variables:
+
+- `LAUNCHDARKLY_SDK_KEY`: Your LaunchDarkly SDK key
+- `LAUNCHDARKLY_AI_CONFIG_KEY`: Your LaunchDarkly AI configuration key (defaults to 'sample-ai-config' if not set)
+
+Additionally, ensure you have proper AWS credentials configured to access Bedrock services.
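+
+For example, in the shell you use to run the example (values here are placeholders):
+
+```bash
+export LAUNCHDARKLY_SDK_KEY='your-sdk-key'
+export LAUNCHDARKLY_AI_CONFIG_KEY='sample-ai-config'
+# AWS credentials are picked up through the standard AWS mechanisms,
+# e.g. environment variables, a shared credentials file, or an instance profile.
+```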
+
+## Usage
+
+The main script (`index.js`) demonstrates how to:
+
+1. Initialize the LaunchDarkly SDK
+2. Set up a user context
+3. Initialize the LaunchDarkly AI client
+4. Retrieve an AI model configuration
+5. Send a prompt to AWS Bedrock
+6. Track token usage
+
+To run the example (in the bedrock directory):
+
+```bash
+yarn start
+```
diff --git a/packages/sdk/server-ai/examples/bedrock/package.json b/packages/sdk/server-ai/examples/bedrock/package.json
new file mode 100644
index 0000000000..1461cee305
--- /dev/null
+++ b/packages/sdk/server-ai/examples/bedrock/package.json
@@ -0,0 +1,54 @@
+{
+  "name": "@launchdarkly/hello-ai-bedrock",
+  "version": "0.1.0",
+  "description": "An example of using the LaunchDarkly AI SDK with AWS Bedrock.",
+  "private": true,
+  "main": "dist/index.js",
+  "types": "dist/index.d.ts",
+  "type": "commonjs",
+  "scripts": {
+    "build": "tsc",
+    "start": "yarn build && node ./dist/index.js",
+    "lint": "npx eslint . --ext .ts",
+    "prettier": "prettier --write '**/*.@(js|ts|tsx|json|css)' --ignore-path ../../../.prettierignore",
+    "lint:fix": "yarn run lint --fix",
+    "check": "yarn prettier && yarn lint && yarn build && yarn test"
+  },
+  "keywords": [
+    "launchdarkly",
+    "ai",
+    "llm"
+  ],
+  "author": "LaunchDarkly",
+  "license": "Apache-2.0",
+  "dependencies": {
+    "@aws-sdk/client-bedrock-runtime": "^3.679.0",
+    "@launchdarkly/node-server-sdk": "^9.7.1",
+    "@launchdarkly/server-sdk-ai": "0.1.0"
+  },
+  "devDependencies": {
+    "@trivago/prettier-plugin-sort-imports": "^4.1.1",
+    "@tsconfig/node20": "20.1.4",
+    "@typescript-eslint/eslint-plugin": "^6.20.0",
+    "@typescript-eslint/parser": "^6.20.0",
+    "eslint": "^8.45.0",
+    "eslint-config-airbnb-base": "^15.0.0",
+    "eslint-config-airbnb-typescript": "^17.1.0",
+    "eslint-config-prettier": "^8.8.0",
+    "eslint-plugin-import": "^2.27.5",
+    "eslint-plugin-jest": "^27.6.3",
+    "eslint-plugin-prettier": "^5.0.0",
+    "jest": "^29.7.0",
+    "prettier": "^3.0.0",
+    "rimraf": "^5.0.5",
+    "typedoc": "0.25.0",
+    "typescript": "^5.5.3"
+  },
+  "directories": {
+    "example": "example"
+  },
+  "repository": {
+    "type": "git",
+    "url": "github.com/launchdarkly/js-core"
+  }
+}
diff --git a/packages/sdk/server-ai/examples/bedrock/src/index.ts b/packages/sdk/server-ai/examples/bedrock/src/index.ts
new file mode 100644
index 0000000000..95bd83da60
--- /dev/null
+++ b/packages/sdk/server-ai/examples/bedrock/src/index.ts
@@ -0,0 +1,78 @@
+/* eslint-disable no-console */
+import { BedrockRuntimeClient, ConverseCommand, Message } from '@aws-sdk/client-bedrock-runtime';
+
+import { init } from '@launchdarkly/node-server-sdk';
+import { initAi } from '@launchdarkly/server-sdk-ai';
+
+const sdkKey = process.env.LAUNCHDARKLY_SDK_KEY;
+const aiConfigKey = process.env.LAUNCHDARKLY_AI_CONFIG_KEY || 'sample-ai-config';
+const awsClient = new BedrockRuntimeClient({ region: 'us-east-1' });
+
+if (!sdkKey) {
+  console.error('*** Please set the LAUNCHDARKLY_SDK_KEY env first');
+  process.exit(1);
+}
+
+if (!aiConfigKey) {
+  console.error('*** Please set the LAUNCHDARKLY_AI_CONFIG_KEY env first');
+  process.exit(1);
+}
+
+const ldClient = init(sdkKey);
+
+// Set up the context properties
+const context = {
+  kind: 'user',
+  key: 'example-user-key',
+  name: 'Sandy',
+};
+
+function mapPromptToConversation(
+  prompt: { role: 'user' | 'assistant' | 'system'; content: string }[],
+): Message[] {
+  return prompt.map((item) => ({
+    // Bedrock doesn't support the system role in the converse command, so map it to user.
+    role: item.role !== 'system' ? item.role : 'user',
+    content: [{ text: item.content }],
+  }));
+}
+
+async function main() {
+  try {
+    await ldClient.waitForInitialization({ timeout: 10 });
+    console.log('*** SDK successfully initialized');
+  } catch (error) {
+    console.log(`*** SDK failed to initialize: ${error}`);
+    process.exit(1);
+  }
+
+  const aiClient = initAi(ldClient);
+
+  const aiConfig = await aiClient.modelConfig(
+    aiConfigKey!,
+    context,
+    {
+      model: {
+        modelId: 'my-default-model',
+      },
+      enabled: true,
+    },
+    {
+      myVariable: 'My User Defined Variable',
+    },
+  );
+  const { tracker } = aiConfig;
+
+  const completion = tracker.trackBedrockConverse(
+    await awsClient.send(
+      new ConverseCommand({
+        modelId: aiConfig.config.model?.modelId ?? 'no-model',
+        messages: mapPromptToConversation(aiConfig.config.prompt ?? []),
+      }),
+    ),
+  );
+  console.log('AI Response:', completion.output?.message?.content?.[0]?.text ?? 'no-response');
+  console.log('Success.');
+}
+
+main();
diff --git a/packages/sdk/server-ai/examples/bedrock/tsconfig.eslint.json b/packages/sdk/server-ai/examples/bedrock/tsconfig.eslint.json
new file mode 100644
index 0000000000..8241f86c36
--- /dev/null
+++ b/packages/sdk/server-ai/examples/bedrock/tsconfig.eslint.json
@@ -0,0 +1,5 @@
+{
+  "extends": "./tsconfig.json",
+  "include": ["/**/*.ts", "/**/*.tsx"],
+  "exclude": ["node_modules"]
+}
diff --git a/packages/sdk/server-ai/examples/bedrock/tsconfig.json b/packages/sdk/server-ai/examples/bedrock/tsconfig.json
new file mode 100644
index 0000000000..5a491900d3
--- /dev/null
+++ b/packages/sdk/server-ai/examples/bedrock/tsconfig.json
@@ -0,0 +1,22 @@
+{
+  "extends": "@tsconfig/node20/tsconfig.json",
+  "compilerOptions": {
+    "noEmit": false,
+    "outDir": "dist",
+    "baseUrl": ".",
+    "allowUnusedLabels": false,
+    "allowUnreachableCode": false,
+    "noFallthroughCasesInSwitch": true,
+    "noUncheckedIndexedAccess": true,
+    "noUnusedLocals": true,
+    "noUnusedParameters": true,
+    "forceConsistentCasingInFileNames": true,
+    "declaration": true,
+    "sourceMap": true,
+    "resolveJsonModule": true,
+    "module": "CommonJS",
+    "moduleResolution": "Node"
+  },
+  "include": ["src"],
+  "exclude": ["dist", "node_modules"]
+}
diff --git a/packages/sdk/server-ai/examples/openai/README.md b/packages/sdk/server-ai/examples/openai/README.md
new file mode 100644
index 0000000000..0126c332ad
--- /dev/null
+++ b/packages/sdk/server-ai/examples/openai/README.md
@@ -0,0 +1,50 @@
+# LaunchDarkly AI SDK for OpenAI Example
+
+This package demonstrates the integration of LaunchDarkly's AI SDK with OpenAI, allowing you to leverage LaunchDarkly's AI Config capabilities in AI-powered applications using OpenAI's services.
+
+## Installation and Build
+
+When run as part of the js-core monorepo, the project will use local dependencies. As such, those dependencies need to be built.
+
+In the root of the repository run:
+
+```bash
+yarn
+```
+
+And then:
+
+```bash
+yarn build
+```
+
+## Configuration
+
+Before running the example, make sure to set the following environment variables:
+
+- `LAUNCHDARKLY_SDK_KEY`: Your LaunchDarkly SDK key
+- `LAUNCHDARKLY_AI_CONFIG_KEY`: Your LaunchDarkly AI configuration key (defaults to 'sample-ai-config' if not set)
+- `OPENAI_API_KEY`: Your OpenAI API key
+
+## Usage
+
+The main script (`index.js`) demonstrates how to:
+
+1. Initialize the LaunchDarkly SDK
+2. Set up a user context
+3. Initialize the LaunchDarkly AI client
+4. Retrieve an AI model configuration
+5. Send a prompt to OpenAI
+6. Track token usage
+
+To run the example (in the openai directory):
+
+```bash
+yarn start
+```
+
+## Note
+
+This example uses OpenAI's chat completions API. Make sure your LaunchDarkly AI configuration is set up correctly to work with OpenAI's models and API structure.
diff --git a/packages/sdk/server-ai/examples/openai/package.json b/packages/sdk/server-ai/examples/openai/package.json
new file mode 100644
index 0000000000..6d4b74831a
--- /dev/null
+++ b/packages/sdk/server-ai/examples/openai/package.json
@@ -0,0 +1,53 @@
+{
+  "name": "@launchdarkly/hello-openai",
+  "version": "0.1.0",
+  "description": "An example of using the LaunchDarkly AI SDK with OpenAI.",
+  "private": true,
+  "main": "dist/index.js",
+  "types": "dist/index.d.ts",
+  "scripts": {
+    "build": "tsc",
+    "start": "yarn build && node ./dist/index.js",
+    "lint": "npx eslint . --ext .ts",
+    "prettier": "prettier --write '**/*.@(js|ts|tsx|json|css)' --ignore-path ../../../.prettierignore",
+    "lint:fix": "yarn run lint --fix",
+    "check": "yarn prettier && yarn lint && yarn build && yarn test"
+  },
+  "keywords": [
+    "launchdarkly",
+    "ai",
+    "llm"
+  ],
+  "author": "LaunchDarkly",
+  "license": "Apache-2.0",
+  "dependencies": {
+    "@launchdarkly/node-server-sdk": "^9.7.1",
+    "@launchdarkly/server-sdk-ai": "0.1.0",
+    "openai": "^4.58.1"
+  },
+  "devDependencies": {
+    "@trivago/prettier-plugin-sort-imports": "^4.1.1",
+    "@tsconfig/node20": "20.1.4",
+    "@typescript-eslint/eslint-plugin": "^6.20.0",
+    "@typescript-eslint/parser": "^6.20.0",
+    "eslint": "^8.45.0",
+    "eslint-config-airbnb-base": "^15.0.0",
+    "eslint-config-airbnb-typescript": "^17.1.0",
+    "eslint-config-prettier": "^8.8.0",
+    "eslint-plugin-import": "^2.27.5",
+    "eslint-plugin-jest": "^27.6.3",
+    "eslint-plugin-prettier": "^5.0.0",
+    "jest": "^29.7.0",
+    "prettier": "^3.0.0",
+    "rimraf": "^5.0.5",
+    "typedoc": "0.25.0",
+    "typescript": "^5.5.3"
+  },
+  "directories": {
+    "example": "example"
+  },
+  "repository": {
+    "type": "git",
+    "url": "github.com/launchdarkly/js-core"
+  }
+}
diff --git a/packages/sdk/server-ai/examples/openai/src/index.ts b/packages/sdk/server-ai/examples/openai/src/index.ts
new file mode 100644
index 0000000000..041d1f076e
--- /dev/null
+++ b/packages/sdk/server-ai/examples/openai/src/index.ts
@@ -0,0 +1,72 @@
+/* eslint-disable no-console */
+import { OpenAI } from 'openai';
+
+import { init, LDContext } from '@launchdarkly/node-server-sdk';
+import { initAi } from '@launchdarkly/server-sdk-ai';
+
+// Environment variables
+const sdkKey = process.env.LAUNCHDARKLY_SDK_KEY;
+const aiConfigKey = process.env.LAUNCHDARKLY_AI_CONFIG_KEY || 'sample-ai-config';
+
+// Initialize OpenAI client
+const client = new OpenAI({
+  apiKey: process.env.OPENAI_API_KEY, // This is the default and can be omitted
+});
+
+// Validate required environment variables
+if (!sdkKey) {
+  console.error('*** Please set the LAUNCHDARKLY_SDK_KEY env first');
+  process.exit(1);
+}
+
+if (!aiConfigKey) {
+  console.error('*** Please set the LAUNCHDARKLY_AI_CONFIG_KEY env first');
+  process.exit(1);
+}
+
+// Initialize LaunchDarkly client
+const ldClient = init(sdkKey);
+
+// Set up the context properties. This context should appear on your LaunchDarkly contexts dashboard
+// soon after you run the demo.
+const context: LDContext = {
+  kind: 'user',
+  key: 'example-user-key',
+  name: 'Sandy',
+};
+
+async function main(): Promise<void> {
+  try {
+    await ldClient.waitForInitialization({ timeout: 10 });
+    console.log('*** SDK successfully initialized');
+  } catch (error) {
+    console.log(`*** SDK failed to initialize: ${error}`);
+    process.exit(1);
+  }
+
+  const aiClient = initAi(ldClient);
+
+  const aiConfig = await aiClient.modelConfig(
+    aiConfigKey,
+    context,
+    {
+      model: {
+        modelId: 'gpt-4',
+      },
+    },
+    { myVariable: 'My User Defined Variable' },
+  );
+
+  const { tracker } = aiConfig;
+  const completion = await tracker.trackOpenAI(async () =>
+    client.chat.completions.create({
+      messages: aiConfig.config.prompt || [],
+      model: aiConfig.config.model?.modelId || 'gpt-4',
+    }),
+  );
+
+  console.log('AI Response:', completion.choices[0]?.message.content);
+  console.log('Success.');
+}
+
+main();
diff --git a/packages/sdk/server-ai/examples/openai/tsconfig.eslint.json b/packages/sdk/server-ai/examples/openai/tsconfig.eslint.json
new file mode 100644
index 0000000000..8241f86c36
--- /dev/null
+++ b/packages/sdk/server-ai/examples/openai/tsconfig.eslint.json
@@ -0,0 +1,5 @@
+{
+  "extends": "./tsconfig.json",
+  "include": ["/**/*.ts", "/**/*.tsx"],
+  "exclude": ["node_modules"]
+}
diff --git a/packages/sdk/server-ai/examples/openai/tsconfig.json b/packages/sdk/server-ai/examples/openai/tsconfig.json
new file mode 100644
index 0000000000..5a491900d3
--- /dev/null
+++ b/packages/sdk/server-ai/examples/openai/tsconfig.json
@@ -0,0 +1,22 @@
+{
+  "extends": "@tsconfig/node20/tsconfig.json",
+  "compilerOptions": {
+    "noEmit": false,
+    "outDir": "dist",
+    "baseUrl": ".",
+    "allowUnusedLabels": false,
+    "allowUnreachableCode": false,
+    "noFallthroughCasesInSwitch": true,
+    "noUncheckedIndexedAccess": true,
+    "noUnusedLocals": true,
+    "noUnusedParameters": true,
+    "forceConsistentCasingInFileNames": true,
+    "declaration": true,
+    "sourceMap": true,
+    "resolveJsonModule": true,
+    "module": "CommonJS",
+    "moduleResolution": "Node"
+  },
+  "include": ["src"],
+  "exclude": ["dist", "node_modules"]
+}
diff --git a/packages/sdk/server-ai/package.json b/packages/sdk/server-ai/package.json
new file mode 100644
index 0000000000..2af0e192d7
--- /dev/null
+++ b/packages/sdk/server-ai/package.json
@@ -0,0 +1,48 @@
+{
+  "name": "@launchdarkly/server-sdk-ai",
+  "version": "0.1.0",
+  "description": "LaunchDarkly AI SDK for Server-Side JavaScript",
+  "main": "dist/index.js",
+  "types": "dist/index.d.ts",
+  "type": "commonjs",
+  "scripts": {
+    "build": "npx tsc",
+    "lint": "npx eslint . --ext .ts",
+    "prettier": "prettier --write '**/*.@(js|ts|tsx|json|css)' --ignore-path ../../../.prettierignore",
+    "lint:fix": "yarn run lint --fix",
+    "check": "yarn prettier && yarn lint && yarn build && yarn test",
+    "test": "echo No tests added yet."
+ }, + "keywords": [ + "launchdarkly", + "ai", + "llm" + ], + "author": "LaunchDarkly", + "license": "Apache-2.0", + "dependencies": { + "mustache": "^4.2.0" + }, + "devDependencies": { + "@launchdarkly/js-server-sdk-common": "2.9.0", + "@trivago/prettier-plugin-sort-imports": "^4.1.1", + "@types/jest": "^29.5.3", + "@types/mustache": "^4.2.5", + "@typescript-eslint/eslint-plugin": "^6.20.0", + "@typescript-eslint/parser": "^6.20.0", + "eslint": "^8.45.0", + "eslint-config-airbnb-base": "^15.0.0", + "eslint-config-airbnb-typescript": "^17.1.0", + "eslint-config-prettier": "^8.8.0", + "eslint-plugin-import": "^2.27.5", + "eslint-plugin-jest": "^27.6.3", + "eslint-plugin-prettier": "^5.0.0", + "jest": "^29.6.1", + "prettier": "^3.0.0", + "ts-jest": "^29.1.1", + "typescript": "5.1.6" + }, + "peerDependencies": { + "@launchdarkly/js-server-sdk-common": "2.x" + } +} diff --git a/packages/sdk/server-ai/src/LDAIClientImpl.ts b/packages/sdk/server-ai/src/LDAIClientImpl.ts new file mode 100644 index 0000000000..dd0dead9d0 --- /dev/null +++ b/packages/sdk/server-ai/src/LDAIClientImpl.ts @@ -0,0 +1,68 @@ +import * as Mustache from 'mustache'; + +import { LDContext } from '@launchdarkly/js-server-sdk-common'; + +import { LDAIConfig, LDGenerationConfig, LDMessage, LDModelConfig } from './api/config'; +import { LDAIClient } from './api/LDAIClient'; +import { LDAIConfigTrackerImpl } from './LDAIConfigTrackerImpl'; +import { LDClientMin } from './LDClientMin'; + +/** + * Metadata assorted with a model configuration variation. + */ +interface LDMeta { + versionKey: string; + enabled: boolean; +} + +/** + * Interface for the model configuration variation returned by LaunchDarkly. This is the internal + * typing and not meant for exposure to the application developer. + */ +interface VariationContent { + model?: LDModelConfig; + prompt?: LDMessage[]; + _ldMeta?: LDMeta; +} + +export class LDAIClientImpl implements LDAIClient { + constructor(private _ldClient: LDClientMin) {} + + interpolateTemplate(template: string, variables: Record): string { + return Mustache.render(template, variables, undefined, { escape: (item: any) => item }); + } + + async modelConfig( + key: string, + context: LDContext, + defaultValue: TDefault, + variables?: Record, + ): Promise { + const value: VariationContent = await this._ldClient.variation(key, context, defaultValue); + // We are going to modify the contents before returning them, so we make a copy. + // This isn't a deep copy and the application developer should not modify the returned content. + const config: LDGenerationConfig = { ...value }; + const allVariables = { ...variables, ldctx: context }; + + if (value.prompt) { + config.prompt = value.prompt.map((entry: any) => ({ + ...entry, + content: this.interpolateTemplate(entry.content, allVariables), + })); + } + + return { + config, + // eslint-disable-next-line no-underscore-dangle + tracker: new LDAIConfigTrackerImpl( + this._ldClient, + key, + // eslint-disable-next-line no-underscore-dangle + value._ldMeta?.versionKey ?? 
+        context,
+      ),
+      // eslint-disable-next-line no-underscore-dangle
+      enabled: !!value._ldMeta?.enabled,
+    };
+  }
+}
diff --git a/packages/sdk/server-ai/src/LDAIConfigTrackerImpl.ts b/packages/sdk/server-ai/src/LDAIConfigTrackerImpl.ts
new file mode 100644
index 0000000000..763d5bf112
--- /dev/null
+++ b/packages/sdk/server-ai/src/LDAIConfigTrackerImpl.ts
@@ -0,0 +1,102 @@
+import { LDContext } from '@launchdarkly/js-server-sdk-common';
+
+import { LDAIConfigTracker } from './api/config';
+import { createBedrockTokenUsage, LDFeedbackKind, LDTokenUsage } from './api/metrics';
+import { createOpenAiUsage } from './api/metrics/OpenAiUsage';
+import { LDClientMin } from './LDClientMin';
+
+export class LDAIConfigTrackerImpl implements LDAIConfigTracker {
+  constructor(
+    private _ldClient: LDClientMin,
+    private _configKey: string,
+    private _versionKey: string,
+    private _context: LDContext,
+  ) {}
+
+  private _getTrackData(): { versionKey: string; configKey: string } {
+    return {
+      versionKey: this._versionKey,
+      configKey: this._configKey,
+    };
+  }
+
+  trackDuration(duration: number): void {
+    this._ldClient.track('$ld:ai:duration:total', this._context, this._getTrackData(), duration);
+  }
+
+  async trackDurationOf<TRes>(func: () => Promise<TRes>): Promise<TRes> {
+    const startTime = Date.now();
+    const result = await func();
+    const endTime = Date.now();
+    const duration = endTime - startTime; // duration in milliseconds
+    this.trackDuration(duration);
+    return result;
+  }
+
+  trackFeedback(feedback: { kind: LDFeedbackKind }): void {
+    if (feedback.kind === LDFeedbackKind.Positive) {
+      this._ldClient.track('$ld:ai:feedback:user:positive', this._context, this._getTrackData(), 1);
+    } else if (feedback.kind === LDFeedbackKind.Negative) {
+      this._ldClient.track('$ld:ai:feedback:user:negative', this._context, this._getTrackData(), 1);
+    }
+  }
+
+  trackSuccess(): void {
+    this._ldClient.track('$ld:ai:generation', this._context, this._getTrackData(), 1);
+  }
+
+  async trackOpenAI<
+    TRes extends {
+      usage?: {
+        total_tokens?: number;
+        prompt_tokens?: number;
+        completion_tokens?: number;
+      };
+    },
+  >(func: () => Promise<TRes>): Promise<TRes> {
+    const result = await this.trackDurationOf(func);
+    this.trackSuccess();
+    if (result.usage) {
+      this.trackTokens(createOpenAiUsage(result.usage));
+    }
+    return result;
+  }
+
+  trackBedrockConverse<
+    TRes extends {
+      $metadata: { httpStatusCode?: number };
+      metrics?: { latencyMs?: number };
+      usage?: {
+        inputTokens?: number;
+        outputTokens?: number;
+        totalTokens?: number;
+      };
+    },
+  >(res: TRes): TRes {
+    if (res.$metadata?.httpStatusCode === 200) {
+      this.trackSuccess();
+    } else if (res.$metadata?.httpStatusCode && res.$metadata.httpStatusCode >= 400) {
+      // Potentially add error tracking in the future.
+ } + if (res.metrics && res.metrics.latencyMs) { + this.trackDuration(res.metrics.latencyMs); + } + if (res.usage) { + this.trackTokens(createBedrockTokenUsage(res.usage)); + } + return res; + } + + trackTokens(tokens: LDTokenUsage): void { + const trackData = this._getTrackData(); + if (tokens.total > 0) { + this._ldClient.track('$ld:ai:tokens:total', this._context, trackData, tokens.total); + } + if (tokens.input > 0) { + this._ldClient.track('$ld:ai:tokens:input', this._context, trackData, tokens.input); + } + if (tokens.output > 0) { + this._ldClient.track('$ld:ai:tokens:output', this._context, trackData, tokens.output); + } + } +} diff --git a/packages/sdk/server-ai/src/LDClientMin.ts b/packages/sdk/server-ai/src/LDClientMin.ts new file mode 100644 index 0000000000..2158c569cc --- /dev/null +++ b/packages/sdk/server-ai/src/LDClientMin.ts @@ -0,0 +1,16 @@ +import { LDContext, LDFlagValue } from '@launchdarkly/js-server-sdk-common'; + +/** + * Interface which represents the required interface components for a sever SDK + * to work with the AI SDK. + */ +export interface LDClientMin { + variation( + key: string, + context: LDContext, + defaultValue: LDFlagValue, + callback?: (err: any, res: LDFlagValue) => void, + ): Promise; + + track(key: string, context: LDContext, data?: any, metricValue?: number): void; +} diff --git a/packages/sdk/server-ai/src/api/LDAIClient.ts b/packages/sdk/server-ai/src/api/LDAIClient.ts new file mode 100644 index 0000000000..ceaf936af3 --- /dev/null +++ b/packages/sdk/server-ai/src/api/LDAIClient.ts @@ -0,0 +1,70 @@ +import { LDContext } from '@launchdarkly/js-server-sdk-common'; + +import { LDAIConfig, LDGenerationConfig } from './config/LDAIConfig'; + +/** + * Interface for performing AI operations using LaunchDarkly. + */ + +export interface LDAIClient { + /** + * Parses and interpolates a template string with the provided variables. + * + * @param template - The template string to be parsed and interpolated. + * @param variables - An object containing the variables to be used for interpolation. + * @returns The interpolated string. + */ + interpolateTemplate(template: string, variables: Record): string; + + /** + * Retrieves and processes a prompt template based on the provided key, LaunchDarkly context, and + * variables. + * + * @param key - A unique identifier for the prompt template. This key is used to fetch the correct + * prompt from storage or configuration. + * @param context - The LaunchDarkly context object that contains relevant information about the + * current environment, user, or session. This context may influence how the prompt is processed + * or personalized. + * @param variables - A map of key-value pairs representing dynamic variables to be injected into + * the prompt template. The keys correspond to placeholders within the template, and the values + * are the corresponding replacements. + * @param defaultValue - A fallback value to be used if the prompt template associated with the + * key is not found or if any errors occur during processing. + * + * @returns The processed prompt after all variables have been substituted in the stored prompt + * template. If the prompt cannot be retrieved or processed, the `defaultValue` is returned. 
+ * + * @example + * ``` + * const key = "welcome_prompt"; + * const context = {...}; + * const variables = {username: 'john'}; + * const defaultValue = {}; + * + * const result = modelConfig(key, context, defaultValue, variables); + * // Output: + * { + * modelId: "gpt-4o", + * temperature: 0.2, + * maxTokens: 4096, + * userDefinedKey: "myValue", + * prompt: [ + * { + * role: "system", + * content: "You are an amazing GPT." + * }, + * { + * role: "user", + * content: "Explain how you're an amazing GPT." + * } + * ] + * } + * ``` + */ + modelConfig( + key: string, + context: LDContext, + defaultValue: TDefault, + variables?: Record, + ): Promise; +} diff --git a/packages/sdk/server-ai/src/api/config/LDAIConfig.ts b/packages/sdk/server-ai/src/api/config/LDAIConfig.ts new file mode 100644 index 0000000000..0d47f6eac0 --- /dev/null +++ b/packages/sdk/server-ai/src/api/config/LDAIConfig.ts @@ -0,0 +1,64 @@ +import { LDAIConfigTracker } from './LDAIConfigTracker'; + +/** + * Configuration related to the model. + */ +export interface LDModelConfig { + /** + * The ID of the model. + */ + modelId?: string; + + /** + * And additional model specific information. + */ + [index: string]: unknown; +} + +/** + * Information about prompts. + */ +export interface LDMessage { + /** + * The role of the prompt. + */ + role: 'user' | 'assistant' | 'system'; + /** + * Content for the prompt. + */ + content: string; +} + +/** + * Configuration which affects generation. + */ +export interface LDGenerationConfig { + /** + * Optional model configuration. + */ + model?: LDModelConfig; + /** + * Optional prompt data. + */ + prompt?: LDMessage[]; +} + +/** + * AI Config value and tracker. + */ +export interface LDAIConfig { + /** + * The result of the AI Config customization. + */ + config: LDGenerationConfig; + + /** + * A tracker which can be used to generate analytics. + */ + tracker: LDAIConfigTracker; + + /** + * Whether the configuration is enabled. + */ + enabled: boolean; +} diff --git a/packages/sdk/server-ai/src/api/config/LDAIConfigTracker.ts b/packages/sdk/server-ai/src/api/config/LDAIConfigTracker.ts new file mode 100644 index 0000000000..505153f8e3 --- /dev/null +++ b/packages/sdk/server-ai/src/api/config/LDAIConfigTracker.ts @@ -0,0 +1,79 @@ +import { LDFeedbackKind, LDTokenUsage } from '../metrics'; + +/** + * The LDAIConfigTracker is used to track various details about AI operations. + */ +export interface LDAIConfigTracker { + /** + * Track the duration of generation. + * + * Ideally this would not include overhead time such as network communication. + * + * @param durationMs The duration in milliseconds. + */ + trackDuration(durationMs: number): void; + + /** + * Track information about token usage. + * + * @param tokens Token usage information. + */ + trackTokens(tokens: LDTokenUsage): void; + + /** + * Generation was successful. + */ + trackSuccess(): void; + + /** + * Track sentiment about the generation. + * + * @param feedback Feedback about the generation. + */ + trackFeedback(feedback: { kind: LDFeedbackKind }): void; + + /** + * Track the duration of execution of the provided function. + * @param func The function to track the duration of. + * @returns The result of the function. + */ + trackDurationOf(func: () => Promise): Promise; + + /** + * Track an OpenAI operation. + * + * @param func Function which executes the operation. + * @returns The result of the operation. 
+ */ + trackOpenAI< + TRes extends { + usage?: { + total_tokens?: number; + prompt_tokens?: number; + completion_tokens?: number; + }; + }, + >( + func: () => Promise, + ): Promise; + + /** + * Track an operation which uses Bedrock. + * + * @param res The result of the Bedrock operation. + * @returns The input operation. + */ + trackBedrockConverse< + TRes extends { + $metadata: { httpStatusCode?: number }; + metrics?: { latencyMs?: number }; + usage?: { + inputTokens?: number; + outputTokens?: number; + totalTokens?: number; + }; + }, + >( + res: TRes, + ): TRes; +} diff --git a/packages/sdk/server-ai/src/api/config/index.ts b/packages/sdk/server-ai/src/api/config/index.ts new file mode 100644 index 0000000000..1c07d5c3a4 --- /dev/null +++ b/packages/sdk/server-ai/src/api/config/index.ts @@ -0,0 +1,2 @@ +export * from './LDAIConfig'; +export { LDAIConfigTracker } from './LDAIConfigTracker'; diff --git a/packages/sdk/server-ai/src/api/index.ts b/packages/sdk/server-ai/src/api/index.ts new file mode 100644 index 0000000000..c6c70867bb --- /dev/null +++ b/packages/sdk/server-ai/src/api/index.ts @@ -0,0 +1,3 @@ +export * from './config'; +export * from './metrics'; +export * from './LDAIClient'; diff --git a/packages/sdk/server-ai/src/api/metrics/BedrockTokenUsage.ts b/packages/sdk/server-ai/src/api/metrics/BedrockTokenUsage.ts new file mode 100644 index 0000000000..0b7fc40e55 --- /dev/null +++ b/packages/sdk/server-ai/src/api/metrics/BedrockTokenUsage.ts @@ -0,0 +1,13 @@ +import { LDTokenUsage } from './LDTokenUsage'; + +export function createBedrockTokenUsage(data: { + totalTokens?: number; + inputTokens?: number; + outputTokens?: number; +}): LDTokenUsage { + return { + total: data.totalTokens || 0, + input: data.inputTokens || 0, + output: data.outputTokens || 0, + }; +} diff --git a/packages/sdk/server-ai/src/api/metrics/LDFeedbackKind.ts b/packages/sdk/server-ai/src/api/metrics/LDFeedbackKind.ts new file mode 100644 index 0000000000..d066241de8 --- /dev/null +++ b/packages/sdk/server-ai/src/api/metrics/LDFeedbackKind.ts @@ -0,0 +1,13 @@ +/** + * Feedback about the generated content. + */ +export enum LDFeedbackKind { + /** + * The sentiment was positive. + */ + Positive = 'positive', + /** + * The sentiment is negative. + */ + Negative = 'negative', +} diff --git a/packages/sdk/server-ai/src/api/metrics/LDTokenUsage.ts b/packages/sdk/server-ai/src/api/metrics/LDTokenUsage.ts new file mode 100644 index 0000000000..375cbbb625 --- /dev/null +++ b/packages/sdk/server-ai/src/api/metrics/LDTokenUsage.ts @@ -0,0 +1,19 @@ +/** + * Information about token usage. + */ +export interface LDTokenUsage { + /** + * Combined token usage. + */ + total: number; + + /** + * Number of tokens in the input. + */ + input: number; + + /** + * Number of tokens in the output. + */ + output: number; +} diff --git a/packages/sdk/server-ai/src/api/metrics/OpenAiUsage.ts b/packages/sdk/server-ai/src/api/metrics/OpenAiUsage.ts new file mode 100644 index 0000000000..b2316087e3 --- /dev/null +++ b/packages/sdk/server-ai/src/api/metrics/OpenAiUsage.ts @@ -0,0 +1,13 @@ +import { LDTokenUsage } from './LDTokenUsage'; + +export function createOpenAiUsage(data: { + total_tokens?: number; + prompt_tokens?: number; + completion_tokens?: number; +}): LDTokenUsage { + return { + total: data.total_tokens ?? 0, + input: data.prompt_tokens ?? 0, + output: data.completion_tokens ?? 
+  };
+}
diff --git a/packages/sdk/server-ai/src/api/metrics/UnderScoreTokenUsage.ts b/packages/sdk/server-ai/src/api/metrics/UnderScoreTokenUsage.ts
new file mode 100644
index 0000000000..201d95aa94
--- /dev/null
+++ b/packages/sdk/server-ai/src/api/metrics/UnderScoreTokenUsage.ts
@@ -0,0 +1,9 @@
+import { LDTokenUsage } from './LDTokenUsage';
+
+export function createUnderscoreTokenUsage(data: any): LDTokenUsage {
+  return {
+    total: data.total_tokens || 0,
+    input: data.prompt_tokens || 0,
+    output: data.completion_tokens || 0,
+  };
+}
diff --git a/packages/sdk/server-ai/src/api/metrics/index.ts b/packages/sdk/server-ai/src/api/metrics/index.ts
new file mode 100644
index 0000000000..4bebfd1167
--- /dev/null
+++ b/packages/sdk/server-ai/src/api/metrics/index.ts
@@ -0,0 +1,4 @@
+export * from './BedrockTokenUsage';
+export * from './LDFeedbackKind';
+export * from './LDTokenUsage';
+export * from './UnderScoreTokenUsage';
diff --git a/packages/sdk/server-ai/src/index.ts b/packages/sdk/server-ai/src/index.ts
new file mode 100644
index 0000000000..3f762c5fd0
--- /dev/null
+++ b/packages/sdk/server-ai/src/index.ts
@@ -0,0 +1,14 @@
+import { LDAIClient } from './api/LDAIClient';
+import { LDAIClientImpl } from './LDAIClientImpl';
+import { LDClientMin } from './LDClientMin';
+
+/**
+ * Initialize a new AI client. This client will be used to perform any AI operations.
+ * @param ldClient The base LaunchDarkly client.
+ * @returns A new AI client.
+ */
+export function initAi(ldClient: LDClientMin): LDAIClient {
+  return new LDAIClientImpl(ldClient);
+}
+
+export * from './api';
diff --git a/packages/sdk/server-ai/tsconfig.eslint.json b/packages/sdk/server-ai/tsconfig.eslint.json
new file mode 100644
index 0000000000..56c9b38305
--- /dev/null
+++ b/packages/sdk/server-ai/tsconfig.eslint.json
@@ -0,0 +1,5 @@
+{
+  "extends": "./tsconfig.json",
+  "include": ["/**/*.ts"],
+  "exclude": ["node_modules"]
+}
diff --git a/packages/sdk/server-ai/tsconfig.json b/packages/sdk/server-ai/tsconfig.json
new file mode 100644
index 0000000000..2d1f62a18a
--- /dev/null
+++ b/packages/sdk/server-ai/tsconfig.json
@@ -0,0 +1,21 @@
+{
+  "compilerOptions": {
+    "rootDir": "src",
+    "outDir": "dist",
+    "target": "es2017",
+    "lib": ["es6"],
+    "module": "commonjs",
+    "strict": true,
+    "noImplicitOverride": true,
+    // Needed for CommonJS modules.
+ "allowSyntheticDefaultImports": true, + "sourceMap": true, + "declaration": true, + "declarationMap": true, // enables importers to jump to source + "resolveJsonModule": true, + "stripInternal": true, + "moduleResolution": "node" + }, + "include": ["src"], + "exclude": ["**/*.test.ts", "dist", "node_modules", "__tests__"] +} diff --git a/packages/sdk/server-ai/tsconfig.ref.json b/packages/sdk/server-ai/tsconfig.ref.json new file mode 100644 index 0000000000..0c86b2c554 --- /dev/null +++ b/packages/sdk/server-ai/tsconfig.ref.json @@ -0,0 +1,7 @@ +{ + "extends": "./tsconfig.json", + "include": ["src/**/*"], + "compilerOptions": { + "composite": true + } +} diff --git a/packages/sdk/server-ai/typedoc.json b/packages/sdk/server-ai/typedoc.json new file mode 100644 index 0000000000..7ac616b544 --- /dev/null +++ b/packages/sdk/server-ai/typedoc.json @@ -0,0 +1,5 @@ +{ + "extends": ["../../../typedoc.base.json"], + "entryPoints": ["src/index.ts"], + "out": "docs" +} diff --git a/release-please-config.json b/release-please-config.json index 013be3c00b..e19f0a8453 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -31,6 +31,19 @@ "packages/telemetry/node-server-sdk-otel": {}, "packages/sdk/browser": { "bump-minor-pre-major": true + }, + "packages/sdk/server-ai": { + "bump-minor-pre-major": true, + "release-as": "0.1.0", + "extra-files": [{ + "type": "json", + "path": "examples/bedrock/package.json", + "jsonpath": "$.dependencies.@launchdarkly/server-sdk-ai" + },{ + "type": "json", + "path": "examples/openai/package.json", + "jsonpath": "$.dependencies.@launchdarkly/server-sdk-ai" + }] } }, "plugins": [ diff --git a/tsconfig.json b/tsconfig.json index e7ffa9fe3b..0215fb8fb3 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -60,6 +60,9 @@ }, { "path": "./packages/sdk/browser/contract-tests/entity/tsconfig.ref.json" + }, + { + "path": "./packages/sdk/server-ai/tsconfig.ref.json" } ] }