Skip to content
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion .github/workflows/server-ai.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ on:
- '**.md'

jobs:
build-test-node-server-otel:
build-test-server-sdk-ai:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
Expand All @@ -35,3 +35,8 @@ jobs:
yarn workspaces focus @launchdarkly/hello-openai
yarn workspace @launchdarkly/hello-openai lint
yarn workspaces foreach -pR --topological-dev --from '@launchdarkly/hello-openai' run build
- name: Build Vercel AI example
run: |
yarn workspaces focus @launchdarkly/hello-vercel-ai
yarn workspace @launchdarkly/hello-vercel-ai lint
yarn workspaces foreach -pR --topological-dev --from '@launchdarkly/hello-vercel-ai' run build
1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@
"packages/sdk/server-ai",
"packages/sdk/server-ai/examples/bedrock",
"packages/sdk/server-ai/examples/openai",
"packages/sdk/server-ai/examples/vercel-ai",
"packages/telemetry/browser-telemetry",
"contract-tests",
"packages/sdk/combined-browser"
Expand Down
49 changes: 49 additions & 0 deletions packages/sdk/server-ai/examples/vercel-ai/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
# LaunchDarkly AI SDK for Vercel AI Example

This package demonstrates the integration of LaunchDarkly's AI SDK with OpenAI via the Vercel AI SDK, allowing you to leverage LaunchDarkly's AI Config capabilities in AI-powered applications built on the Vercel AI SDK.

## Installation and Build

When running as part of the js-core mono-repo the project will use local dependencies.
As such, those dependencies need to be built first.

In the root of the repository run:

```bash
yarn
```

And then

```bash
yarn build
```

## Configuration

Before running the example, make sure to set the following environment variables:

- `LAUNCHDARKLY_SDK_KEY`: Your LaunchDarkly SDK key
- `LAUNCHDARKLY_AI_CONFIG_KEY`: Your LaunchDarkly AI Config key (defaults to 'sample-ai-config' if not set)
- `OPENAI_API_KEY`: Your OpenAI API key

## Usage

The main script (`src/index.ts`) demonstrates how to:

1. Initialize the LaunchDarkly SDK
2. Set up a user context
3. Initialize the LaunchDarkly AI client
4. Retrieve an AI model configuration
5. Send a prompt to a Vercel AI Model (OpenAI)
6. Track token usage

To run the example (in the vercel-ai directory):

```bash
yarn start
```

## Note

This example uses the Vercel AI SDK with OpenAI models. Make sure your LaunchDarkly AI Config is set up correctly to work with OpenAI's models and API structure.
46 changes: 46 additions & 0 deletions packages/sdk/server-ai/examples/vercel-ai/package.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
{
"name": "@launchdarkly/hello-vercel-ai",
"version": "0.1.0",
"description": "LaunchDarkly AI SDK for Node.js with Vercel AI",
"private": true,
"main": "dist/index.js",
"types": "dist/index.d.ts",
"scripts": {
"build": "tsc",
"start": "yarn build && node ./dist/index.js",
"lint": "npx eslint . --ext .ts",
"prettier": "prettier --write '**/*.@(js|ts|tsx|json|css)' --ignore-path ../../../.prettierignore",
"lint:fix": "yarn run lint --fix",
"check": "yarn prettier && yarn lint && yarn build && yarn test"
},
"keywords": [
"launchdarkly",
"ai",
"llm"
],
"author": "LaunchDarkly",
"license": "Apache-2.0",
"dependencies": {
"@ai-sdk/openai": "2.0.30",
"@launchdarkly/node-server-sdk": "9.7.1",
"@launchdarkly/server-sdk-ai": "0.11.3",
"ai": "5.0.0",
"dotenv": "16.5.0",
"lodash": "4.17.21",
"openai": "4.96.2",
"zod": "^4.1.8"
},
"devDependencies": {
"@tsconfig/node20": "20.1.4",
"@types/lodash": "4.17.16",
"@types/node": "22.15.3",
"typescript": "5.8.3"
},
"directories": {
"example": "example"
},
"repository": {
"type": "git",
"url": "github.com/launchdarkly/js-core"
}
}
85 changes: 85 additions & 0 deletions packages/sdk/server-ai/examples/vercel-ai/src/index.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
/* eslint-disable no-console */
import { openai } from '@ai-sdk/openai';
import { generateText, streamText } from 'ai';

import { init, type LDClient, type LDContext } from '@launchdarkly/node-server-sdk';
import { initAi } from '@launchdarkly/server-sdk-ai';

// Environment variables
const sdkKey = process.env.LAUNCHDARKLY_SDK_KEY ?? '';
const aiConfigKey = process.env.LAUNCHDARKLY_AI_CONFIG_KEY || 'sample-ai-config';

// Validate required environment variables
if (!sdkKey) {
console.error('*** Please set the LAUNCHDARKLY_SDK_KEY env first');
process.exit(1);
}

let client: LDClient | undefined;

async function main() {
// Initialize LaunchDarkly client
client = init(sdkKey);

// Set up the context properties. This context should appear on your LaunchDarkly contexts dashboard
const context: LDContext = {
kind: 'user',
key: 'example-user-key',
name: 'Sandy',
};

try {
await client.waitForInitialization({ timeout: 10 });
console.log('*** SDK successfully initialized');
} catch (error) {
console.log(`*** SDK failed to initialize: ${error}`);
process.exit(1);
}

const aiClient = initAi(client);

// Get AI configuration from LaunchDarkly
const aiConfig = await aiClient.config(aiConfigKey, context, { model: { name: 'gpt-4' } });

if (!aiConfig.enabled) {
console.log('*** AI configuration is not enabled');
process.exit(0);
}

console.log('Using model:', aiConfig.model?.name);

// Example of using generateText (non-streaming)
console.log('\n*** Generating text:');
try {
const userMessage = {
role: 'user' as const,
content: 'What can you help me with?',
};

const result = await aiConfig.tracker.trackVercelAISDKGenerateTextMetrics(() =>
generateText(aiConfig.toVercelAISDK(openai, { nonInterpolatedMessages: [userMessage] })),
);
console.log('Response:', result.text);

process.stdout.write('Streaming Response: ');
const streamResult = aiConfig.tracker.trackVercelAISDKStreamTextMetrics(() =>
streamText(aiConfig.toVercelAISDK(openai, { nonInterpolatedMessages: [userMessage] })),
);

// eslint-disable-next-line no-restricted-syntax
for await (const textPart of streamResult.textStream) {
process.stdout.write(textPart);
}

console.log('\nSuccess.');
} catch (err) {
console.error('Error:', err);
}
}

main()
.catch((e) => console.error(e))
.finally(async () => {
await client?.flush();
client?.close();
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
{
"extends": "./tsconfig.json",
"include": ["/**/*.ts", "/**/*.tsx"],
"exclude": ["node_modules"]
}
22 changes: 22 additions & 0 deletions packages/sdk/server-ai/examples/vercel-ai/tsconfig.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
{
"extends": "@tsconfig/node20/tsconfig.json",
"compilerOptions": {
"noEmit": false,
"outDir": "dist",
"baseUrl": ".",
"allowUnusedLabels": false,
"allowUnreachableCode": false,
"noFallthroughCasesInSwitch": true,
"noUncheckedIndexedAccess": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"forceConsistentCasingInFileNames": true,
"declaration": true,
"sourceMap": true,
"resolveJsonModule": true,
"module": "CommonJS",
"moduleResolution": "Node"
},
"include": ["src"],
"exclude": ["dist", "node_modules"]
}
Loading