5 changes: 5 additions & 0 deletions .gitignore
@@ -22,3 +22,8 @@ yarn-error.log
dump.rdb
.wrangler
stats.html

# Environment files
.env
.env.local
.env.*.local
1 change: 1 addition & 0 deletions package.json
@@ -35,6 +35,7 @@
"packages/sdk/browser/contract-tests/adapter",
"packages/sdk/server-ai",
"packages/sdk/server-ai/examples/bedrock",
"packages/sdk/server-ai/examples/judge-evaluation",
"packages/sdk/server-ai/examples/openai",
"packages/sdk/server-ai/examples/tracked-chat",
"packages/sdk/server-ai/examples/vercel-ai",
16 changes: 16 additions & 0 deletions packages/sdk/server-ai/examples/judge-evaluation/.env.example
@@ -0,0 +1,16 @@
# LaunchDarkly SDK Key (required)
# Get this from your LaunchDarkly account settings
LAUNCHDARKLY_SDK_KEY=your-launchdarkly-sdk-key-here

# AI Config Key for the main chat configuration (optional)
# This is the key of your AI Config in LaunchDarkly
LAUNCHDARKLY_AI_CONFIG_KEY=sample-ai-config

# Judge Config Key for evaluation (optional)
# This is the key of your Judge AI Config in LaunchDarkly
LAUNCHDARKLY_JUDGE_KEY=ld-ai-judge-accuracy

# OpenAI API Key (required if using OpenAI provider)
# Get this from https://platform.openai.com/api-keys
OPENAI_API_KEY=your-openai-api-key-here

72 changes: 72 additions & 0 deletions packages/sdk/server-ai/examples/judge-evaluation/README.md
@@ -0,0 +1,72 @@
# LaunchDarkly AI SDK Judge Evaluation Example

This package demonstrates the integration of LaunchDarkly's AI SDK Judge functionality for evaluating AI responses using AI Configs with `mode: "judge"`.

## Installation and Build

When running as part of the js-core monorepo, the project uses local dependencies.
Those dependencies therefore need to be built first.

In the root of the repository, run:

```bash
yarn
```

and then:

```bash
yarn build
```

## AI Config Setup

Make sure you have an AI Config configured in LaunchDarkly with `mode: "judge"`:

1. Install Judges in your AI Configs
1. Create an AI Config in LaunchDarkly:
- Navigate to the AI Configs section in your LaunchDarkly dashboard
- Create a new AI Config with the key `sample-ai-config`
- Add a variation with the following settings:
- **Model Selection**: Select "OpenAI" as the provider and "gpt-3.5-turbo" as the model
- **Messages**: Add a system message with the content: "You are a helpful assistant for {{companyName}}. You should be friendly and informative."
- Save the variation
- Update the default target rule to use the newly created variation
- Attach one or more judges to your config

## Configuration

Before running the example, make sure to set the following environment variables:

1. Copy the example environment file:
```bash
cp .env.example .env
```

2. Edit `.env` and set the following environment variables:
- `LAUNCHDARKLY_SDK_KEY`: Your LaunchDarkly SDK key (required)
- `LAUNCHDARKLY_AI_CONFIG_KEY`: Your AI Config key (defaults to 'sample-ai-config')
- `LAUNCHDARKLY_JUDGE_KEY`: Your judge AI Config key (defaults to 'ld-ai-judge-accuracy')
- `OPENAI_API_KEY`: Your OpenAI API key (required if using OpenAI provider)

## Usage

The main script (`src/index.ts`) demonstrates how to (a condensed sketch follows the list):

1. Initialize the LaunchDarkly SDK
1. Set up a user context
1. Initialize the LaunchDarkly AI client
1. Create a chat for an AI Config with attached judges
1. Create a judge for direct evaluation
1. Evaluate AI text using the `evaluate()` method
1. Handle evaluation results and errors
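A condensed sketch of the chat portion of that flow, mirroring `src/index.ts` in this example (error handling and the direct judge evaluation are omitted here):

```typescript
import { init, type LDContext } from '@launchdarkly/node-server-sdk';
import { initAi } from '@launchdarkly/server-sdk-ai';

// In the full example, dotenv loads these keys from .env.
const ldClient = init(process.env.LAUNCHDARKLY_SDK_KEY!);
await ldClient.waitForInitialization({ timeout: 10 });

const context: LDContext = { kind: 'user', key: 'example-user-key', name: 'Sandy' };
const aiClient = initAi(ldClient);

// Judges attached to the AI Config are run automatically when the chat is invoked.
const chat = await aiClient.createChat('sample-ai-config', context, { enabled: false }, {
  company_name: 'LaunchDarkly',
});

if (chat) {
  const response = await chat.invoke('How can LaunchDarkly help me?');
  console.log('Chat Response:', response.message.content);

  // Evaluation results from the attached judges resolve asynchronously.
  console.log('Judge results:', JSON.stringify(await response.evaluations, null, 2));
}
```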

To run the example (in the judge-evaluation directory):

```bash
yarn start
```

## Note

This example uses the Judge functionality to evaluate AI responses. Make sure your LaunchDarkly AI Configs are set up correctly with `mode: "judge"` and include the necessary evaluation prompts and metrics.
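For reference, the direct evaluation portion of `src/index.ts` condenses to roughly the following; the `trackEvalScores` call is only suggested in a comment there, so treat it as an illustration of where scores could be attributed rather than a required step:

```typescript
// Assumes `aiClient` and `context` are set up as in the sketch above.
// The argument order follows src/index.ts in this example.
const judge = await aiClient.createJudge(
  'ld-ai-judge-accuracy', // judge AI Config key
  context,
  { enabled: false },
  undefined,
  'langchain',
);

if (judge) {
  const input = 'You are a helpful assistant for the company LaunchDarkly. How can you help me?';
  const output =
    'I can answer any question you have except for questions about the company LaunchDarkly.';

  const judgeResponse = await judge.evaluate(input, output);
  console.log('Judge Response:', judgeResponse);

  // Optionally attribute the scores to the AI Config being evaluated,
  // as suggested in the example's comments:
  // aiConfig.tracker.trackEvalScores(judgeResponse?.evals);
}
```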
37 changes: 37 additions & 0 deletions packages/sdk/server-ai/examples/judge-evaluation/package.json
@@ -0,0 +1,37 @@
{
"name": "@launchdarkly/server-sdk-ai-judge",
"version": "1.0.0",
"description": "Example demonstrating LaunchDarkly AI SDK judge functionality for evaluating AI responses",
"type": "module",
"scripts": {
"build": "tsc",
"lint": "npx eslint . --ext .ts",
"start": "yarn build && node ./dist/index.js"
},
"dependencies": {
"@launchdarkly/node-server-sdk": "^9.0.0",
"@launchdarkly/server-sdk-ai": "^0.12.3",
"@launchdarkly/server-sdk-ai-langchain": "^0.1.0",
"@launchdarkly/server-sdk-ai-openai": "^0.1.0",
"@launchdarkly/server-sdk-ai-vercel": "^0.1.0",
"dotenv": "^16.0.0"
},
"devDependencies": {
"@trivago/prettier-plugin-sort-imports": "^4.1.1",
"@tsconfig/node20": "20.1.4",
"@typescript-eslint/eslint-plugin": "^6.20.0",
"@typescript-eslint/parser": "^6.20.0",
"eslint": "^8.45.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-airbnb-typescript": "^17.1.0",
"eslint-config-prettier": "^8.8.0",
"eslint-plugin-import": "^2.27.5",
"eslint-plugin-jest": "^27.6.3",
"eslint-plugin-prettier": "^5.0.0",
"jest": "^29.7.0",
"prettier": "^3.0.0",
"rimraf": "^5.0.5",
"typedoc": "0.25.0",
"typescript": "^5.5.3"
}
}
106 changes: 106 additions & 0 deletions packages/sdk/server-ai/examples/judge-evaluation/src/index.ts
@@ -0,0 +1,106 @@
/* eslint-disable no-console */
import dotenv from 'dotenv';

import { init, type LDContext } from '@launchdarkly/node-server-sdk';
import { initAi } from '@launchdarkly/server-sdk-ai';

dotenv.config({ override: true });

// Environment variables
const sdkKey = process.env.LAUNCHDARKLY_SDK_KEY;
const aiConfigKey = process.env.LAUNCHDARKLY_AI_CONFIG_KEY || 'sample-ai-config';
const judgeKey = process.env.LAUNCHDARKLY_JUDGE_KEY || 'ld-ai-judge-accuracy';

// Validate required environment variables
if (!sdkKey) {
console.error('*** Please set the LAUNCHDARKLY_SDK_KEY env first');
process.exit(1);
}

// Initialize LaunchDarkly client
const ldClient = init(sdkKey);

// Set up the context properties. This context should appear on your LaunchDarkly contexts dashboard
// soon after you run the demo.
const context: LDContext = {
kind: 'user',
key: 'example-user-key',
name: 'Sandy',
};

async function main(): Promise<void> {
try {
await ldClient.waitForInitialization({ timeout: 10 });
console.log('*** SDK successfully initialized');
} catch (error) {
console.log(`*** SDK failed to initialize: ${error}`);
process.exit(1);
}

const aiClient = initAi(ldClient);

try {
// Example using the chat functionality which automates the judge evaluation
const defaultValue = {
enabled: false,
};

const chat = await aiClient.createChat(aiConfigKey, context, defaultValue, {
company_name: 'LaunchDarkly',
});

if (!chat) {
console.log('*** AI chat configuration is not enabled');
process.exit(0);
}

console.log('\n*** Starting chat:');
const userInput = 'How can LaunchDarkly help me?';
console.log('User Input:', userInput);

// The invoke method will automatically evaluate the chat response with any judges defined in the AI config
const chatResponse = await chat.invoke(userInput);
console.log('Chat Response:', chatResponse.message.content);

// Log judge evaluation results with full detail
const evalResults = await chatResponse.evaluations;
console.log('Judge results:', JSON.stringify(evalResults, null, 2));

// Example of using the judge functionality with direct input and output
// Get AI judge configuration from LaunchDarkly
const judge = await aiClient.createJudge(
judgeKey,
context,
{ enabled: false },
undefined,
'langchain',
);

if (!judge) {
console.log('*** AI judge configuration is not enabled');
process.exit(0);
}

console.log('\n*** Starting judge evaluation of direct input and output:');
const input = 'You are a helpful assistant for the company LaunchDarkly. How can you help me?';
const output =
'I can answer any question you have except for questions about the company LaunchDarkly.';

console.log('Input:', input);
console.log('Output:', output);

const judgeResponse = await judge.evaluate(input, output);

// Track the judge evaluation scores on the tracker for the aiConfig you are evaluating
// Example:
// aiConfig.tracker.trackEvalScores(judgeResponse?.evals);

console.log('Judge Response:', judgeResponse);

console.log('Success.');
} catch (err) {
console.error('Error:', err);
}
}

main();
18 changes: 18 additions & 0 deletions packages/sdk/server-ai/examples/judge-evaluation/tsconfig.json
@@ -0,0 +1,18 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "ESNext",
"moduleResolution": "node",
"esModuleInterop": true,
"allowSyntheticDefaultImports": true,
"strict": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"outDir": "./dist",
"rootDir": "./src",
"declaration": true,
"sourceMap": true
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}
1 change: 1 addition & 0 deletions packages/sdk/server-ai/examples/openai/package.json
@@ -23,6 +23,7 @@
"dependencies": {
"@launchdarkly/node-server-sdk": "^9.7.1",
"@launchdarkly/server-sdk-ai": "0.13.0",
"@launchdarkly/server-sdk-ai-openai": "^0.1.0",
"openai": "^4.58.1"
},
"devDependencies": {
19 changes: 11 additions & 8 deletions packages/sdk/server-ai/examples/openai/src/index.ts
@@ -3,6 +3,7 @@ import { OpenAI } from 'openai';

import { init, LDContext } from '@launchdarkly/node-server-sdk';
import { initAi } from '@launchdarkly/server-sdk-ai';
import { OpenAIProvider } from '@launchdarkly/server-sdk-ai-openai';

// Environment variables
const sdkKey = process.env.LAUNCHDARKLY_SDK_KEY;
@@ -46,7 +47,7 @@ async function main(): Promise<void> {

const aiClient = initAi(ldClient);

const aiConfig = await aiClient.config(
const aiConfig = await aiClient.completionConfig(
aiConfigKey,
context,
{
@@ -63,13 +64,15 @@ async function main(): Promise<void> {
process.exit(0);
}

const completion = await aiConfig.tracker.trackOpenAIMetrics(async () =>
client.chat.completions.create({
messages: aiConfig.messages || [],
model: aiConfig.model?.name || 'gpt-4',
temperature: (aiConfig.model?.parameters?.temperature as number) ?? 0.5,
max_tokens: (aiConfig.model?.parameters?.maxTokens as number) ?? 4096,
}),
const completion = await aiConfig.tracker.trackMetricsOf(
OpenAIProvider.createAIMetrics,
async () =>
client.chat.completions.create({
messages: aiConfig.messages || [],
model: aiConfig.model?.name || 'gpt-4',
temperature: (aiConfig.model?.parameters?.temperature as number) ?? 0.5,
max_tokens: (aiConfig.model?.parameters?.maxTokens as number) ?? 4096,
}),
);

console.log('AI Response:', completion.choices[0]?.message.content);
@@ -46,7 +46,7 @@ async function main(): Promise<void> {
// };

// Get AI chat configuration from LaunchDarkly
const chat = await aiClient.initChat(aiConfigKey, context, defaultValue, {
const chat = await aiClient.createChat(aiConfigKey, context, defaultValue, {
companyName: 'LaunchDarkly',
});

3 changes: 1 addition & 2 deletions packages/sdk/server-ai/examples/vercel-ai/src/index.ts
@@ -40,8 +40,7 @@ async function main() {
const aiClient = initAi(client);

// Get AI configuration from LaunchDarkly
const aiConfig = await aiClient.config(aiConfigKey, context, {
model: { name: 'gpt-4' },
const aiConfig = await aiClient.completionConfig(aiConfigKey, context, {
enabled: false,
});

5 changes: 5 additions & 0 deletions release-please-config.json
@@ -96,6 +96,11 @@
"type": "json",
"path": "examples/vercel-ai/package.json",
"jsonpath": "$.dependencies['@launchdarkly/server-sdk-ai']"
},
{
"type": "json",
"path": "examples/judge-evaluation/package.json",
"jsonpath": "$.dependencies['@launchdarkly/server-sdk-ai']"
}
]
},