Commit 1f3f54a
feat: Change the typing for the LDAIConfig. (#688)
Flatten the AI config, moving `model` and `prompt` to the top level, and change `LDAIDefaults` to match. Remove the generation config type, which was the type of the `config` field. Add `temperature` and `maxTokens` as optional fields on the model config, and make `modelId` required. Update the examples to include `temperature` and `maxTokens`. SDK-912
1 parent 4cf34f9 · commit 1f3f54a
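
In practice, the flattening changes how consumers read the customized values: what used to live under `aiConfig.config` now sits directly on the returned `LDAIConfig`. A minimal before/after sketch (the flag key, context, and variable names are illustrative, not taken from this commit):

```ts
// Before this commit: generation settings were nested under a `config` field.
// const { config, tracker, enabled } = await aiClient.modelConfig(key, context, defaults);
// const modelId = config.model?.modelId;

// After this commit: `model` and `prompt` are top-level on LDAIConfig.
const aiConfig = await aiClient.modelConfig(
  'my-ai-flag',
  context,
  {
    model: { modelId: 'gpt-4', temperature: 0.5, maxTokens: 4096 }, // modelId is now required
    prompt: [{ role: 'system', content: 'Hello {{name}}' }],
    enabled: true, // optional on LDAIDefaults
  },
  { name: 'John' }, // variables for prompt interpolation
);
const modelId = aiConfig.model?.modelId; // no `.config` indirection anymore
```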

File tree: 6 files changed (+64, -66 lines)

packages/sdk/server-ai/__tests__/LDAIClientImpl.test.ts

Lines changed: 18 additions & 19 deletions
```diff
@@ -1,6 +1,6 @@
 import { LDContext } from '@launchdarkly/js-server-sdk-common';
 
-import { LDGenerationConfig } from '../src/api/config';
+import { LDAIDefaults } from '../src/api/config';
 import { LDAIClientImpl } from '../src/LDAIClientImpl';
 import { LDClientMin } from '../src/LDClientMin';
 
@@ -32,13 +32,14 @@ it('handles empty variables in template interpolation', () => {
 it('returns model config with interpolated prompts', async () => {
   const client = new LDAIClientImpl(mockLdClient);
   const key = 'test-flag';
-  const defaultValue: LDGenerationConfig = {
+  const defaultValue: LDAIDefaults = {
     model: { modelId: 'test', name: 'test-model' },
     prompt: [],
+    enabled: true,
   };
 
   const mockVariation = {
-    model: { modelId: 'example-provider', name: 'imagination' },
+    model: { modelId: 'example-provider', name: 'imagination', temperature: 0.7, maxTokens: 4096 },
     prompt: [
       { role: 'system', content: 'Hello {{name}}' },
       { role: 'user', content: 'Score: {{score}}' },
@@ -55,13 +56,11 @@ it('returns model config with interpolated prompts', async () => {
   const result = await client.modelConfig(key, testContext, defaultValue, variables);
 
   expect(result).toEqual({
-    config: {
-      model: { modelId: 'example-provider', name: 'imagination' },
-      prompt: [
-        { role: 'system', content: 'Hello John' },
-        { role: 'user', content: 'Score: 42' },
-      ],
-    },
+    model: { modelId: 'example-provider', name: 'imagination', temperature: 0.7, maxTokens: 4096 },
+    prompt: [
+      { role: 'system', content: 'Hello John' },
+      { role: 'user', content: 'Score: 42' },
+    ],
     tracker: expect.any(Object),
     enabled: true,
   });
@@ -70,7 +69,7 @@ it('returns model config with interpolated prompts', async () => {
 it('includes context in variables for prompt interpolation', async () => {
   const client = new LDAIClientImpl(mockLdClient);
   const key = 'test-flag';
-  const defaultValue: LDGenerationConfig = {
+  const defaultValue: LDAIDefaults = {
     model: { modelId: 'test', name: 'test-model' },
     prompt: [],
   };
@@ -84,13 +83,13 @@ it('includes context in variables for prompt interpolation', async () => {
 
   const result = await client.modelConfig(key, testContext, defaultValue);
 
-  expect(result.config.prompt?.[0].content).toBe('User key: test-user');
+  expect(result.prompt?.[0].content).toBe('User key: test-user');
 });
 
 it('handles missing metadata in variation', async () => {
   const client = new LDAIClientImpl(mockLdClient);
   const key = 'test-flag';
-  const defaultValue: LDGenerationConfig = {
+  const defaultValue: LDAIDefaults = {
     model: { modelId: 'test', name: 'test-model' },
     prompt: [],
   };
@@ -105,10 +104,8 @@ it('handles missing metadata in variation', async () => {
   const result = await client.modelConfig(key, testContext, defaultValue);
 
   expect(result).toEqual({
-    config: {
-      model: { modelId: 'example-provider', name: 'imagination' },
-      prompt: [{ role: 'system', content: 'Hello' }],
-    },
+    model: { modelId: 'example-provider', name: 'imagination' },
+    prompt: [{ role: 'system', content: 'Hello' }],
     tracker: expect.any(Object),
     enabled: false,
   });
@@ -117,17 +114,19 @@ it('handles missing metadata in variation', async () => {
 it('passes the default value to the underlying client', async () => {
   const client = new LDAIClientImpl(mockLdClient);
   const key = 'non-existent-flag';
-  const defaultValue: LDGenerationConfig = {
+  const defaultValue: LDAIDefaults = {
     model: { modelId: 'default-model', name: 'default' },
     prompt: [{ role: 'system', content: 'Default prompt' }],
+    enabled: true,
   };
 
   mockLdClient.variation.mockResolvedValue(defaultValue);
 
   const result = await client.modelConfig(key, testContext, defaultValue);
 
   expect(result).toEqual({
-    config: defaultValue,
+    model: defaultValue.model,
+    prompt: defaultValue.prompt,
     tracker: expect.any(Object),
     enabled: false,
  });
```
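
For context, these tests construct the client from a mocked `LDClientMin`; that setup sits outside this diff. A minimal sketch of what the test file assumes (the `track` member and the context shape are assumptions, not shown in the hunks above):

```ts
import { LDContext } from '@launchdarkly/js-server-sdk-common';

import { LDClientMin } from '../src/LDClientMin';

// Jest mock standing in for the real server SDK client.
const mockLdClient = {
  variation: jest.fn(),
  track: jest.fn(),
} as unknown as jest.Mocked<LDClientMin>;

const testContext: LDContext = { kind: 'user', key: 'test-user' };

// Each test primes the variation mock with the flag payload it needs,
// including the `_ldMeta` block that drives `tracker` and `enabled`.
mockLdClient.variation.mockResolvedValue({
  model: { modelId: 'example-provider', name: 'imagination' },
  prompt: [{ role: 'system', content: 'Hello' }],
  _ldMeta: { versionKey: 'v1', enabled: true },
});
```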

packages/sdk/server-ai/examples/bedrock/src/index.ts

Lines changed: 6 additions & 2 deletions
```diff
@@ -66,8 +66,12 @@ async function main() {
   const completion = tracker.trackBedrockConverse(
     await awsClient.send(
       new ConverseCommand({
-        modelId: aiConfig.config.model?.modelId ?? 'no-model',
-        messages: mapPromptToConversation(aiConfig.config.prompt ?? []),
+        modelId: aiConfig.model?.modelId ?? 'no-model',
+        messages: mapPromptToConversation(aiConfig.prompt ?? []),
+        inferenceConfig: {
+          temperature: aiConfig.model?.temperature ?? 0.5,
+          maxTokens: aiConfig.model?.maxTokens ?? 4096,
+        },
       }),
     ),
   );
```
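
The `mapPromptToConversation` helper referenced above is not part of this diff; a plausible sketch of its role, assuming it adapts the SDK's `LDMessage[]` to the Bedrock Converse message shape (import paths and role handling are assumptions):

```ts
import { Message } from '@aws-sdk/client-bedrock-runtime';

import { LDMessage } from '@launchdarkly/server-sdk-ai';

// Converse expects content as an array of blocks rather than a flat string.
// Note: Converse only accepts 'user' and 'assistant' in `messages`; a real
// implementation would need to route 'system' messages elsewhere (e.g. the
// command's `system` field) rather than cast as done here.
function mapPromptToConversation(prompt: LDMessage[]): Message[] {
  return prompt.map((message) => ({
    role: message.role as 'user' | 'assistant',
    content: [{ text: message.content }],
  }));
}
```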

packages/sdk/server-ai/examples/openai/src/index.ts

Lines changed: 4 additions & 2 deletions
```diff
@@ -60,8 +60,10 @@ async function main(): Promise<void> {
   const { tracker } = aiConfig;
   const completion = await tracker.trackOpenAI(async () =>
     client.chat.completions.create({
-      messages: aiConfig.config.prompt || [],
-      model: aiConfig.config.model?.modelId || 'gpt-4',
+      messages: aiConfig.prompt || [],
+      model: aiConfig.model?.modelId || 'gpt-4',
+      temperature: aiConfig.model?.temperature ?? 0.5,
+      max_tokens: aiConfig.model?.maxTokens ?? 4096,
     }),
   );
```

packages/sdk/server-ai/src/LDAIClientImpl.ts

Lines changed: 17 additions & 17 deletions
```diff
@@ -2,7 +2,7 @@ import * as Mustache from 'mustache';
 
 import { LDContext } from '@launchdarkly/js-server-sdk-common';
 
-import { LDAIConfig, LDGenerationConfig, LDMessage, LDModelConfig } from './api/config';
+import { LDAIConfig, LDAIDefaults, LDMessage, LDModelConfig } from './api/config';
 import { LDAIClient } from './api/LDAIClient';
 import { LDAIConfigTrackerImpl } from './LDAIConfigTrackerImpl';
 import { LDClientMin } from './LDClientMin';
@@ -32,16 +32,28 @@ export class LDAIClientImpl implements LDAIClient {
     return Mustache.render(template, variables, undefined, { escape: (item: any) => item });
   }
 
-  async modelConfig<TDefault extends LDGenerationConfig>(
+  async modelConfig(
     key: string,
     context: LDContext,
-    defaultValue: TDefault,
+    defaultValue: LDAIDefaults,
     variables?: Record<string, unknown>,
   ): Promise<LDAIConfig> {
     const value: VariationContent = await this._ldClient.variation(key, context, defaultValue);
+    const tracker = new LDAIConfigTrackerImpl(
+      this._ldClient,
+      key,
+      // eslint-disable-next-line no-underscore-dangle
+      value._ldMeta?.versionKey ?? '',
+      context,
+    );
+    // eslint-disable-next-line no-underscore-dangle
+    const enabled = !!value._ldMeta?.enabled;
+    const config: LDAIConfig = {
+      tracker,
+      enabled,
+    };
     // We are going to modify the contents before returning them, so we make a copy.
     // This isn't a deep copy and the application developer should not modify the returned content.
-    const config: LDGenerationConfig = {};
     if (value.model) {
       config.model = { ...value.model };
     }
@@ -54,18 +66,6 @@ export class LDAIClientImpl implements LDAIClient {
       }));
     }
 
-    return {
-      config,
-      // eslint-disable-next-line no-underscore-dangle
-      tracker: new LDAIConfigTrackerImpl(
-        this._ldClient,
-        key,
-        // eslint-disable-next-line no-underscore-dangle
-        value._ldMeta?.versionKey ?? '',
-        context,
-      ),
-      // eslint-disable-next-line no-underscore-dangle
-      enabled: !!value._ldMeta?.enabled,
-    };
+    return config;
   }
 }
```
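
The `interpolateTemplate` helper visible at the top of the second hunk passes an identity `escape` function so Mustache leaves prompt text unescaped; a standalone illustration of that behavior:

```ts
import * as Mustache from 'mustache';

// Identity escape: without it, Mustache HTML-escapes values,
// turning 'John & Jane' into 'John &amp; Jane'.
const render = (template: string, variables: Record<string, unknown>) =>
  Mustache.render(template, variables, undefined, { escape: (item: any) => item });

console.log(render('Hello {{name}}', { name: 'John & Jane' })); // Hello John & Jane
```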

packages/sdk/server-ai/src/api/LDAIClient.ts

Lines changed: 3 additions & 13 deletions
```diff
@@ -1,16 +1,6 @@
 import { LDContext } from '@launchdarkly/js-server-sdk-common';
 
-import { LDAIConfig, LDGenerationConfig } from './config/LDAIConfig';
-
-/**
- * Interface for default model configuration.
- */
-export interface LDAIDefaults extends LDGenerationConfig {
-  /**
-   * Whether the configuration is enabled.
-   */
-  enabled?: boolean;
-}
+import { LDAIConfig, LDAIDefaults } from './config/LDAIConfig';
 
 /**
  * Interface for performing AI operations using LaunchDarkly.
@@ -77,10 +67,10 @@ export interface LDAIClient {
    * }
    * ```
    */
-  modelConfig<TDefault extends LDAIDefaults>(
+  modelConfig(
     key: string,
     context: LDContext,
-    defaultValue: TDefault,
+    defaultValue: LDAIDefaults,
     variables?: Record<string, unknown>,
   ): Promise<LDAIConfig>;
 }
```

packages/sdk/server-ai/src/api/config/LDAIConfig.ts

Lines changed: 16 additions & 13 deletions
```diff
@@ -7,7 +7,7 @@ export interface LDModelConfig {
   /**
    * The ID of the model.
    */
-  modelId?: string;
+  modelId: string;
 
   /**
    * Tuning parameter for randomness versus determinism. Exact effect will be determined by the
@@ -41,9 +41,9 @@ export interface LDMessage {
 }
 
 /**
- * Configuration which affects generation.
+ * AI configuration and tracker.
  */
-export interface LDGenerationConfig {
+export interface LDAIConfig {
   /**
    * Optional model configuration.
    */
@@ -52,16 +52,6 @@ export interface LDGenerationConfig {
    * Optional prompt data.
    */
   prompt?: LDMessage[];
-}
-
-/**
- * AI Config value and tracker.
- */
-export interface LDAIConfig {
-  /**
-   * The result of the AI Config customization.
-   */
-  config: LDGenerationConfig;
 
   /**
    * A tracker which can be used to generate analytics.
@@ -73,3 +63,16 @@ export interface LDAIConfig {
    */
   enabled: boolean;
 }
+
+/**
+ * Default value for a `modelConfig`. This is the same as the LDAIConfig, but it does not include
+ * a tracker and `enabled` is optional.
+ */
+export type LDAIDefaults = Omit<LDAIConfig, 'tracker' | 'enabled'> & {
+  /**
+   * Whether the configuration is enabled.
+   *
+   * defaults to false
+   */
+  enabled?: boolean;
+};
```
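
To make the new definitions concrete, a literal the types accept and one they reject (the import path is an assumption):

```ts
import { LDAIDefaults, LDModelConfig } from '@launchdarkly/server-sdk-ai';

// OK: `enabled`, `temperature`, and `maxTokens` are optional; `modelId` is not.
const defaults: LDAIDefaults = {
  model: { modelId: 'gpt-4', temperature: 0.7, maxTokens: 4096 },
  prompt: [{ role: 'system', content: 'Hello {{name}}' }],
};

// Compile error: `modelId` is required on LDModelConfig as of this commit.
// const badModel: LDModelConfig = { temperature: 0.7 };
```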
