Skip to content

Commit f87f30d

Browse files
committed
Refine typing.
1 parent 7d91ea8 commit f87f30d

File tree

16 files changed

+273
-145
lines changed

16 files changed

+273
-145
lines changed

packages/sdk/ai/examples/bedrock/src/index.ts

Lines changed: 2 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -29,11 +29,6 @@ const context = {
2929

3030
console.log('*** SDK successfully initialized');
3131

32-
interface MyModelConfig {
33-
modelId: string;
34-
prompt: { role: ConversationRole; content: string }[];
35-
}
36-
3732
function mapPromptToConversation(prompt: { role: ConversationRole; content: string }[]): Message[] {
3833
return prompt.map((item) => ({
3934
role: item.role,
@@ -64,12 +59,11 @@ async function main() {
6459
}
6560

6661
if (tracker) {
67-
const modelConfig = configValue.config as MyModelConfig;
6862
const completion = await tracker.trackBedrockConverse(
6963
await awsClient.send(
7064
new ConverseCommand({
71-
modelId: modelConfig.modelId,
72-
messages: mapPromptToConversation(modelConfig.prompt),
65+
modelId: configValue.config?.model?.modelId ?? 'default model',
66+
messages: mapPromptToConversation(configValue.config?.prompt ?? []),
7367
}),
7468
),
7569
);

packages/sdk/ai/src/LDAIConfigTrackerImpl.ts

Lines changed: 26 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -1,25 +1,25 @@
11
import { LDClient, LDContext } from '@launchdarkly/node-server-sdk';
22

33
import { LDAIConfigTracker } from './api/config';
4-
import { createBedrockTokenUsage, FeedbackKind, TokenUsage } from './api/metrics';
4+
import { createBedrockTokenUsage, LDFeedbackKind, LDTokenUsage } from './api/metrics';
55
import { createOpenAiUsage } from './api/metrics/OpenAiUsage';
66

77
export class LDAIConfigTrackerImpl implements LDAIConfigTracker {
88
private _ldClient: LDClient;
9-
private _variationId: string;
9+
private _versionId: string;
1010
private _configKey: string;
1111
private _context: LDContext;
1212

1313
constructor(ldClient: LDClient, configKey: string, versionId: string, context: LDContext) {
1414
this._ldClient = ldClient;
15-
this._variationId = versionId;
15+
this._versionId = versionId;
1616
this._configKey = configKey;
1717
this._context = context;
1818
}
1919

20-
private _getTrackData(): { variationId: string; configKey: string } {
20+
private _getTrackData(): { versionId: string; configKey: string } {
2121
return {
22-
variationId: this._variationId,
22+
versionId: this._versionId,
2323
configKey: this._configKey,
2424
};
2525
}
@@ -37,44 +37,42 @@ export class LDAIConfigTrackerImpl implements LDAIConfigTracker {
3737
return result;
3838
}
3939

40-
trackError(error: number): void {
41-
this._ldClient.track('$ld:ai:error', this._context, this._getTrackData(), error);
42-
}
43-
44-
trackFeedback(feedback: { kind: FeedbackKind }): void {
45-
if (feedback.kind === FeedbackKind.Positive) {
40+
trackFeedback(feedback: { kind: LDFeedbackKind }): void {
41+
if (feedback.kind === LDFeedbackKind.Positive) {
4642
this._ldClient.track('$ld:ai:feedback:user:positive', this._context, this._getTrackData(), 1);
47-
} else if (feedback.kind === FeedbackKind.Negative) {
43+
} else if (feedback.kind === LDFeedbackKind.Negative) {
4844
this._ldClient.track('$ld:ai:feedback:user:negative', this._context, this._getTrackData(), 1);
4945
}
5046
}
5147

52-
trackGeneration(generation: number): void {
53-
this._ldClient.track('$ld:ai:generation', this._context, this._getTrackData(), generation);
48+
trackSuccess(): void {
49+
this._ldClient.track('$ld:ai:generation', this._context, this._getTrackData(), 1);
5450
}
5551

56-
async trackOpenAI(func: (...args: any[]) => Promise<any>, ...args: any[]): Promise<any> {
52+
async trackOpenAI<TRes>(func: (...args: any[]) => Promise<TRes>, ...args: any[]): Promise<TRes> {
5753
const result = await this.trackDurationOf(func, ...args);
58-
this.trackGeneration(1);
54+
this.trackSuccess();
5955
if (result.usage) {
6056
this.trackTokens(createOpenAiUsage(result.usage));
6157
}
6258
return result;
6359
}
6460

65-
async trackBedrockConverse(res: {
66-
$metadata?: { httpStatusCode: number };
67-
metrics?: { latencyMs: number };
68-
usage?: {
69-
inputTokens: number;
70-
outputTokens: number;
71-
totalTokens: number;
72-
};
73-
}): Promise<any> {
61+
trackBedrockConverse<
62+
TRes extends {
63+
$metadata?: { httpStatusCode: number };
64+
metrics?: { latencyMs: number };
65+
usage?: {
66+
inputTokens: number;
67+
outputTokens: number;
68+
totalTokens: number;
69+
};
70+
},
71+
>(res: TRes): TRes {
7472
if (res.$metadata?.httpStatusCode === 200) {
75-
this.trackGeneration(1);
73+
this.trackSuccess();
7674
} else if (res.$metadata?.httpStatusCode && res.$metadata.httpStatusCode >= 400) {
77-
this.trackError(res.$metadata.httpStatusCode);
75+
// this.trackError(res.$metadata.httpStatusCode);
7876
}
7977
if (res.metrics) {
8078
this.trackDuration(res.metrics.latencyMs);
@@ -85,7 +83,7 @@ export class LDAIConfigTrackerImpl implements LDAIConfigTracker {
8583
return res;
8684
}
8785

88-
trackTokens(tokens: TokenUsage): void {
86+
trackTokens(tokens: LDTokenUsage): void {
8987
const trackData = this._getTrackData();
9088
if (tokens.total > 0) {
9189
this._ldClient.track('$ld:ai:tokens:total', this._context, trackData, tokens.total);
Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
import { LDContext } from '@launchdarkly/node-server-sdk';
2+
import { LDAIConfig, LDGenerationConfig } from './config/LDAIConfig';
3+
4+
/**
5+
* Interface for performing AI operations using LaunchDarkly.
6+
*/
7+
8+
export interface AIClient {
9+
/**
10+
* Parses and interpolates a template string with the provided variables.
11+
*
12+
* @param template - The template string to be parsed and interpolated.
13+
* @param variables - An object containing the variables to be used for interpolation.
14+
* @returns The interpolated string.
15+
*/
16+
interpolateTemplate(template: string, variables: Record<string, unknown>): string;
17+
18+
/**
19+
* Retrieves and processes a prompt template based on the provided key, LaunchDarkly context, and
20+
* variables.
21+
*
22+
* @param key - A unique identifier for the prompt template. This key is used to fetch the correct
23+
* prompt from storage or configuration.
24+
* @param context - The LaunchDarkly context object that contains relevant information about the
25+
* current environment, user, or session. This context may influence how the prompt is processed
26+
* or personalized.
27+
* @param variables - A map of key-value pairs representing dynamic variables to be injected into
28+
* the prompt template. The keys correspond to placeholders within the template, and the values
29+
* are the corresponding replacements.
30+
* @param defaultValue - A fallback value to be used if the prompt template associated with the
31+
* key is not found or if any errors occur during processing.
32+
*
33+
* @returns The processed prompt after all variables have been substituted in the stored prompt
34+
* template. If the prompt cannot be retrieved or processed, the `defaultValue` is returned.
35+
*
36+
* @example
37+
* ```
38+
* const key = "welcome_prompt";
39+
* const context = {...};
40+
* const variables = {username: 'john'};
41+
* const defaultValue = {};
42+
*
43+
* const result = modelConfig(key, context, defaultValue, variables);
44+
* // Output:
45+
* {
46+
* modelId: "gpt-4o",
47+
* temperature: 0.2,
48+
* maxTokens: 4096,
49+
* userDefinedKey: "myValue",
50+
* prompt: [
51+
* {
52+
* role: "system",
53+
* content: "You are an amazing GPT."
54+
* },
55+
* {
56+
* role: "user",
57+
* content: "Explain how you're an amazing GPT."
58+
* }
59+
* ]
60+
* }
61+
* ```
62+
*/
63+
modelConfig<TDefault extends LDGenerationConfig>(
64+
key: string,
65+
context: LDContext,
66+
defaultValue: TDefault,
67+
variables?: Record<string, unknown>,
68+
): Promise<LDAIConfig>;
69+
}

packages/sdk/ai/src/api/config/LDAIConfig.ts

Lines changed: 44 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,56 @@
11
import { LDAIConfigTracker } from './LDAIConfigTracker';
22

3+
/**
4+
* Configuration related to the model.
5+
*/
6+
export interface LDModelConfig {
7+
/**
8+
* The ID of the model.
9+
*/
10+
modelId?: string;
11+
12+
/**
13+
* Any additional model-specific information.
14+
*/
15+
[index: string]: unknown;
16+
}
17+
18+
/**
19+
* Information about prompts.
20+
*/
21+
export interface LDPrompt {
22+
/**
23+
* The role of the prompt.
24+
*/
25+
role: 'user' | 'assistant' | 'system';
26+
/**
27+
* Content for the prompt.
28+
*/
29+
content: string;
30+
}
31+
32+
/**
33+
* Configuration which affects generation.
34+
*/
35+
export interface LDGenerationConfig {
36+
/**
37+
* Optional model configuration.
38+
*/
39+
model?: LDModelConfig;
40+
/**
41+
* Optional prompt data.
42+
*/
43+
prompt?: LDPrompt[];
44+
}
45+
346
/**
447
* AI Config value and tracker.
548
*/
649
export interface LDAIConfig {
750
/**
851
* The result of the AI Config customization.
952
*/
10-
config: unknown;
53+
config: LDGenerationConfig;
1154

1255
/**
1356
* A tracker which can be used to generate analytics.
Lines changed: 68 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,71 @@
1-
import { FeedbackKind, TokenUsage } from '../metrics';
1+
import { LDFeedbackKind, LDTokenUsage } from '../metrics';
22

3+
/**
4+
* The LDAIConfigTracker is used to track various details about AI operations.
5+
*/
36
export interface LDAIConfigTracker {
4-
trackDuration: (duration: number) => void;
5-
trackTokens: (tokens: TokenUsage) => void;
6-
trackError: (error: number) => void;
7-
trackGeneration: (generation: number) => void;
8-
trackFeedback: (feedback: { kind: FeedbackKind }) => void;
9-
trackDurationOf: (func: (...args: any[]) => Promise<any>, ...args: any[]) => Promise<any>;
10-
trackOpenAI: (func: (...args: any[]) => Promise<any>, ...args: any[]) => any;
11-
trackBedrockConverse: (res: any) => any;
7+
/**
8+
* Track the duration of generation.
9+
*
10+
* Ideally this would not include overhead time such as network communication.
11+
*
12+
* @param durationMs The duration in milliseconds.
13+
*/
14+
trackDuration(durationMs: number): void;
15+
16+
/**
17+
* Track information about token usage.
18+
*
19+
* @param tokens Token usage information.
20+
*/
21+
trackTokens(tokens: LDTokenUsage): void;
22+
23+
/**
24+
* Generation was successful.
25+
*/
26+
trackSuccess(): void;
27+
28+
/**
29+
* Track sentiment about the generation.
30+
*
31+
* @param feedback Feedback about the generation.
32+
*/
33+
trackFeedback(feedback: { kind: LDFeedbackKind }): void;
34+
35+
/**
36+
* Track the duration of execution of the provided function.
37+
* @param func The function to track the duration of.
38+
* @param args Arguments for the function.
39+
* @returns The result of the function.
40+
*/
41+
trackDurationOf(func: (...args: any[]) => Promise<any>, ...args: any[]): Promise<any>;
42+
43+
/**
44+
* Track an OpenAI operation.
45+
*
46+
* @param func Function which executes the operation.
47+
* @param args Arguments for the operation.
48+
* @returns The result of the operation.
49+
*/
50+
trackOpenAI<TRes>(func: (...args: any[]) => Promise<TRes>, ...args: any[]): Promise<TRes>;
51+
52+
/**
53+
* Track an operation which uses Bedrock.
54+
*
55+
* @param res The result of the Bedrock operation.
56+
* @returns The input operation.
57+
*/
58+
trackBedrockConverse<
59+
TRes extends {
60+
$metadata?: { httpStatusCode: number };
61+
metrics?: { latencyMs: number };
62+
usage?: {
63+
inputTokens: number;
64+
outputTokens: number;
65+
totalTokens: number;
66+
};
67+
},
68+
>(
69+
res: TRes,
70+
): TRes;
1271
}

packages/sdk/ai/src/api/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,3 @@
11
export * from './config';
22
export * from './metrics';
3+
export * from './AIClient';

packages/sdk/ai/src/api/metrics/BedrockTokenUsage.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
1-
import { TokenUsage } from './TokenUsage';
1+
import { LDTokenUsage } from './LDTokenUsage';
22

33
export function createBedrockTokenUsage(data: {
44
totalTokens: number;
55
inputTokens: number;
66
outputTokens: number;
7-
}): TokenUsage {
7+
}): LDTokenUsage {
88
return {
99
total: data.totalTokens || 0,
1010
input: data.inputTokens || 0,

packages/sdk/ai/src/api/metrics/FeedbackKind.ts

Lines changed: 0 additions & 4 deletions
This file was deleted.
Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
/**
2+
* Feedback about the generated content.
3+
*/
4+
export enum LDFeedbackKind {
5+
/**
6+
* The sentiment was positive.
7+
*/
8+
Positive = 'positive',
9+
/**
10+
* The sentiment was negative.
11+
*/
12+
Negative = 'negative',
13+
}
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
/**
2+
* Information about token usage.
3+
*/
4+
export interface LDTokenUsage {
5+
/**
6+
* Combined token usage.
7+
*/
8+
total: number;
9+
10+
/**
11+
* Number of tokens in the input.
12+
*/
13+
input: number;
14+
15+
/**
16+
* Number of tokens in the output.
17+
*/
18+
output: number;
19+
}

0 commit comments

Comments
 (0)