import Mustache from 'mustache';

import { LDClient, LDContext } from '@launchdarkly/node-server-sdk';

// Exported so callers can construct feedback events for trackFeedback().
export enum FeedbackKind {
  Positive = 'positive',
  Negative = 'negative',
}

export interface TokenMetrics {
  total: number;
  input: number;
  output: number;
}

export class AIClient {
  private ldClient: LDClient;

  constructor(ldClient: LDClient) {
    this.ldClient = ldClient;
  }

  /**
   * Parses and interpolates a template string with the provided variables.
   *
   * @param template - The template string to parse and interpolate.
   * @param variables - An object containing the variables to be used for interpolation.
   * @returns The interpolated string.
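   *
   * @example
   * // A minimal sketch; assuming `client` is an AIClient instance:
   * client.interpolateTemplate('Hello, {{name}}!', { name: 'Ada' });
   * // => 'Hello, Ada!'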
   */
  interpolateTemplate(template: string, variables: Record<string, unknown>): string {
    // Override Mustache's default escaper so values are inserted verbatim
    // (prompts are plain text, not HTML).
    return Mustache.render(template, variables, undefined, { escape: (item: any) => item });
  }

  /**
   * Retrieves and processes a prompt template based on the provided key, LaunchDarkly context,
   * and variables.
   *
   * @param key - A unique identifier for the prompt template, used to fetch the correct prompt
   * from storage or configuration.
   * @param context - The LaunchDarkly context object containing information about the current
   * environment, user, or session. This context may influence how the prompt is processed or
   * personalized.
   * @param defaultValue - A fallback value returned if the prompt template associated with the
   * key is not found or an error occurs during processing.
   * @param variables - A map of key-value pairs representing dynamic variables to inject into
   * the prompt template. The keys correspond to placeholders within the template, and the values
   * are the replacements.
   *
   * @returns The processed prompt after all variables have been substituted into the stored
   * prompt template. If the prompt cannot be retrieved or processed, the `defaultValue` is
   * returned.
   *
   * @example
   * // Assuming `client` is an AIClient instance:
   * const key = 'welcome_prompt';
   * const context: LDContext = { kind: 'user', key: 'user-123', name: 'John' };
   * const variables = { username: 'John' };
   * const defaultValue = 'Welcome, user!';
   *
   * const result = await client.modelConfig(key, context, defaultValue, variables);
   * console.log(result);
   * // Output:
   * // {
   * //   modelId: "gpt-4o",
   * //   temperature: 0.2,
   * //   maxTokens: 4096,
   * //   userDefinedKey: "myValue",
   * //   prompt: [
   * //     {
   * //       role: "system",
   * //       content: "You are an amazing GPT."
   * //     },
   * //     {
   * //       role: "user",
   * //       content: "Explain how you're an amazing GPT."
   * //     }
   * //   ]
   * // }
   */
  async modelConfig(
    key: string,
    context: LDContext,
    defaultValue: string,
    variables?: Record<string, unknown>,
  ): Promise<any> {
    const detail = await this.ldClient.variationDetail(key, context, defaultValue);

    // Expose the evaluation context to templates alongside caller-supplied variables.
    const allVariables = { ldctx: context, ...variables };

    // When the flag is missing, variationDetail returns `defaultValue`, which has no
    // prompt to interpolate; only process the prompt when it is actually present.
    if (Array.isArray(detail.value?.prompt)) {
      detail.value.prompt = detail.value.prompt.map((entry: any) => ({
        ...entry,
        content: this.interpolateTemplate(entry.content, allVariables),
      }));
    }

    return detail.value;
  }

  trackDuration(context: LDContext, duration: number) {
    // Duration is reported as the metric value (fourth argument), matching the
    // other track calls below.
    this.ldClient.track('$ld:ai:duration:total', context, null, duration);
  }

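  /**
   * Tracks token usage for a model invocation. Zero counts are skipped so that
   * absent metrics are not recorded.
   *
   * @example
   * // A minimal sketch; assuming `response.usage` is an OpenAI-style usage object:
   * client.trackTokens(context, openAiUsageToTokenMetrics(response.usage));
   */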
  trackTokens(context: LDContext, tokens: TokenMetrics) {
    if (tokens.total > 0) {
      this.ldClient.track('$ld:ai:tokens:total', context, null, tokens.total);
    }
    if (tokens.input > 0) {
      this.ldClient.track('$ld:ai:tokens:input', context, null, tokens.input);
    }
    if (tokens.output > 0) {
      this.ldClient.track('$ld:ai:tokens:output', context, null, tokens.output);
    }
  }

  trackError(context: LDContext, error: number) {
    this.ldClient.track('$ld:ai:error', context, null, error);
  }

  trackGeneration(context: LDContext, generation: number) {
    this.ldClient.track('$ld:ai:generation', context, null, generation);
  }

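  /**
   * Tracks user feedback as a positive or negative count.
   *
   * @example
   * client.trackFeedback(context, { kind: FeedbackKind.Positive });
   */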
  trackFeedback(context: LDContext, feedback: { kind: FeedbackKind }) {
    if (feedback.kind === FeedbackKind.Positive) {
      this.ldClient.track('$ld:ai:feedback:user:positive', context, null, 1);
    } else if (feedback.kind === FeedbackKind.Negative) {
      this.ldClient.track('$ld:ai:feedback:user:negative', context, null, 1);
    }
  }
}

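/**
 * Creates an AIClient backed by an initialized LaunchDarkly SDK client.
 *
 * @example
 * // A minimal end-to-end sketch; `sdkKey`, the flag key, and `context` are
 * // placeholders for illustration:
 * import { init as initLD } from '@launchdarkly/node-server-sdk';
 *
 * const ldClient = initLD(sdkKey);
 * await ldClient.waitForInitialization();
 *
 * const aiClient = init(ldClient);
 * const config = await aiClient.modelConfig('my-prompt-flag', context, 'fallback');
 */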
export function init(ldClient: LDClient): AIClient {
  return new AIClient(ldClient);
}

/** Token usage reported with camelCase keys. */
export interface TokenUsage {
  completionTokens?: number;
  promptTokens?: number;
  totalTokens?: number;
}

/** Token usage reported with snake_case keys (the shape of OpenAI's `usage` object). */
export interface UnderscoreTokenUsage {
  completion_tokens?: number;
  prompt_tokens?: number;
  total_tokens?: number;
}

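/**
 * Normalizes a usage object in either key style into TokenMetrics, defaulting
 * missing counts to zero.
 *
 * @example
 * openAiUsageToTokenMetrics({ prompt_tokens: 7, completion_tokens: 5, total_tokens: 12 });
 * // => { total: 12, input: 7, output: 5 }
 */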
export function openAiUsageToTokenMetrics(usage: TokenUsage | UnderscoreTokenUsage): TokenMetrics {
  // `?? 0` is applied to both branches so a present-but-undefined field still yields 0.
  return {
    total: ('total_tokens' in usage ? usage.total_tokens : (usage as TokenUsage).totalTokens) ?? 0,
    input: ('prompt_tokens' in usage ? usage.prompt_tokens : (usage as TokenUsage).promptTokens) ?? 0,
    output:
      ('completion_tokens' in usage
        ? usage.completion_tokens
        : (usage as TokenUsage).completionTokens) ?? 0,
  };
}