@@ -4,6 +4,7 @@ import { generateText, streamText } from 'ai';
44
55import { init , type LDClient , type LDContext } from '@launchdarkly/node-server-sdk' ;
66import { initAi } from '@launchdarkly/server-sdk-ai' ;
7+ import { VercelProvider } from '@launchdarkly/server-sdk-ai-vercel' ;
78
89// Environment variables
910const sdkKey = process . env . LAUNCHDARKLY_SDK_KEY ?? '' ;
@@ -41,31 +42,59 @@ async function main() {
4142 // Get AI configuration from LaunchDarkly
4243 const aiConfig = await aiClient . config ( aiConfigKey , context , { model : { name : 'gpt-4' } } ) ;
4344
44- if ( ! aiConfig . enabled ) {
45+ if ( ! aiConfig . enabled || ! aiConfig . tracker ) {
4546 console . log ( '*** AI configuration is not enabled' ) ;
4647 process . exit ( 0 ) ;
4748 }
4849
4950 console . log ( 'Using model:' , aiConfig . model ?. name ) ;
5051
51- // Example of using generateText (non-streaming)
52- console . log ( '\n*** Generating text:' ) ;
5352 try {
5453 const userMessage = {
5554 role : 'user' as const ,
5655 content : 'What can you help me with?' ,
5756 } ;
5857
59- const result = await aiConfig . tracker . trackVercelAISDKGenerateTextMetrics ( ( ) =>
60- generateText ( aiConfig . toVercelAISDK ( openai , { nonInterpolatedMessages : [ userMessage ] } ) ) ,
58+ // Example of using generateText (non-streaming)
59+ console . log ( '\n*** Generating text:' ) ;
60+
61+ // Convert config to Vercel AI SDK format
62+ const vercelConfig = VercelProvider . toVercelAISDK ( aiConfig , openai , {
63+ nonInterpolatedMessages : [ userMessage ] ,
64+ } ) ;
65+
66+ // Call the model and track metrics for the ai config
67+ const result = await aiConfig . tracker . trackMetricsOf (
68+ VercelProvider . getAIMetricsFromResponse ,
69+ ( ) => generateText ( vercelConfig ) ,
6170 ) ;
71+
6272 console . log ( 'Response:' , result . text ) ;
73+ } catch ( err ) {
74+ console . error ( 'Error:' , err ) ;
75+ }
76+
77+ // Example 2: Using streamText with trackStreamMetricsOf (streaming)
78+ try {
79+ const userMessage = {
80+ role : 'user' as const ,
81+ content : 'Count from 1 to 5.' ,
82+ } ;
6383
64- process . stdout . write ( 'Streaming Response: ' ) ;
65- const streamResult = aiConfig . tracker . trackVercelAISDKStreamTextMetrics ( ( ) =>
66- streamText ( aiConfig . toVercelAISDK ( openai , { nonInterpolatedMessages : [ userMessage ] } ) ) ,
84+ // Example of using streamText ( streaming )
85+ console . log ( '\n*** Streaming text:' ) ;
86+ // Convert config to Vercel AI SDK format
87+ const vercelConfig = VercelProvider . toVercelAISDK ( aiConfig , openai , {
88+ nonInterpolatedMessages : [ userMessage ] ,
89+ } ) ;
90+
91+ // Stream is returned immediately (synchronously), metrics tracked in background
92+ const streamResult = aiConfig . tracker . trackStreamMetricsOf (
93+ ( ) => streamText ( vercelConfig ) ,
94+ VercelProvider . getAIMetricsFromStream ,
6795 ) ;
6896
97+ // Consume the stream immediately - no await needed before this!
6998 // eslint-disable-next-line no-restricted-syntax
7099 for await ( const textPart of streamResult . textStream ) {
71100 process . stdout . write ( textPart ) ;
0 commit comments