@@ -27,22 +27,20 @@ const configuration = new Configuration({
27
27
// API client built from the Configuration instance declared above.
const openai = new OpenAIApi(configuration);
28
28
29
29
// Issue two sample completion requests against the OpenAI API and log
// each response payload. The second request reuses the first request's
// options with a small tweak, which is what the caller relies on to
// verify caching behavior.
const makeSampleRequests = async () => {
  const baseOptions = {
    model: 'text-ada-001',
    prompt: 'write a poem about computers',
  };

  const firstResponse = await openai.createCompletion(baseOptions);
  console.log('completion 1:', firstResponse.data);

  // Ensure that another completion with the same prompt but
  // slightly different options returns a different response:
  const secondResponse = await openai.createCompletion({
    ...baseOptions,
    max_tokens: 50,
  });
  console.log('completion 2:', secondResponse.data);
};
47
45
48
46
const main = async ( ) => {
@@ -53,7 +51,7 @@ const main = async () => {
53
51
// Make the same requests a second time to confirm that
54
52
// they are returning a cached result rather than
55
53
// hitting OpenAI:
56
- // await makeSampleRequests();
54
+ await makeSampleRequests ( ) ;
57
55
} ;
58
56
59
57
// Run the script. `main` is async, so without a rejection handler a
// failure would surface only as an unhandled promise rejection; catch
// it explicitly, log the error, and exit non-zero so callers/CI see
// the failure deterministically.
main().catch((err) => {
  console.error(err);
  process.exitCode = 1;
});
0 commit comments