@@ -22,17 +22,28 @@ def test_list_models():
def test_completion(model, logprob):
    """Request a deterministic (temperature=0) completion and print the result.

    Prints the prompt concatenated with the generated text; when the server
    returns logprobs, also prints the first ten token logprobs.
    """
    prompt = "Once upon a time"
    completion = openai.Completion.create(
        model=model,
        prompt=prompt,
        logprobs=logprob,
        max_tokens=64,
        temperature=0,
    )
    # Work with the first (and only requested) choice.
    choice = completion.choices[0]
    print(f"full text: {prompt + choice.text}", flush=True)
    if choice.logprobs is not None:
        # Truncate to the first 10 entries to keep test output readable.
        print(f"logprobs: {choice.logprobs.token_logprobs[:10]}", flush=True)
3037
3138
3239def test_completion_stream (model ):
3340 prompt = "Once upon a time"
3441 res = openai .Completion .create (
35- model = model , prompt = prompt , max_tokens = 64 , stream = True
42+ model = model ,
43+ prompt = prompt ,
44+ max_tokens = 64 ,
45+ stream = True ,
46+ temperature = 0 ,
3647 )
3748 print (prompt , end = "" )
3849 for chunk in res :
@@ -49,14 +60,18 @@ def test_embedding(model):
4960
def test_chat_completion(model):
    """Run a single-turn, deterministic (temperature=0) chat completion.

    Sends one user message and prints the assistant's reply text.
    """
    request_messages = [{"role": "user", "content": "Hello! What is your name?"}]
    completion = openai.ChatCompletion.create(
        model=model,
        messages=request_messages,
        temperature=0,
    )
    print(completion.choices[0].message.content)
5568
5669
5770def test_chat_completion_stream (model ):
5871 messages = [{"role" : "user" , "content" : "Hello! What is your name?" }]
59- res = openai .ChatCompletion .create (model = model , messages = messages , stream = True )
72+ res = openai .ChatCompletion .create (
73+ model = model , messages = messages , stream = True , temperature = 0
74+ )
6075 for chunk in res :
6176 content = chunk ["choices" ][0 ]["delta" ].get ("content" , "" )
6277 print (content , end = "" , flush = True )
0 commit comments