
Commit 309d86e

Author: Bill Kish (wskish)
increase text-davinci-003 contextsize to 4097 (#748)
text-davinci-003 supports a context size of 4,097 tokens, so modelname_to_contextsize() now returns 4097 instead of 4000 for text-davinci-003.

Co-authored-by: Bill Kish <[email protected]>
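In practical terms, the change loosens the derived completion budget: for a hypothetical 1,000-token prompt, max_tokens_for_prompt() (which presumably subtracts the prompt's token count from the context size) would previously report at most 4000 - 1000 = 3000 completion tokens for text-davinci-003, while the corrected window yields 4097 - 1000 = 3097.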
Parent: 6ad360b

1 file changed

langchain/llms/openai.py (3 additions, 3 deletions)
@@ -254,7 +254,7 @@ def get_num_tokens(self, text: str) -> int:
     def modelname_to_contextsize(self, modelname: str) -> int:
         """Calculate the maximum number of tokens possible to generate for a model.
 
-        text-davinci-003: 4,000 tokens
+        text-davinci-003: 4,097 tokens
         text-curie-001: 2,048 tokens
         text-babbage-001: 2,048 tokens
         text-ada-001: 2,048 tokens
@@ -273,7 +273,7 @@ def modelname_to_contextsize(self, modelname: str) -> int:
         max_tokens = openai.modelname_to_contextsize("text-davinci-003")
         """
         if modelname == "text-davinci-003":
-            return 4000
+            return 4097
         elif modelname == "text-curie-001":
             return 2048
         elif modelname == "text-babbage-001":
@@ -285,7 +285,7 @@ def modelname_to_contextsize(self, modelname: str) -> int:
         elif modelname == "code-cushman-001":
             return 2048
         else:
-            return 4000
+            return 4097
 
     def max_tokens_for_prompt(self, prompt: str) -> int:
         """Calculate the maximum number of tokens possible to generate for a prompt.
