Skip to content

Commit 9df3e89

Browse files
committed
Allow OpenAI instruct models to utilize partial completions.
1 parent f6d7f5d commit 9df3e89

File tree

1 file changed

+6
-12
lines changed

1 file changed

+6
-12
lines changed

guidance/models/_openai.py

Lines changed: 6 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -131,18 +131,12 @@ def get_role_end(self, name):
131131

132132
def _generator(self, prompt, temperature):
133133
# start the new stream
134-
prompt_end = prompt.find(b'<|endofprompt|>')
135-
if prompt_end >= 0:
136-
stripped_prompt = prompt[:prompt_end]
137-
else:
138-
raise Exception("This model cannot handle prompts that don't match the instruct format!")
139-
140-
# make sure you don't try and instruct the same model twice
141-
if b'<|endofprompt|>' in prompt[prompt_end + len(b'<|endofprompt|>'):]:
142-
raise Exception("This model has been given two separate instruct blocks, but this is not allowed!")
143-
144-
# update our shared data state
145-
self._reset_shared_data(stripped_prompt + b'<|endofprompt|>', temperature)
134+
eop_count = prompt.count(b'<|endofprompt|>')
135+
if eop_count > 1:
136+
raise Exception("This model has been given multiple instruct blocks or <|endofprompt|> tokens, but this is not allowed!")
137+
updated_prompt = prompt + b'<|endofprompt|>' if eop_count == 0 else prompt
138+
139+
self._reset_shared_data(updated_prompt, temperature)
146140

147141
try:
148142
generator = self.client.completions.create(

0 commit comments

Comments (0)