1 file changed: +6 −1 lines changed

@@ -182,14 +182,15 @@ def _generate(
             generations=generations, llm_output={"token_usage": token_usage}
         )

-    def stream(self, prompt: str) -> Generator:
+    def stream(self, prompt: str, stop: Optional[List[str]] = None) -> Generator:
         """Call OpenAI with streaming flag and return the resulting generator.

         BETA: this is a beta feature while we figure out the right abstraction.
         Once that happens, this interface could change.

         Args:
             prompt: The prompts to pass into the model.
+            stop: Optional list of stop words to use when generating.

         Returns:
             A generator representing the stream of tokens from OpenAI.
@@ -204,6 +205,10 @@ def stream(self, prompt: str) -> Generator:
         params = self._invocation_params
         if params["best_of"] != 1:
             raise ValueError("OpenAI only supports best_of == 1 for streaming")
+        if stop is not None:
+            if "stop" in params:
+                raise ValueError("`stop` found in both the input and default params.")
+            params["stop"] = stop
         params["stream"] = True
         generator = self.client.create(prompt=prompt, **params)
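
For context, a minimal usage sketch of the changed method after this patch. It assumes the LangChain `OpenAI` wrapper of this era and a valid `OPENAI_API_KEY` in the environment; the model name and prompt are illustrative only. Each yielded chunk is the raw OpenAI streaming payload, so the text lives under `token["choices"][0]["text"]`.

# Usage sketch (hypothetical example, not part of this diff):
# stream() now accepts an optional `stop` list, so streaming can halt on a
# stop sequence just like generate()/__call__.
from langchain.llms import OpenAI

llm = OpenAI(model_name="text-davinci-003", max_tokens=256)

# Assumes each streamed chunk has the raw OpenAI shape:
# {"choices": [{"text": ...}]}
for token in llm.stream("Write a haiku about autumn.", stop=["\n\n"]):
    print(token["choices"][0]["text"], end="", flush=True)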
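
The added guard mirrors the behavior elsewhere in the library: specifying `stop` both in the default params and per call is rejected rather than silently merged. A sketch of that failure mode, assuming default stop sequences can be baked in via `model_kwargs`:

# Hypothetical sketch of the new guard. If `stop` already sits in the model's
# default invocation params, passing it to stream() again now raises.
llm_with_default_stop = OpenAI(model_kwargs={"stop": ["###"]})

try:
    for token in llm_with_default_stop.stream("Count to ten.", stop=["\n"]):
        pass
except ValueError as err:
    print(err)  # "`stop` found in both the input and default params."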