Skip to content

Commit 9194a8b

Browse files
authored: add stop to stream (#729)
1 parent e3df8ab commit 9194a8b

File tree

1 file changed

+6
-1
lines changed

1 file changed

+6
-1
lines changed

langchain/llms/openai.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -182,14 +182,15 @@ def _generate(
182182
generations=generations, llm_output={"token_usage": token_usage}
183183
)
184184

185-
def stream(self, prompt: str) -> Generator:
185+
def stream(self, prompt: str, stop: Optional[List[str]] = None) -> Generator:
186186
"""Call OpenAI with streaming flag and return the resulting generator.
187187
188188
BETA: this is a beta feature while we figure out the right abstraction.
189189
Once that happens, this interface could change.
190190
191191
Args:
192192
prompt: The prompts to pass into the model.
193+
stop: Optional list of stop words to use when generating.
193194
194195
Returns:
195196
A generator representing the stream of tokens from OpenAI.
@@ -204,6 +205,10 @@ def stream(self, prompt: str) -> Generator:
204205
params = self._invocation_params
205206
if params["best_of"] != 1:
206207
raise ValueError("OpenAI only supports best_of == 1 for streaming")
208+
if stop is not None:
209+
if "stop" in params:
210+
raise ValueError("`stop` found in both the input and default params.")
211+
params["stop"] = stop
207212
params["stream"] = True
208213
generator = self.client.create(prompt=prompt, **params)
209214

0 commit comments

Comments (0)