1 parent 2d3b951 commit 193ab3c
scripts/eval_frames_benchmark.py
@@ -34,7 +34,7 @@ def generate_llm_prompt(prompt: str, wiki_links: List[str]) -> str:
     return f"Here are the relevant Wikipedia articles:\n{wiki_links}\n\nBased on all the information, answer the query. \n\nQuery: {prompt}\n\n"


 def get_llm_response(prompt: str, model: str) -> str:
-    response = client.chat.completions.create(
+    response = client.with_options(timeout=1000.0).chat.completions.create(
         model=model,
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},