debug
1 parent 78dac69 commit deab116
paperqa/configs/debug.json
@@ -1,6 +1,6 @@
 {
-  "llm": "text-completion-openai/babbage-002",
-  "summary_llm": "text-completion-openai/babbage-002",
+  "llm": "claude-3-haiku-20240307",
+  "summary_llm": "claude-3-haiku-20240307",
   "answer": {
     "evidence_k": 2,
     "evidence_summary_length": "25 to 50 words",
tests/test_paperqa.py
@@ -467,6 +467,9 @@ async def test_evidence(docs_fixture) -> None:
         )
     ).contexts
     assert len(evidence) >= debug_settings.answer.evidence_k
+    assert len({e.context for e in evidence}) == len(
+        evidence
+    ), "Expected unique contexts"


 @pytest.mark.asyncio
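The added assertion tightens test_evidence: beyond returning at least evidence_k contexts, every returned context string must be distinct, since the set comprehension over e.context only matches len(evidence) when there are no duplicates. A self-contained sketch of the same check, using a hypothetical stand-in for the Context objects paper-qa returns:

# Sketch of the uniqueness check added above, with a hypothetical Context
# stand-in; the real objects come from paper-qa's evidence gathering.
from dataclasses import dataclass

@dataclass
class Context:
    context: str

evidence = [Context("summary of source A"), Context("summary of source B")]
assert len({e.context for e in evidence}) == len(evidence), "Expected unique contexts"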