|
7 | 7 | from mellea.stdlib.sampling import RejectionSamplingStrategy |
8 | 8 |
|
9 | 9 |
|
10 | | -class TestLitellmOllama: |
11 | | - m = MelleaSession(LiteLLMBackend()) |
12 | | - |
13 | | - @pytest.mark.qualitative |
14 | | - def test_litellm_ollama_chat(self): |
15 | | - res = self.m.chat("hello world") |
16 | | - assert res is not None |
17 | | - assert isinstance(res, Message) |
18 | | - |
19 | | - @pytest.mark.qualitative |
20 | | - def test_litellm_ollama_instruct(self): |
21 | | - res = self.m.instruct( |
22 | | - "Write an email to the interns.", |
23 | | - requirements=["be funny"], |
24 | | - strategy=RejectionSamplingStrategy(loop_budget=3), |
25 | | - ) |
26 | | - assert res is not None |
27 | | - assert isinstance(res.value, str) |
28 | | - |
29 | | - @pytest.mark.qualitative |
30 | | - def test_litellm_ollama_instruct_options(self): |
31 | | - res = self.m.instruct( |
32 | | - "Write an email to the interns.", |
33 | | - requirements=["be funny"], |
34 | | - model_options={ |
35 | | - ModelOption.SEED: 123, |
36 | | - ModelOption.TEMPERATURE: 0.5, |
37 | | - ModelOption.THINKING: True, |
38 | | - ModelOption.MAX_NEW_TOKENS: 100, |
39 | | - "reasoning_effort": True, |
40 | | - "stream": False, |
41 | | - "homer_simpson": "option should be kicked out", |
42 | | - }, |
43 | | - ) |
44 | | - assert res is not None |
45 | | - assert isinstance(res.value, str) |
46 | | - assert "homer_simpson" not in self.m.ctx.last_output_and_logs()[1].model_options |
47 | | - |
48 | | - @pytest.mark.qualitative |
49 | | - def test_gen_slot(self): |
50 | | - @generative |
51 | | - def is_happy(text: str) -> bool: |
52 | | - """Determine if text is of happy mood.""" |
53 | | - |
54 | | - h = is_happy(self.m, text="I'm enjoying life.") |
55 | | - |
56 | | - assert isinstance(h, bool) |
57 | | - assert h is True |
@pytest.fixture(scope="function")
def session():
    """Yield a fresh Ollama-backed LiteLLM session for each test.

    The reset is wrapped in try/finally so teardown runs even when an
    exception is thrown into the generator (e.g. GeneratorExit on fixture
    close) — a bare statement after ``yield`` would be skipped in that case.
    """
    fresh = MelleaSession(LiteLLMBackend())
    try:
        yield fresh
    finally:
        # Always clear session context, regardless of test outcome.
        fresh.reset()
| 16 | + |
| 17 | + |
@pytest.mark.qualitative
def test_litellm_ollama_chat(session):
    """Smoke-test a single chat round trip through the LiteLLM backend."""
    response = session.chat("hello world")
    assert response is not None, "chat returned nothing"
    assert isinstance(response, Message)
| 23 | + |
| 24 | + |
@pytest.mark.qualitative
def test_litellm_ollama_instruct(session):
    """Instruct with a requirement under rejection sampling."""
    strategy = RejectionSamplingStrategy(loop_budget=3)
    response = session.instruct(
        "Write an email to the interns.",
        requirements=["be funny"],
        strategy=strategy,
    )
    assert response is not None, "instruct returned nothing"
    assert isinstance(response.value, str)
| 34 | + |
| 35 | + |
@pytest.mark.qualitative
def test_litellm_ollama_instruct_options(session):
    """Instruct with a mix of recognized and unknown model options."""
    options = {
        ModelOption.SEED: 123,
        ModelOption.TEMPERATURE: 0.5,
        ModelOption.THINKING: True,
        ModelOption.MAX_NEW_TOKENS: 100,
        "reasoning_effort": True,
        "stream": False,
        "homer_simpson": "option should be kicked out",
    }
    response = session.instruct(
        "Write an email to the interns.",
        requirements=["be funny"],
        model_options=options,
    )
    assert response is not None, "instruct returned nothing"
    assert isinstance(response.value, str)
    # Unrecognized keys must be dropped before generation.
    logged_options = session.ctx.last_output_and_logs()[1].model_options
    assert "homer_simpson" not in logged_options
| 55 | + |
| 56 | + |
@pytest.mark.qualitative
def test_gen_slot(session):
    """Exercise the @generative slot decorator end to end."""

    @generative
    def is_happy(text: str) -> bool:
        """Determine if text is of happy mood."""

    mood = is_happy(session, text="I'm enjoying life.")

    assert isinstance(mood, bool)
    # Model-dependent, but this input should be judged as happy.
    assert mood is True
58 | 68 |
|
59 | 69 |
|
60 | 70 | if __name__ == "__main__": |
|
0 commit comments