
Commit 2120112

fix: collection of small fixes (#238)
* fix: add details to litellm init for model_id
* fix: litellm ollama test options
* fix: remove LLMaJ template; uses Requirement template
1 parent 633bfd7 commit 2120112

4 files changed: +14 -23 lines changed


mellea/backends/litellm.py

Lines changed: 2 additions & 2 deletions
@@ -54,12 +54,12 @@ def __init__(
         base_url: str | None = "http://localhost:11434",
         model_options: dict | None = None,
     ):
-        """Initialize and OpenAI compatible backend. For any additional kwargs that you need to pass the the client, pass them as a part of **kwargs.
+        """Initialize an OpenAI compatible backend using the [LiteLLM Python SDK](https://docs.litellm.ai/docs/#litellm-python-sdk).
 
         Note: If getting `Unclosed client session`, set `export DISABLE_AIOHTTP_TRANSPORT=True` in your environment. See: https://github.com/BerriAI/litellm/issues/13251.
 
         Args:
-            model_id : The LiteLLM model identifier. Make sure that all necessary credentials are in OS environment variables.
+            model_id : The LiteLLM model identifier; in most cases requires some combination of `<provider>/<model_creator>/<model_name>`. Make sure that all necessary credentials are in OS environment variables.
             formatter: A custom formatter based on backend.If None, defaults to TemplateFormatter
             base_url : Base url for LLM API. Defaults to None.
             model_options : Generation options to pass to the LLM. Defaults to None.
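
As a reading aid for the updated docstring, here is a minimal usage sketch. It assumes the class defined in mellea/backends/litellm.py is named LiteLLMBackend and that an Ollama-served Granite model is one valid provider-prefixed identifier; neither of those names appears in the hunk above.

    # Sketch only: "LiteLLMBackend" and the model tag are assumptions, not part of this diff.
    import os

    # Per the docstring note, avoids "Unclosed client session" warnings from litellm.
    os.environ["DISABLE_AIOHTTP_TRANSPORT"] = "True"

    from mellea.backends.litellm import LiteLLMBackend

    backend = LiteLLMBackend(
        model_id="ollama/granite3.3:8b",  # <provider>/<model_name>; credentials come from env vars
        base_url="http://localhost:11434",
        model_options=None,
    )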

mellea/templates/prompts/default/LLMaJRequirement.jinja2

Lines changed: 0 additions & 15 deletions
This file was deleted.

test/backends/test_litellm_ollama.py

Lines changed: 0 additions & 5 deletions
@@ -109,11 +109,6 @@ def test_litellm_ollama_instruct_options(session):
         ModelOption.SEED: 123,
         ModelOption.TEMPERATURE: 0.5,
         ModelOption.MAX_NEW_TOKENS: 100,
-        # Ollama thinking controls currently broken on Granite; see
-        # https://github.com/ollama/ollama/issues/10983
-        # TODO: Re-enable when this upstream bug gets fixed.
-        # ModelOption.THINKING: True,
-        # "reasoning_effort": True,
         "homer_simpson": "option should be kicked out",
     }
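
The dict above is the model_options payload that the test sends through the session fixture. As a rough illustration only (the call site is outside this hunk, and both the ModelOption import path and the instruct signature are assumptions), the trimmed options would be used roughly like this:

    from mellea.backends.types import ModelOption  # assumed import path

    # "session" is the pytest fixture passed into test_litellm_ollama_instruct_options.
    options = {
        ModelOption.SEED: 123,
        ModelOption.TEMPERATURE: 0.5,
        ModelOption.MAX_NEW_TOKENS: 100,
        "homer_simpson": "option should be kicked out",  # unknown keys are expected to be filtered out
    }
    result = session.instruct("Write a haiku about tests.", model_options=options)  # assumed call shape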

test/stdlib_basics/test_requirement.py

Lines changed: 12 additions & 1 deletion
@@ -1,7 +1,7 @@
 import asyncio
 import pytest
 from mellea.stdlib.base import ChatContext, ModelOutputThunk
-from mellea.stdlib.requirement import Requirement, simple_validate
+from mellea.stdlib.requirement import LLMaJRequirement, Requirement, simple_validate
 from mellea.stdlib.session import start_session
 
 ctx = ChatContext()
@@ -19,6 +19,17 @@ async def test_llmaj_validation_req_output_field():
     )
 
 
+async def test_llmaj_requirement_uses_requirement_template():
+    m = start_session(ctx=ctx)
+    req = LLMaJRequirement("Must output test.")
+    assert req._output is None
+
+    _ = await req.validate(m.backend, ctx=ctx)
+    assert req._output is None, (
+        "requirement's output shouldn't be updated during/after validation"
+    )
+
+
 def test_simple_validate_bool():
     validation_func = simple_validate(lambda x: False, reason="static reason")
     val_result = validation_func(ctx)
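
Together with the deleted LLMaJRequirement.jinja2 above, this test pins down the behavior named in the commit message: an LLMaJRequirement now renders through the shared Requirement template, and validate() leaves the requirement's _output untouched. A standalone sketch of the same flow, using only the API the test itself exercises:

    import asyncio

    from mellea.stdlib.base import ChatContext
    from mellea.stdlib.requirement import LLMaJRequirement
    from mellea.stdlib.session import start_session

    ctx = ChatContext()
    m = start_session(ctx=ctx)

    # LLM-as-a-judge requirement; after this commit it renders with the generic Requirement template.
    req = LLMaJRequirement("Must output test.")

    # Validation runs against the session's backend and does not populate req._output.
    result = asyncio.run(req.validate(m.backend, ctx=ctx))
    assert req._output is None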
