-
Notifications
You must be signed in to change notification settings - Fork 3
fix: Validate the completion params that are passed as input to the API #3 #17
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from 4 commits
3230d76
cd50c93
555932a
7e19ee6
a83295b
11e2985
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,74 @@ | ||
"""Tests for `_validate_chat_completion_params` (chat-completion input validation)."""

import pytest

from tlm.utils.chat_completion_validation import _validate_chat_completion_params


def test_validate_chat_completion_params_allows_valid_openai_keys() -> None:
    """A well-formed params dict with standard OpenAI keys passes silently."""
    params = {"messages": [], "model": "gpt-4.1", "temperature": 0.5}

    _validate_chat_completion_params(params)


def test_validate_chat_completion_params_allows_provider_as_none() -> None:
    """Validation succeeds when no provider key is involved.

    NOTE(review): this test is currently identical to the one above and never
    passes a ``provider`` key — presumably ``params`` should include
    ``"provider": None`` to exercise the behavior the name describes; confirm
    intent with the author.
    """
    params = {"messages": [], "model": "gpt-4.1", "temperature": 0.5}

    _validate_chat_completion_params(params)


def test_validate_chat_completion_params_requires_messages() -> None:
    """Omitting the required `messages` key raises a descriptive ValueError."""
    params = {"model": "gpt-4.1-mini"}

    with pytest.raises(ValueError) as exc_info:
        _validate_chat_completion_params(params)

    assert "openai_args must include the following parameter(s): messages" in str(exc_info.value)


def test_validate_chat_completion_params_requires_messages_list() -> None:
    """A non-list `messages` value is rejected."""
    params = {"messages": "not-a-list"}

    with pytest.raises(ValueError) as exc_info:
        _validate_chat_completion_params(params)

    assert "`messages` must be provided as a list" in str(exc_info.value)


def test_validate_chat_completion_params_requires_message_dict() -> None:
    """Each entry in `messages` must be a dictionary."""
    params = {"messages": ["not-a-dict"]}

    with pytest.raises(ValueError) as exc_info:
        _validate_chat_completion_params(params)

    assert "messages[0] must be a dictionary" in str(exc_info.value)


def test_validate_chat_completion_params_requires_role_and_content_strings() -> None:
    """A non-string role is rejected before content is inspected."""
    params = {"messages": [{"role": 123, "content": None}]}

    with pytest.raises(ValueError) as exc_info:
        _validate_chat_completion_params(params)

    assert "messages[0]['role']" in str(exc_info.value)


def test_validate_chat_completion_params_allows_function_call_without_content() -> None:
    """Assistant messages may have null content when a function_call is present."""
    params = {
        "messages": [
            {
                "role": "assistant",
                "content": None,
                "function_call": {"name": "foo", "arguments": '{"bar": 1}'},
            }
        ]
    }

    _validate_chat_completion_params(params)


def test_validate_chat_completion_params_requires_content_when_no_function_call() -> None:
    """Null content without a function_call is rejected even for assistants."""
    params = {"messages": [{"role": "assistant", "content": None}]}

    with pytest.raises(ValueError) as exc_info:
        _validate_chat_completion_params(params)

    assert "messages[0]['content'] must be a string." in str(exc_info.value)
| Original file line number | Diff line number | Diff line change | ||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| @@ -0,0 +1,53 @@ | ||||||||||||||
| """Validation helpers for chat completion parameter dictionaries.""" | ||||||||||||||
|
|
||||||||||||||
| # from litellm import get_supported_openai_params | ||||||||||||||
| from typing import Any, Callable, FrozenSet, Mapping | ||||||||||||||
|
|
||||||||||||||
| from tlm.types.base import CompletionParams | ||||||||||||||
|
|
||||||||||||||
|
|
||||||||||||||
| ParamValidator = Callable[[Any], None] | ||||||||||||||
|
|
||||||||||||||
| REQUIRED_CHAT_COMPLETION_PARAMS: FrozenSet[str] = frozenset({"messages"}) | ||||||||||||||
|
|
||||||||||||||
|
|
||||||||||||||
| def _validate_messages_param(messages: Any) -> None: | ||||||||||||||
| """Validate the shape of a `messages` param for chat completions.""" | ||||||||||||||
|
|
||||||||||||||
| if not isinstance(messages, list): | ||||||||||||||
| raise ValueError("`messages` must be provided as a list of message dictionaries.") | ||||||||||||||
|
|
||||||||||||||
| for index, message in enumerate(messages): | ||||||||||||||
| if not isinstance(message, dict): | ||||||||||||||
| raise ValueError(f"messages[{index}] must be a dictionary with 'role' and 'content'.") | ||||||||||||||
katwinkl3 marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||||||||||||||
|
|
||||||||||||||
| role = message.get("role") | ||||||||||||||
| content = message.get("content") | ||||||||||||||
|
|
||||||||||||||
| if role is None or not isinstance(role, str): | ||||||||||||||
| raise ValueError(f"messages[{index}]['role'] must be a non-empty string.") | ||||||||||||||
|
|
||||||||||||||
| if content is None or not isinstance(content, str): | ||||||||||||||
| function_call = message.get("function_call") | ||||||||||||||
| if role != "assistant" or function_call is None: | ||||||||||||||
| raise ValueError(f"messages[{index}]['content'] must be a string.") | ||||||||||||||
|
||||||||||||||
| if role != "assistant" or function_call is None: | |
| raise ValueError(f"messages[{index}]['content'] must be a string.") | |
| if role != "assistant": | |
| raise ValueError(f"Non-assistant message at index {index} must have content.") | |
| if function_call is None: | |
| raise ValueError(f"messages[{index}] must have either content or function_call key.") |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Added this logic after referring to litellm's required-field explanation: "content: string or list[dict] or null - The contents of the message. It is required for all messages, but may be null for assistant messages with function calls." I'll split the if logic into separate branches with more fitting error messages.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Should the unused commented-out litellm import be deleted?