Skip to content

Commit dd35a5e

Browse files
Normalize HA model_info to OpenAI-style capability fields
1 parent 971a996 commit dd35a5e

File tree

6 files changed

+37
-5
lines changed

6 files changed

+37
-5
lines changed

backend/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
"""Backend sync worker service."""
22

3-
__version__ = "0.6.29"
3+
__version__ = "0.6.30"

backend/litellm_client.py

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -197,6 +197,37 @@ def _extract_tag_value(tags: list[str], prefix: str) -> str | None:
197197
return None
198198

199199

200+
def _apply_openai_capability_metadata(model_info: dict, tags: list[str], model_mode: str) -> None:
201+
"""Normalize capability fields into OpenAI-style model_info supports_* keys."""
202+
tagset = {str(tag).lower() for tag in (tags or [])}
203+
204+
has_chat = "capability:chat" in tagset
205+
has_completion = "capability:completion" in tagset
206+
has_tools = "capability:tools" in tagset or "capability:function_calling" in tagset
207+
has_vision = "capability:vision" in tagset
208+
has_audio = "capability:audio" in tagset or "capability:audio_input" in tagset
209+
210+
# Prefer capability tags to infer mode; fallback to requested mode.
211+
if has_chat:
212+
normalized_mode = "chat"
213+
elif has_completion and not has_chat:
214+
normalized_mode = "completion"
215+
else:
216+
normalized_mode = "completion" if model_mode == "completion" else "chat"
217+
218+
model_info["mode"] = normalized_mode
219+
model_info["supports_chat"] = has_chat or normalized_mode == "chat"
220+
model_info["supports_completion"] = has_completion or normalized_mode == "completion"
221+
model_info["supports_function_calling"] = has_tools
222+
model_info["supports_tool_choice"] = has_tools
223+
model_info["supports_vision"] = has_vision
224+
model_info["supports_audio_input"] = has_audio
225+
226+
if model_info["supports_chat"]:
227+
model_info.setdefault("supports_system_messages", True)
228+
model_info.setdefault("supports_native_streaming", True)
229+
230+
200231
async def list_routing_group_deployments(config) -> list[dict]:
201232
"""Return LiteLLM models tagged as routing groups."""
202233
if not config.litellm_base_url:
@@ -358,6 +389,7 @@ async def push_model_to_litellm(
358389

359390
litellm_params["tags"] = tags
360391
model_info["tags"] = tags
392+
_apply_openai_capability_metadata(model_info, tags, model_mode)
361393

362394
# Add access_groups
363395
access_groups = model.get_effective_access_groups()

frontend/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
"""Frontend API and UI service."""
22

3-
__version__ = "0.6.29"
3+
__version__ = "0.6.30"

proxy/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = "0.6.29"
1+
__version__ = "0.6.30"

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "litellm-companion"
3-
version = "0.6.29"
3+
version = "0.6.30"
44
description = "Synchronize models from Ollama or OpenAI-compatible endpoints into LiteLLM"
55
authors = [
66
{name = "LiteLLM Companion Authors", email = "dev@example.com"}

shared/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
"""Shared code between backend and frontend services."""
22

3-
__version__ = "0.6.29"
3+
__version__ = "0.6.30"

0 commit comments

Comments (0)