
Commit fce3e1f

fix(server): add embedding capability to rits provider (#1480)
Signed-off-by: Radek Ježek <[email protected]>
1 parent 6e776c3 commit fce3e1f

3 files changed: +17 -10 lines changed

apps/agentstack-server/src/agentstack_server/domain/models/model_provider.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -220,6 +220,6 @@ class ModelWithScore(BaseModel):
     ModelProviderType.PERPLEXITY: {ModelCapability.LLM},
     ModelProviderType.TOGETHER: {ModelCapability.LLM},
     ModelProviderType.VOYAGE: {ModelCapability.EMBEDDING},
-    ModelProviderType.RITS: {ModelCapability.LLM},
+    ModelProviderType.RITS: {ModelCapability.LLM, ModelCapability.EMBEDDING},
     ModelProviderType.OTHER: {ModelCapability.LLM, ModelCapability.EMBEDDING},  # Other can support both
 }
```
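
This map gates which model kinds a provider type may register. Below is a hedged sketch of how such a capability map is typically consumed; the `PROVIDER_CAPABILITIES` name and the `supports` helper are illustrative assumptions, not code from this repository:

```python
from enum import Enum


class ModelProviderType(str, Enum):
    RITS = "rits"
    VOYAGE = "voyage"


class ModelCapability(str, Enum):
    LLM = "llm"
    EMBEDDING = "embedding"


# Illustrative stand-in for the capability map edited in the diff above.
PROVIDER_CAPABILITIES: dict[ModelProviderType, set[ModelCapability]] = {
    ModelProviderType.VOYAGE: {ModelCapability.EMBEDDING},
    ModelProviderType.RITS: {ModelCapability.LLM, ModelCapability.EMBEDDING},
}


def supports(provider_type: ModelProviderType, capability: ModelCapability) -> bool:
    """True if the provider type advertises the given capability."""
    return capability in PROVIDER_CAPABILITIES.get(provider_type, set())


# After this commit, RITS providers can also serve embedding models:
assert supports(ModelProviderType.RITS, ModelCapability.EMBEDDING)
```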

apps/agentstack-server/src/agentstack_server/exceptions.py

Lines changed: 3 additions & 1 deletion
```diff
@@ -86,8 +86,10 @@ def __init__(self, entity: str, max_size: int, status_code: int = status.HTTP_41
 
 class ModelLoadFailedError(PlatformError):
     def __init__(self, provider: ModelProvider, exception: HTTPError, status_code: int = status.HTTP_400_BAD_REQUEST):
+        from agentstack_server.application import extract_messages
+
         super().__init__(
-            f"Failed to load models from {provider.type} provider ({provider.base_url}): {exception}",
+            f"Failed to load models from {provider.type} provider ({provider.base_url}): {extract_messages(exception)}",
             status_code=status_code,
         )
 
```
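
The commit imports `extract_messages` from `agentstack_server.application`, but its implementation is not part of this diff. A minimal sketch of what such a helper plausibly does, unwrapping an exception chain so the provider's response body surfaces in the `ModelLoadFailedError` message (the signature and behavior below are assumptions, not the repository's actual code):

```python
import httpx


def extract_messages(exc: BaseException) -> list[str]:
    """Hypothetical sketch: flatten an exception chain into readable strings."""
    messages: list[str] = []
    seen: set[int] = set()
    current: BaseException | None = exc
    while current is not None and id(current) not in seen:
        seen.add(id(current))
        if isinstance(current, httpx.HTTPStatusError):
            # The response body usually carries the provider's real error
            # (e.g. "invalid api key"), which str(exc) alone would hide.
            messages.append(f"{current}: {current.response.text}")
        else:
            messages.append(str(current))
        current = current.__cause__ or current.__context__
    return messages
```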

apps/agentstack-server/tests/e2e/conftest.py

Lines changed: 13 additions & 8 deletions
```diff
@@ -1,10 +1,10 @@
 # Copyright 2025 © BeeAI a Series of LF Projects, LLC
 # SPDX-License-Identifier: Apache-2.0
-
+import json
 import logging
 import socket
 from collections.abc import AsyncIterator, Awaitable, Callable
-from contextlib import asynccontextmanager, closing
+from contextlib import asynccontextmanager, closing, suppress
 from typing import Any
 
 import httpx
@@ -59,10 +59,15 @@ def free_port() -> int:
 
 @pytest.fixture()
 async def setup_real_llm(test_configuration, setup_platform_client):
-    await ModelProvider.create(
-        name="test_config",
-        type=test_configuration.llm_provider_type,
-        base_url=test_configuration.llm_api_base.get_secret_value(),
-        api_key=test_configuration.llm_api_key.get_secret_value(),
-    )
+    try:
+        await ModelProvider.create(
+            name="test_config",
+            type=test_configuration.llm_provider_type,
+            base_url=test_configuration.llm_api_base.get_secret_value(),
+            api_key=test_configuration.llm_api_key.get_secret_value(),
+        )
+    except httpx.HTTPStatusError as ex:
+        with suppress(Exception):
+            ex = Exception(str(f"Failed to setup LLM - {ex}\n{json.dumps(ex.response.text, indent=2)}"))
+        raise ex
     await SystemConfiguration.update(default_llm_model=test_configuration.llm_model)
```
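
In the fixture change, `with suppress(Exception)` acts as a guard: if composing the richer message itself fails (for example, the response body cannot be read), the original `HTTPStatusError` propagates unchanged. The same pattern in isolation, with a hypothetical helper name:

```python
import json
from contextlib import suppress

import httpx


def enrich_setup_error(ex: httpx.HTTPStatusError) -> Exception:
    """Illustrative helper (not from the repo): wrap an HTTP error with its body."""
    with suppress(Exception):
        # If reading the body or building the message fails, the `return`
        # never executes and we fall through to returning the original error.
        return Exception(f"Failed to setup LLM - {ex}\n{json.dumps(ex.response.text, indent=2)}")
    return ex
```

One quirk worth noting: `json.dumps` applied to a `str` (the raw response text) merely quote-escapes it, and `indent=2` has no effect on a scalar, so the body appears as a single escaped line; the message is still readable either way.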
