Commit 9e015e7

fix(server): unit tests (#1729)
Signed-off-by: Radek Ježek <[email protected]>
1 parent 0626680

6 files changed: +27 -29 lines changed
apps/agentstack-sdk-py/src/agentstack_sdk/platform/context.py

Lines changed: 2 additions & 6 deletions
@@ -31,19 +31,15 @@ class ContextToken(pydantic.BaseModel):
     expires_at: pydantic.AwareDatetime | None = None


-class ResourceIdPermission(pydantic.BaseModel):
-    id: str
-
-
 class ContextPermissions(pydantic.BaseModel):
     files: set[Literal["read", "write", "extract", "*"]] = set()
     vector_stores: set[Literal["read", "write", "*"]] = set()
     context_data: set[Literal["read", "write", "*"]] = set()


 class Permissions(ContextPermissions):
-    llm: set[Literal["*"] | ResourceIdPermission] = set()
-    embeddings: set[Literal["*"] | ResourceIdPermission] = set()
+    llm: set[Literal["*"] | str] = set()
+    embeddings: set[Literal["*"] | str] = set()
     a2a_proxy: set[Literal["*"]] = set()
     model_providers: set[Literal["read", "write", "*"]] = set()
     variables: SerializeAsAny[set[Literal["read", "write", "*"]]] = set()
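
After this change, entries in the SDK's llm and embeddings permission sets are plain model-ID strings (or the "*" wildcard) instead of ResourceIdPermission objects. A minimal usage sketch; the model IDs below are illustrative placeholders, not values from this commit:

from agentstack_sdk.platform.context import Permissions

# Wildcard grants vs. grants scoped to specific model IDs (placeholder IDs).
admin = Permissions(llm={"*"}, embeddings={"*"})
scoped = Permissions(
    llm={"llm-1"},               # placeholder model ID
    embeddings={"embedding-1"},  # placeholder model ID
)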

apps/agentstack-server/src/agentstack_server/api/schema/contexts.py

Lines changed: 2 additions & 3 deletions
@@ -8,7 +8,6 @@
 from agentstack_server.api.schema.common import PaginationQuery
 from agentstack_server.domain.models.common import Metadata, MetadataPatch
 from agentstack_server.domain.models.context import ContextHistoryItemData
-from agentstack_server.domain.models.permissions import ResourceIdPermission


 class ContextCreateRequest(BaseModel):
@@ -47,8 +46,8 @@ class GlobalPermissionGrant(BaseModel):
     vector_stores: list[Literal["read", "write", "*"]] = Field(default_factory=list)

     # openai proxy
-    llm: list[Literal["*"] | ResourceIdPermission] = Field(default_factory=list)
-    embeddings: list[Literal["*"] | ResourceIdPermission] = Field(default_factory=list)
+    llm: list[Literal["*"] | str] = Field(default_factory=list)
+    embeddings: list[Literal["*"] | str] = Field(default_factory=list)
     model_providers: list[Literal["read", "write", "*"]] = Field(default_factory=list)

     a2a_proxy: list[Literal["*"]] = Field(default_factory=list)

apps/agentstack-server/src/agentstack_server/domain/models/permissions.py

Lines changed: 2 additions & 7 deletions
@@ -9,11 +9,6 @@
 from agentstack_server.domain.models.user import User


-class ResourceIdPermission(BaseModel):
-    id: str
-    model_config = ConfigDict(frozen=True)
-
-
 class Permissions(BaseModel):
     model_config = ConfigDict(frozen=True, validate_default=True)

@@ -32,8 +27,8 @@ class Permissions(BaseModel):

     # openai proxy
     model_providers: SerializeAsAny[set[Literal["read", "write", "*"]]] = set()
-    llm: SerializeAsAny[set[Literal["*"] | ResourceIdPermission]] = set()
-    embeddings: SerializeAsAny[set[Literal["*"] | ResourceIdPermission]] = set()
+    llm: SerializeAsAny[set[Literal["*"] | str]] = set()
+    embeddings: SerializeAsAny[set[Literal["*"] | str]] = set()

     a2a_proxy: SerializeAsAny[set[Literal["*"]]] = set()

apps/agentstack-server/tasks.toml

Lines changed: 6 additions & 0 deletions
@@ -8,6 +8,8 @@ sources = ["uv.lock", "pyproject.toml"]
 outputs = { auto = true }

 # check
+["agentstack-server:test"]
+depends = ["agentstack-server:test:unit"]

 ["agentstack-server:check"]
 depends = ["agentstack-server:check:*"]
@@ -248,6 +250,10 @@ run = "{{ mise_bin }} run agentstack-server:dev:disconnect --vm-name=agentstack-
 dir = "{{config_root}}/apps/agentstack-server"
 run = "{{ mise_bin }} run agentstack-server:dev:reconnect --vm-name=agentstack-local-test"

+["agentstack-server:test:unit"]
+dir = "{{config_root}}/apps/agentstack-server"
+run = "uv run pytest -m unit"
+
 ["agentstack-server:test:e2e"]
 dir = "{{config_root}}/apps/agentstack-server"
 run = """
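
The new agentstack-server:test:unit task runs only tests carrying pytest's unit marker, and the umbrella agentstack-server:test task now depends on it; with mise it is invoked as: mise run agentstack-server:test:unit. A minimal sketch of a module that pytest -m unit would collect (illustrative test only, not from this commit):

import pytest

# Module-level marker: every test in this module is selected by `pytest -m unit`.
pytestmark = pytest.mark.unit


def test_something_fast_and_isolated():
    assert 1 + 1 == 2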

apps/agentstack-server/tests/unit/domain/models/test_permissions.py

Lines changed: 5 additions & 5 deletions
@@ -3,7 +3,7 @@

 import pytest

-from agentstack_server.domain.models.permissions import Permissions, ResourceIdPermission
+from agentstack_server.domain.models.permissions import Permissions

 pytestmark = pytest.mark.unit

@@ -86,8 +86,8 @@ def test_check_resource_id_wildcard():


 def test_check_resource_id_permissions():
-    resource1 = ResourceIdPermission(id="llm-1")
-    resource2 = ResourceIdPermission(id="llm-2")
+    resource1 = "llm-1"
+    resource2 = "llm-2"

     user_perms = Permissions(llm={resource1})
     required_perms = Permissions(llm={resource1})
@@ -170,8 +170,8 @@ def test_union_admin_permissions():


 def test_union_resource_id_permissions():
-    resource1 = ResourceIdPermission(id="llm-1")
-    resource2 = ResourceIdPermission(id="llm-2")
+    resource1 = "llm-1"
+    resource2 = "llm-2"

     perms1 = Permissions(llm={resource1})
     perms2 = Permissions(llm={resource2})
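
With plain strings in the permission sets, the membership checks and unions these tests exercise follow ordinary Python set semantics; no frozen wrapper model is needed to make entries hashable. A minimal sketch built on the Permissions model from this commit:

from agentstack_server.domain.models.permissions import Permissions

granted = Permissions(llm={"llm-1", "*"})
requested = Permissions(llm={"llm-2"})

# Plain string entries are hashable, so standard set operations apply directly.
assert "llm-1" in granted.llm
assert granted.llm | requested.llm == {"*", "llm-1", "llm-2"}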

apps/agentstack-server/tests/unit/service_layer/services/test_model_provider_match.py

Lines changed: 10 additions & 8 deletions
@@ -1,6 +1,8 @@
 # Copyright 2025 © BeeAI a Series of LF Projects, LLC
 # SPDX-License-Identifier: Apache-2.0

+from unittest.mock import Mock
+
 import pytest

 from agentstack_server.domain.models.model_provider import ModelCapability
@@ -14,7 +16,7 @@ class TestModelProviderMatchModels:

     def test_default_model_gets_exactly_half_score(self):
         """Test that default models get exactly 0.5 score."""
-        service = ModelProviderService(uow=None)  # We don't need UoW for internal method
+        service = ModelProviderService(uow=None, openai_proxy=Mock())  # We don't need UoW for internal method

         available_models = [
             "openai:gpt-4",
@@ -54,7 +56,7 @@ def test_default_model_gets_exactly_half_score(self):


     def test_exact_match_gets_score_of_one(self):
         """Test that exact matches get score of 1.0."""
-        service = ModelProviderService(uow=None)
+        service = ModelProviderService(uow=None, openai_proxy=Mock())

         available_models = ["openai:gpt-4", "openai:gpt-3.5-turbo", "anthropic:claude-3-5-sonnet"]

@@ -75,7 +77,7 @@ def test_exact_match_gets_score_of_one(self):

     def test_partial_match_gets_score_between_half_and_one(self):
         """Test that partial matches get scores between 0.5 and 1.0."""
-        service = ModelProviderService(uow=None)
+        service = ModelProviderService(uow=None, openai_proxy=Mock())

         available_models = [
             "openai:gpt-4",
@@ -98,7 +100,7 @@ def test_partial_match_gets_score_between_half_and_one(self):

     def test_no_match_below_cutoff_gets_no_score(self):
         """Test that matches below cutoff don't appear in results."""
-        service = ModelProviderService(uow=None)
+        service = ModelProviderService(uow=None, openai_proxy=Mock())

         available_models = ["openai:gpt-4", "anthropic:claude-3-5-sonnet"]

@@ -117,7 +119,7 @@ def test_no_match_below_cutoff_gets_no_score(self):

     def test_default_model_gets_max_of_default_and_fuzzy_score(self):
         """Test that default models get max of default score (0.5) and fuzzy match score."""
-        service = ModelProviderService(uow=None)
+        service = ModelProviderService(uow=None, openai_proxy=Mock())

         available_models = ["openai:gpt-4", "openai:gpt-3.5-turbo"]

@@ -151,7 +153,7 @@ def test_default_model_gets_max_of_default_and_fuzzy_score(self):

     def test_default_model_stays_exactly_half_when_no_fuzzy_match(self):
         """Test that default models stay at exactly 0.5 when there's no fuzzy matching improvement."""
-        service = ModelProviderService(uow=None)
+        service = ModelProviderService(uow=None, openai_proxy=Mock())

         available_models = [
             "openai:gpt-4",
@@ -183,7 +185,7 @@ def test_default_model_stays_exactly_half_when_no_fuzzy_match(self):

     def test_multiple_suggestions_best_match_wins(self):
         """Test that when multiple suggestions match, the best score is used."""
-        service = ModelProviderService(uow=None)
+        service = ModelProviderService(uow=None, openai_proxy=Mock())

         available_models = ["openai:gpt-4"]

@@ -205,7 +207,7 @@ def test_multiple_suggestions_best_match_wins(self):

     def test_results_sorted_by_score_descending(self):
         """Test that results are sorted by score in descending order."""
-        service = ModelProviderService(uow=None)
+        service = ModelProviderService(uow=None, openai_proxy=Mock())

         available_models = ["openai:gpt-4", "openai:gpt-3.5-turbo", "anthropic:claude-3-5-sonnet"]