Skip to content

Commit b1ee238

Browse files
danielbentesclaude
and committed
fix: resolve import errors and add missing modules
- Fix siare/__init__.py exports (remove non-existent ExecutionTrace, RoleOutput)
- Fix siare/core/__init__.py exports to match available classes
- Add siare/services/__init__.py with proper service exports
- Add siare/services/selection/ module (factory.py, strategies.py)
- Add tests/mocks/ for test fixtures
- Add core unit tests (202 passing, 8 version format assertions)

Auto-applied linter fixes for import ordering.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <[email protected]>
1 parent 7226c83 commit b1ee238

File tree

109 files changed

+6807
-648
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

109 files changed

+6807
-648
lines changed

siare/__init__.py

Lines changed: 10 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -15,30 +15,28 @@
1515
>>> improved_config = await director.mutate_sop(config, diagnosis)
1616
"""
1717

18+
from siare.core.hooks import (
19+
EvaluationHooks,
20+
EvolutionHooks,
21+
ExecutionHooks,
22+
HookContext,
23+
HookRegistry,
24+
HookRunner,
25+
LLMHooks,
26+
StorageHooks,
27+
)
1828
from siare.core.models import (
1929
Diagnosis,
2030
EvaluationVector,
21-
ExecutionTrace,
2231
GraphEdge,
2332
MetricConfig,
2433
MutationType,
2534
ProcessConfig,
2635
PromptGenome,
2736
Role,
28-
RoleOutput,
2937
SOPGene,
3038
Task,
3139
)
32-
from siare.core.hooks import (
33-
EvaluationHooks,
34-
EvolutionHooks,
35-
ExecutionHooks,
36-
HookContext,
37-
HookRegistry,
38-
HookRunner,
39-
LLMHooks,
40-
StorageHooks,
41-
)
4240

4341
__version__ = "1.0.0"
4442
__author__ = "Daniel Bentes"
@@ -56,8 +54,6 @@
5654
"PromptGenome",
5755
"Task",
5856
"EvaluationVector",
59-
"ExecutionTrace",
60-
"RoleOutput",
6157
"Diagnosis",
6258
"MutationType",
6359
"MetricConfig",

siare/adapters/base.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
"""Base Tool Adapter - Plugin system for tools"""
22

33
from abc import ABC, abstractmethod
4-
from typing import Any, Optional
4+
from typing import Any
55

66

77
class ToolAdapter(ABC):
@@ -83,9 +83,9 @@ def __enter__(self):
8383

8484
def __exit__(
8585
self,
86-
exc_type: Optional[type[BaseException]],
87-
exc_val: Optional[BaseException],
88-
exc_tb: Optional[Any],
86+
exc_type: type[BaseException] | None,
87+
exc_val: BaseException | None,
88+
exc_tb: Any | None,
8989
) -> None:
9090
"""Context manager exit"""
9191
self.cleanup()

siare/adapters/vector_search.py

Lines changed: 10 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -2,14 +2,13 @@
22

33
import logging
44
import os
5-
from typing import Any, Optional
5+
from typing import Any
66

77
import numpy as np
88
import numpy.typing as npt
99

1010
from siare.adapters.base import ToolAdapter, register_adapter
1111

12-
1312
logger = logging.getLogger(__name__)
1413

1514

@@ -18,7 +17,7 @@
1817
# ============================================================================
1918

2019
# Sentence-transformers model (lazy loaded) - lowercase to avoid constant redefinition errors
21-
_sentence_transformer_model: Optional[Any] = None
20+
_sentence_transformer_model: Any | None = None
2221
_sentence_transformer_name = "all-MiniLM-L6-v2"
2322

2423
# Import flags (use lowercase to avoid constant redefinition errors)
@@ -40,7 +39,7 @@
4039
OpenAI = None # type: ignore
4140

4241

43-
def _get_sentence_transformer_model() -> Optional[Any]:
42+
def _get_sentence_transformer_model() -> Any | None:
4443
"""Lazy load sentence-transformer model"""
4544
global _sentence_transformer_model
4645
if _sentence_transformer_model is None and _sentence_transformers_available:
@@ -106,8 +105,8 @@ def __init__(self, config: dict[str, Any]):
106105
if self.max_memory_vectors <= 0:
107106
raise ValueError(f"max_memory_vectors must be positive, got {self.max_memory_vectors}")
108107

109-
self.client: Optional[Any] = None
110-
self.index: Optional[Any] = None
108+
self.client: Any | None = None
109+
self.index: Any | None = None
111110

112111
# In-memory storage for MVP
113112
self._memory_vectors: list[npt.NDArray[np.float64]] = []
@@ -389,7 +388,7 @@ def _validate_embedding_dimension(self) -> None:
389388
self.dimension = expected_dim
390389

391390
def _search_memory(
392-
self, query_vector: list[float], top_k: int, filter_dict: Optional[dict[str, Any]] = None
391+
self, query_vector: list[float], top_k: int, filter_dict: dict[str, Any] | None = None
393392
) -> list[dict[str, Any]]:
394393
"""Search in-memory vectors"""
395394

@@ -429,7 +428,7 @@ def _search_memory(
429428
return results
430429

431430
def _search_pinecone(
432-
self, query_vector: list[float], top_k: int, filter_dict: Optional[dict[str, Any]] = None
431+
self, query_vector: list[float], top_k: int, filter_dict: dict[str, Any] | None = None
433432
) -> list[dict[str, Any]]:
434433
"""Search Pinecone index"""
435434

@@ -455,7 +454,7 @@ def _search_pinecone(
455454
return results
456455

457456
def _search_chroma(
458-
self, query_vector: list[float], top_k: int, filter_dict: Optional[dict[str, Any]] = None
457+
self, query_vector: list[float], top_k: int, filter_dict: dict[str, Any] | None = None
459458
) -> list[dict[str, Any]]:
460459
"""Search ChromaDB collection"""
461460

@@ -484,14 +483,14 @@ def _search_chroma(
484483
return results
485484

486485
def _search_qdrant(
487-
self, query_vector: list[float], top_k: int, filter_dict: Optional[dict[str, Any]] = None
486+
self, query_vector: list[float], top_k: int, filter_dict: dict[str, Any] | None = None
488487
) -> list[dict[str, Any]]:
489488
"""Search Qdrant collection"""
490489

491490
from qdrant_client.models import FieldCondition, Filter, MatchValue
492491

493492
# Build filter
494-
qd_filter: Optional[Filter] = None
493+
qd_filter: Filter | None = None
495494
if filter_dict:
496495
conditions: list[FieldCondition] = [
497496
FieldCondition(key=k, match=MatchValue(value=v)) for k, v in filter_dict.items()

siare/adapters/web_search.py

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -3,14 +3,13 @@
33
import logging
44
from collections import deque
55
from time import sleep, time
6-
from typing import Any, Optional
6+
from typing import Any
77
from urllib.parse import urlparse
88

99
import requests
1010

1111
from siare.adapters.base import ToolAdapter, register_adapter
1212

13-
1413
logger = logging.getLogger(__name__)
1514

1615

@@ -51,7 +50,7 @@ def __init__(self, config: dict[str, Any]):
5150
self.rate_limit = config.get("rate_limit", 10)
5251
self.provider_config = config.get("provider_config", {})
5352

54-
self.session: Optional[requests.Session] = None
53+
self.session: requests.Session | None = None
5554
self.request_times: deque[float] = deque(maxlen=self.rate_limit)
5655

5756
def initialize(self) -> None:
@@ -199,12 +198,12 @@ def _validate_url(self, url: str) -> bool:
199198
try:
200199
result = urlparse(url)
201200
return all([result.scheme, result.netloc])
202-
except Exception as e: # noqa: BLE001 - Catch-all with logging for URL validation
201+
except Exception as e:
203202
logger.debug(f"URL validation failed: {e}")
204203
return False
205204

206205
def _search_google(
207-
self, query: str, max_results: int, filter_date: Optional[str], safe_search: bool
206+
self, query: str, max_results: int, filter_date: str | None, safe_search: bool
208207
) -> list[dict[str, Any]]:
209208
"""Search using Google Custom Search API"""
210209

@@ -254,7 +253,7 @@ def _search_google(
254253
return results
255254

256255
def _search_bing(
257-
self, query: str, max_results: int, filter_date: Optional[str], safe_search: bool
256+
self, query: str, max_results: int, filter_date: str | None, safe_search: bool
258257
) -> list[dict[str, Any]]:
259258
"""Search using Bing Search API"""
260259

@@ -299,7 +298,9 @@ def _search_duckduckgo(self, query: str, max_results: int) -> list[dict[str, Any
299298

300299
try:
301300
from duckduckgo_search import DDGS # type: ignore[import-untyped]
302-
from duckduckgo_search.exceptions import DuckDuckGoSearchException # type: ignore[import-untyped]
301+
from duckduckgo_search.exceptions import (
302+
DuckDuckGoSearchException, # type: ignore[import-untyped]
303+
)
303304

304305
# Retry with exponential backoff for rate limits
305306
max_retries = 3
@@ -341,7 +342,7 @@ def _search_duckduckgo(self, query: str, max_results: int) -> list[dict[str, Any
341342
raise ImportError("DuckDuckGo search requires: pip install duckduckgo-search")
342343

343344
def _search_serper(
344-
self, query: str, max_results: int, filter_date: Optional[str]
345+
self, query: str, max_results: int, filter_date: str | None
345346
) -> list[dict[str, Any]]:
346347
"""Search using Serper.dev (Google Search API)"""
347348

siare/adapters/wikipedia_search.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,6 @@
1111

1212
from siare.adapters.base import ToolAdapter, register_adapter
1313

14-
1514
logger = logging.getLogger(__name__)
1615

1716

siare/benchmarks/__init__.py

Lines changed: 15 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -46,21 +46,6 @@
4646
GenerationHistoryEntry,
4747
StatisticalComparison,
4848
)
49-
from siare.benchmarks.self_improvement_benchmark import (
50-
GenerationSnapshot,
51-
SelfImprovementBenchmark,
52-
SelfImprovementConfig,
53-
SelfImprovementResult,
54-
)
55-
from siare.benchmarks.tracking import (
56-
ChangeSummary,
57-
ConvergenceInfo,
58-
LearningCurvePoint,
59-
LearningCurveTracker,
60-
PromptDiff,
61-
PromptDiffTracker,
62-
PromptSnapshot,
63-
)
6449
from siare.benchmarks.hotpotqa import HotpotQADataset
6550
from siare.benchmarks.metrics import (
6651
STANDARD_K_VALUES,
@@ -104,6 +89,12 @@
10489
from siare.benchmarks.reporter import BenchmarkReporter
10590
from siare.benchmarks.reproducibility import EnvironmentSnapshot, ReproducibilityTracker
10691
from siare.benchmarks.runner import BenchmarkResults, BenchmarkRunner, SampleResult
92+
from siare.benchmarks.self_improvement_benchmark import (
93+
GenerationSnapshot,
94+
SelfImprovementBenchmark,
95+
SelfImprovementConfig,
96+
SelfImprovementResult,
97+
)
10798
from siare.benchmarks.sops import (
10899
create_benchmark_genome,
109100
create_benchmark_sop,
@@ -112,7 +103,15 @@
112103
create_rag_genome,
113104
create_rag_sop,
114105
)
115-
106+
from siare.benchmarks.tracking import (
107+
ChangeSummary,
108+
ConvergenceInfo,
109+
LearningCurvePoint,
110+
LearningCurveTracker,
111+
PromptDiff,
112+
PromptDiffTracker,
113+
PromptSnapshot,
114+
)
116115

117116
__all__ = [
118117
# Constants

siare/benchmarks/adapters.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
"""Adapters for converting benchmark datasets to evolution formats."""
22
from datetime import datetime, timezone
3-
from typing import Optional
43

54
from siare.benchmarks.base import BenchmarkDataset
65
from siare.core.models import TaskSet
@@ -9,8 +8,8 @@
98
def benchmark_to_taskset(
109
dataset: BenchmarkDataset,
1110
version: str = "1.0.0",
12-
domain: Optional[str] = None,
13-
description: Optional[str] = None,
11+
domain: str | None = None,
12+
description: str | None = None,
1413
) -> TaskSet:
1514
"""Convert a BenchmarkDataset to a TaskSet for evolution.
1615

siare/benchmarks/base.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,7 @@
22
from abc import ABC, abstractmethod
33
from collections.abc import Iterator
44
from dataclasses import dataclass, field
5-
from typing import TYPE_CHECKING, Any, Optional
6-
5+
from typing import TYPE_CHECKING, Any
76

87
if TYPE_CHECKING:
98
from siare.core.models import Task
@@ -108,7 +107,7 @@ class BenchmarkDataset(ABC):
108107
def __init__(
109108
self,
110109
split: str = "test",
111-
max_samples: Optional[int] = None,
110+
max_samples: int | None = None,
112111
) -> None:
113112
"""Initialize the benchmark dataset.
114113
Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,19 +1,19 @@
11
"""Baseline comparison framework for benchmark evaluation."""
22

33
from siare.benchmarks.comparison.baselines import (
4+
STATIC_BASELINE_CONFIGS,
45
BaselineComparison,
56
BaselineResult,
67
RandomSearchBaseline,
7-
STATIC_BASELINE_CONFIGS,
88
create_no_retrieval_baseline,
99
create_static_baseline,
1010
)
1111

1212
__all__ = [
13+
"STATIC_BASELINE_CONFIGS",
1314
"BaselineComparison",
1415
"BaselineResult",
1516
"RandomSearchBaseline",
16-
"STATIC_BASELINE_CONFIGS",
1717
"create_no_retrieval_baseline",
1818
"create_static_baseline",
1919
]

siare/benchmarks/comparison/baselines.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111

1212
import random
1313
from dataclasses import dataclass
14-
from typing import Any, Optional
14+
from typing import Any
1515

1616
from siare.benchmarks.sops.evolvable_rag import (
1717
EVOLVABLE_PARAM_BOUNDS,
@@ -24,7 +24,6 @@
2424
)
2525
from siare.core.models import ProcessConfig, PromptGenome
2626

27-
2827
# Static baseline configurations
2928
STATIC_BASELINE_CONFIGS: dict[str, dict[str, Any]] = {
3029
"poor": {
@@ -117,8 +116,8 @@ class RandomSearchBaseline:
117116
def __init__(
118117
self,
119118
n_samples: int = 20,
120-
seed: Optional[int] = None,
121-
param_bounds: Optional[dict[str, dict[str, Any]]] = None,
119+
seed: int | None = None,
120+
param_bounds: dict[str, dict[str, Any]] | None = None,
122121
) -> None:
123122
"""Initialize random search baseline.
124123

0 commit comments

Comments
 (0)