Commit b6604e3

feat: add core module structure and types for call_model API
- Create call_model/ module with __init__.py and py.typed marker
- Add comprehensive exception hierarchy:
  - CallModelError: Base exception with code and context
  - ToolExecutionError: For tool execution failures
  - ToolValidationError: For Pydantic validation errors
  - StreamInterruptedError: For stream interruptions
  - MaxToolRoundsExceededError: For exceeding max rounds
- Add type definitions:
  - ToolType and ResponseState enums
  - ToolContext and CachedData TypedDicts
  - Type aliases for ToolCallId, EventType, StreamEvent
- All exceptions include actionable error messages with suggestions
- Full type hints and comprehensive docstrings
- Follows PEP-8 and Python conventions (snake_case)

This foundation supports the upcoming ResponseWrapper, ReusableStream, and tool system implementations.

Implements: FR-1.1.1, FR-1.6.1, FR-1.6.2, FR-1.6.3, FR-1.6.4, FR-1.6.5
1 parent a1dae77 commit b6604e3

4 files changed: +345 -0 lines changed

src/openrouter/call_model/__init__.py

Lines changed: 74 additions & 0 deletions
@@ -0,0 +1,74 @@
"""High-level API for OpenRouter model interactions with tool orchestration.

This module provides a Pythonic interface for calling OpenRouter models with
automatic tool execution, streaming support, and multiple consumption patterns.

The call_model API is designed to:
- Provide a simple, high-level interface similar to the TypeScript SDK
- Support automatic tool orchestration with validation
- Enable multiple consumption patterns (streaming, complete message, text-only)
- Allow stream reuse without additional API calls
- Follow Python conventions (snake_case, async/await, type hints)

Example:
    Basic usage with tools:

    >>> from openrouter import OpenRouter
    >>> from openrouter.call_model import call_model
    >>> from pydantic import BaseModel
    >>>
    >>> class WeatherParams(BaseModel):
    ...     location: str
    ...     unit: str = "celsius"
    >>>
    >>> async def main():
    ...     client = OpenRouter(api_key="...")
    ...     response = await call_model(
    ...         client=client,
    ...         request={"model": "gpt-4", "messages": [...]},
    ...         tools=[WeatherParams],
    ...         max_tool_rounds=5
    ...     )
    ...     text = await response.get_text()
    ...     print(text)

For more examples, see the examples/ directory.
"""

from .exceptions import (
    CallModelError,
    MaxToolRoundsExceededError,
    StreamInterruptedError,
    ToolExecutionError,
    ToolValidationError,
)
from .types import (
    CachedData,
    EventType,
    ResponseState,
    StreamEvent,
    ToolCallId,
    ToolContext,
    ToolType,
)

__all__ = [
    # Exception classes
    "CallModelError",
    "MaxToolRoundsExceededError",
    "StreamInterruptedError",
    "ToolExecutionError",
    "ToolValidationError",
    # Type definitions
    "CachedData",
    "EventType",
    "ResponseState",
    "StreamEvent",
    "ToolCallId",
    "ToolContext",
    "ToolType",
]

# Module metadata
__version__ = "0.1.0"
__author__ = "OpenRouter"
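These exports make the exception hierarchy and type definitions available from a single import path, and the base class's code attribute supports handling errors programmatically. The sketch below assumes the call_model entry point described in the module docstring (it is not part of this commit), so the handler name and request shape are illustrative:

from openrouter import OpenRouter
from openrouter.call_model import CallModelError, ToolValidationError, call_model

async def ask(client: OpenRouter, request: dict) -> str:
    # Hypothetical handler; call_model is the forthcoming entry point.
    try:
        response = await call_model(client=client, request=request)
        return await response.get_text()
    except ToolValidationError as err:
        # Subclasses expose structured fields beyond the message text.
        print(f"Bad tool input for {err.tool_name}: {err.validation_errors}")
        raise
    except CallModelError as err:
        # The base class carries .code and .context for programmatic handling.
        print(f"call_model failed ({err.code}): {err.context}")
        raise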
src/openrouter/call_model/exceptions.py

Lines changed: 162 additions & 0 deletions
@@ -0,0 +1,162 @@
"""Custom exception types for the call_model API.

This module defines the exception hierarchy for call_model operations,
providing actionable error messages with context for debugging.
"""

from typing import Any, Dict, List, Optional


class CallModelError(Exception):
    """Base exception for all call_model errors.

    This exception includes optional error codes and context to help
    developers understand and resolve issues.

    Attributes:
        message: Human-readable error description
        code: Optional error code for programmatic handling
        context: Optional dictionary with additional error context
    """

    def __init__(
        self,
        message: str,
        code: Optional[str] = None,
        context: Optional[Dict[str, Any]] = None,
    ):
        """Initialize the error with message, code, and context.

        Args:
            message: Human-readable error description
            code: Optional error code for programmatic handling
            context: Optional dictionary with additional error context
        """
        super().__init__(message)
        self.message = message
        self.code = code
        self.context = context or {}

    def __str__(self) -> str:
        """Return the error message."""
        return self.message


class ToolExecutionError(CallModelError):
    """Raised when tool execution fails.

    This exception includes the tool name, original error, and input context
    to help developers debug tool implementation issues.

    Example:
        >>> try:
        ...     result = await tool.execute(params, context)
        ... except Exception as e:
        ...     raise ToolExecutionError(
        ...         tool_name="weather_tool",
        ...         error=e,
        ...         context={"input": params, "tool_call_id": "call_123"}
        ...     )
    """

    def __init__(self, tool_name: str, error: Exception, context: Dict[str, Any]):
        """Initialize the tool execution error.

        Args:
            tool_name: Name of the tool that failed
            error: Original exception that was raised
            context: Context dictionary with input, tool_call_id, etc.
        """
        message = f"Tool '{tool_name}' failed: {error}"

        if "input" in context:
            message += f"\nInput: {context['input']}"

        message += "\nSuggestion: Check tool implementation and input validation"

        super().__init__(message=message, code="TOOL_EXECUTION_ERROR", context=context)
        self.tool_name = tool_name
        self.original_error = error


class ToolValidationError(CallModelError):
    """Raised when tool input validation fails.

    This exception includes the validation errors from Pydantic to help
    developers understand what inputs were invalid.

    Example:
        >>> raise ToolValidationError(
        ...     tool_name="weather_tool",
        ...     validation_errors=["Field 'location' is required"]
        ... )
    """

    def __init__(self, tool_name: str, validation_errors: List[str]):
        """Initialize the tool validation error.

        Args:
            tool_name: Name of the tool that failed validation
            validation_errors: List of validation error messages
        """
        message = f"Tool '{tool_name}' validation failed:\n"
        message += "\n".join(f" - {err}" for err in validation_errors)
        message += "\nSuggestion: Ensure input matches the tool's Pydantic schema"

        super().__init__(message=message, code="TOOL_VALIDATION_ERROR")
        self.tool_name = tool_name
        self.validation_errors = validation_errors


class StreamInterruptedError(CallModelError):
    """Raised when a stream is interrupted.

    This exception includes details about the last successful event
    to help developers understand where the stream failed.

    Example:
        >>> raise StreamInterruptedError(
        ...     last_event={"type": "content.delta", "delta": {...}}
        ... )
    """

    def __init__(self, last_event: Optional[Dict[str, Any]] = None):
        """Initialize the stream interrupted error.

        Args:
            last_event: Optional dictionary with the last successful event
        """
        message = "Stream was interrupted"

        if last_event:
            message += f"\nLast successful event: {last_event.get('type', 'unknown')}"

        message += "\nSuggestion: Check network connection and retry"

        super().__init__(message=message, code="STREAM_INTERRUPTED")
        self.last_event = last_event


class MaxToolRoundsExceededError(CallModelError):
    """Raised when tool execution rounds exceed the limit.

    This exception helps developers identify infinite tool loops
    or adjust the max_tool_rounds parameter.

    Example:
        >>> raise MaxToolRoundsExceededError(rounds=10, max_rounds=5)
    """

    def __init__(self, rounds: int, max_rounds: int):
        """Initialize the max tool rounds exceeded error.

        Args:
            rounds: Number of rounds executed
            max_rounds: Maximum allowed rounds
        """
        message = f"Tool execution exceeded maximum rounds ({rounds}/{max_rounds})"
        message += "\nSuggestion: Increase max_tool_rounds or check for tool loops"

        super().__init__(message=message, code="MAX_ROUNDS_EXCEEDED")
        self.rounds = rounds
        self.max_rounds = max_rounds
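The structured fields on these exceptions are intended to be populated by the orchestration layer rather than by tool authors. A minimal sketch, assuming a hypothetical run_tool wrapper and round-limit guard (neither is part of this commit):

from typing import Any, Dict

from openrouter.call_model.exceptions import (
    MaxToolRoundsExceededError,
    ToolExecutionError,
)


async def run_tool(
    tool: Any, params: Dict[str, Any], context: Dict[str, Any], tool_call_id: str
) -> Any:
    # Hypothetical wrapper: re-raise tool failures with name, cause, and input context.
    try:
        return await tool.execute(params, context)
    except Exception as exc:
        raise ToolExecutionError(
            tool_name=getattr(tool, "name", type(tool).__name__),
            error=exc,
            context={"input": params, "tool_call_id": tool_call_id},
        ) from exc


def check_round_limit(rounds: int, max_rounds: int) -> None:
    # Hypothetical guard the tool loop could call before starting another round.
    if rounds > max_rounds:
        raise MaxToolRoundsExceededError(rounds=rounds, max_rounds=max_rounds)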

src/openrouter/call_model/py.typed

Whitespace-only changes.

src/openrouter/call_model/types.py

Lines changed: 109 additions & 0 deletions
@@ -0,0 +1,109 @@
"""Type definitions for the call_model API.

This module defines the core types, enums, and type aliases used throughout
the call_model implementation. All types follow Python typing conventions
with comprehensive docstrings.
"""

from enum import Enum
from typing import Any, Dict, List, Optional, TypedDict


# Type aliases for clarity and maintainability
ToolCallId = str
"""Unique identifier for a tool call."""

EventType = str
"""Type of SSE event (e.g., 'content.delta', 'tool.call')."""

StreamEvent = Dict[str, Any]
"""Raw SSE event dictionary from the API."""


class ToolType(str, Enum):
    """Enumeration of supported tool types.

    Currently only function tools are supported. This enum allows for
    future extension to other tool types.

    Attributes:
        FUNCTION: Standard function-based tool
    """

    FUNCTION = "function"


class ToolContext(TypedDict, total=False):
    """Context passed to tool execute methods.

    This context provides tools with information about the current
    conversation state, enabling context-aware tool execution.

    Attributes:
        number_of_turns: 1-indexed turn number (first turn = 1)
        message_history: List of all messages in the conversation
        model: Primary model being used (if single model)
        models: List of models (if using model routing)
        previous_tool_results: Results from previous tool executions
        request_id: Unique identifier for this request

    Example:
        >>> context = ToolContext(
        ...     number_of_turns=2,
        ...     message_history=[...],
        ...     model="gpt-4",
        ...     previous_tool_results=[...]
        ... )
    """

    number_of_turns: int
    message_history: List[Dict[str, Any]]
    model: Optional[str]
    models: Optional[List[str]]
    previous_tool_results: Optional[List[Dict[str, Any]]]
    request_id: Optional[str]


class ResponseState(str, Enum):
    """State of the ResponseWrapper.

    Tracks the lifecycle of a response from initialization through
    completion or error.

    Attributes:
        INITIALIZED: Response created but not yet consumed
        STREAMING: Currently consuming the stream
        COMPLETED: Stream fully consumed successfully
        ERROR: An error occurred during consumption
    """

    INITIALIZED = "initialized"
    STREAMING = "streaming"
    COMPLETED = "completed"
    ERROR = "error"


class CachedData(TypedDict, total=False):
    """Cached response data for reuse across consumption methods.

    This structure stores parsed data from the response to avoid
    redundant parsing when multiple consumption methods are called.

    Attributes:
        message: Complete message object with all content
        text: Extracted text content only
        tool_calls: Parsed tool call objects
        raw_response: Raw API response dictionary

    Example:
        >>> cache: CachedData = {
        ...     "message": {"role": "assistant", "content": [...]},
        ...     "text": "The weather is sunny",
        ...     "tool_calls": [...]
        ... }
    """

    message: Optional[Dict[str, Any]]
    text: Optional[str]
    tool_calls: Optional[List[Dict[str, Any]]]
    raw_response: Optional[Dict[str, Any]]
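To illustrate how these pieces are expected to fit together in the upcoming ResponseWrapper, here is a small sketch; the wrapper itself is not in this commit, only the types used below are:

from openrouter.call_model.types import (
    CachedData,
    ResponseState,
    StreamEvent,
    ToolContext,
)

# State starts at INITIALIZED and advances as the stream is consumed.
state = ResponseState.INITIALIZED

# Both TypedDicts use total=False, so partially populated dicts type-check.
context: ToolContext = {
    "number_of_turns": 1,
    "message_history": [{"role": "user", "content": "What's the weather in Paris?"}],
    "model": "gpt-4",
}

cache: CachedData = {}
state = ResponseState.STREAMING

# A StreamEvent is just the raw SSE event dictionary from the API.
event: StreamEvent = {"type": "content.delta", "delta": {"text": "The weather is sunny"}}
cache["text"] = event["delta"]["text"]

state = ResponseState.COMPLETED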
