
Commit 12db8dc

linting, formatting
1 parent ff123b1 commit 12db8dc

2 files changed: +8 -74 lines changed

atomic-agents/tests/agents/test_atomic_agent.py

Lines changed: 2 additions & 2 deletions
@@ -631,12 +631,12 @@ def test_backward_compatibility_no_breaking_changes(mock_instructor, mock_history):
     """Test that hook system addition doesn't break existing functionality."""
     # Ensure mock_history.get_history() returns an empty list
     mock_history.get_history.return_value = []
-
+
     # Ensure the copy method returns a properly configured mock
     copied_mock = Mock(spec=ChatHistory)
     copied_mock.get_history.return_value = []
     mock_history.copy.return_value = copied_mock
-
+
     config = AgentConfig(
         client=mock_instructor,
         model="gpt-4o-mini",

atomic-examples/hooks-example/hooks_example/main.py

Lines changed: 6 additions & 72 deletions
@@ -1,17 +1,9 @@
 #!/usr/bin/env python3
 """
-AtomicAgent Hook System Demonstration
+AtomicAgent Hook System Demo
 
-This example showcases the powerful hook system integration in AtomicAgent,
-demonstrating monitoring, error handling, and intelligent retry mechanisms.
-
-The hook system leverages Instructor's hook system to provide:
-- Comprehensive monitoring of agent execution
-- Robust error handling and recovery
-- Performance metrics and insights
-- Intelligent retry patterns
-
-Run with: python main.py
+Shows how to monitor agent execution with hooks.
+Includes error handling and performance metrics.
 """
 
 import os
@@ -29,14 +21,9 @@
 from atomic_agents.context import ChatHistory
 from atomic_agents.base.base_io_schema import BaseIOSchema
 
-# Configure logging
 logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
 logger = logging.getLogger(__name__)
-
-# Initialize Rich console for pretty output
 console = Console()
-
-# Global metrics storage
 metrics = {
     "total_requests": 0,
     "successful_requests": 0,
@@ -47,35 +34,27 @@
     "start_time": time.time(),
 }
 
-# Global variable to track request start times
 _request_start_time = None
 
 
 class UserQuery(BaseIOSchema):
-    """User input schema for the agent."""
-
     chat_message: str = Field(..., description="User's question or message")
 
 
 class AgentResponse(BaseIOSchema):
-    """Agent response schema with confidence scoring."""
-
     chat_message: str = Field(..., description="Agent's response to the user")
     confidence: float = Field(..., ge=0.0, le=1.0, description="Confidence score (0.0-1.0)")
     reasoning: str = Field(..., description="Brief explanation of the reasoning")
 
 
 class DetailedResponse(BaseIOSchema):
-    """More complex response schema to test validation."""
-
     chat_message: str = Field(..., description="Primary response")
     alternative_suggestions: list[str] = Field(default_factory=list, description="Alternative suggestions")
     confidence_level: str = Field(..., description="Must be 'low', 'medium', or 'high'")
     requires_followup: bool = Field(default=False, description="Whether follow-up is needed")
 
 
 def setup_api_key() -> str:
-    """Setup and validate API key."""
     api_key = os.getenv("OPENAI_API_KEY")
     if not api_key:
         console.print("[bold red]Error: OPENAI_API_KEY environment variable not set.[/bold red]")
@@ -85,10 +64,8 @@ def setup_api_key() -> str:
 
 
 def display_metrics():
-    """Display current performance metrics."""
     runtime = time.time() - metrics["start_time"]
     avg_response_time = metrics["total_response_time"] / metrics["total_requests"] if metrics["total_requests"] > 0 else 0
-
     success_rate = metrics["successful_requests"] / metrics["total_requests"] * 100 if metrics["total_requests"] > 0 else 0
 
     table = Table(title="🔍 Hook System Performance Metrics", style="cyan")
@@ -107,14 +84,9 @@ def display_metrics():
     console.print(table)
 
 
-# Hook functions for comprehensive monitoring and error handling
-
-
 def on_parse_error(error):
-    """Handle parse errors with detailed logging and metrics update."""
     metrics["parse_errors"] += 1
     metrics["failed_requests"] += 1
-
     logger.error(f"🚨 Parse error occurred: {type(error).__name__}: {error}")
 
     if isinstance(error, ValidationError):
@@ -126,37 +98,24 @@ def on_parse_error(error):
     else:
         console.print(f"[bold red]❌ Parse Error:[/bold red] {error}")
 
-    # In a production system, you might:
-    # - Send error to monitoring service
-    # - Trigger alerts for high error rates
-    # - Store detailed error context for analysis
-
 
 def on_completion_kwargs(**kwargs):
-    """Monitor outgoing API calls."""
     global _request_start_time
     metrics["total_requests"] += 1
-
-    # Log API call details (without sensitive information)
     model = kwargs.get("model", "unknown")
     messages_count = len(kwargs.get("messages", []))
-
     logger.info(f"🚀 API call starting - Model: {model}, Messages: {messages_count}")
-
-    # Store start time for response time calculation
     _request_start_time = time.time()
 
 
 def on_completion_response(response, **kwargs):
-    """Monitor API responses and calculate metrics."""
     global _request_start_time
     if _request_start_time:
         response_time = time.time() - _request_start_time
         metrics["total_response_time"] += response_time
         logger.info(f"✅ API call completed in {response_time:.2f}s")
-        _request_start_time = None  # Reset for next request
+        _request_start_time = None
 
-    # Log response details
     if hasattr(response, "usage"):
         usage = response.usage
         logger.info(
@@ -169,41 +128,31 @@ def on_completion_response(response, **kwargs):
 
 
 def on_completion_error(error, **kwargs):
-    """Handle API errors with retry logic."""
     global _request_start_time
     metrics["failed_requests"] += 1
     metrics["retry_attempts"] += 1
 
-    # Reset start time on error
     if _request_start_time:
         _request_start_time = None
 
     logger.error(f"🔥 API error: {type(error).__name__}: {error}")
     console.print(f"[bold red]🔥 API Error:[/bold red] {error}")
 
-    # In a production system, you might implement:
-    # - Exponential backoff retry logic
-    # - Fallback to different models
-    # - Circuit breaker patterns
-    # - Dead letter queues for failed requests
-
 
 def create_agent_with_hooks(schema_type: type, system_prompt: str = None) -> AtomicAgent:
-    """Create an AtomicAgent with comprehensive hook setup."""
     api_key = setup_api_key()
     client = instructor.from_openai(openai.OpenAI(api_key=api_key))
 
-    # Create agent with configuration
     config = AgentConfig(
         client=client,
-        model="gpt-4o-mini",  # Using a reliable model for demonstrations
+        model="gpt-5-mini",
+        model_api_parameters={"reasoning_effort": "low"},
        history=ChatHistory(),
        system_prompt=system_prompt,
     )
 
     agent = AtomicAgent[UserQuery, schema_type](config)
 
-    # Register comprehensive hook suite
     agent.register_hook("parse:error", on_parse_error)
     agent.register_hook("completion:kwargs", on_completion_kwargs)
     agent.register_hook("completion:response", on_completion_response)
@@ -214,10 +163,8 @@ def create_agent_with_hooks(schema_type: type, system_prompt: str = None) -> AtomicAgent:
 
 
 def demonstrate_basic_hooks():
-    """Demonstrate basic hook functionality with simple responses."""
     console.print(Panel("🔧 Basic Hook System Demonstration", style="bold blue"))
 
-    # Create agent with basic schema
     agent = create_agent_with_hooks(
         AgentResponse, "You are a helpful assistant. Always provide confident, well-reasoned responses."
     )
@@ -246,10 +193,8 @@ def demonstrate_basic_hooks():
 
 
 def demonstrate_validation_errors():
-    """Demonstrate hook system handling validation errors."""
     console.print(Panel("🚨 Validation Error Handling Demonstration", style="bold red"))
 
-    # Create agent with strict validation schema
     agent = create_agent_with_hooks(
         DetailedResponse,
         """You are a helpful assistant. You must respond with:
@@ -261,7 +206,6 @@ def demonstrate_validation_errors():
 Be very strict about the confidence_level field - it must be exactly one of the three allowed values.""",
     )
 
-    # Queries designed to potentially trigger validation errors
     validation_test_queries = [
         "Give me a simple yes or no answer about whether the sky is blue.",
         "Provide a complex analysis of climate change with multiple perspectives.",
@@ -286,7 +230,6 @@ def demonstrate_validation_errors():
 
 
 def demonstrate_interactive_mode():
-    """Interactive demonstration where users can test the hook system."""
     console.print(Panel("🎮 Interactive Hook System Testing", style="bold magenta"))
 
     agent = create_agent_with_hooks(
@@ -329,7 +272,6 @@ def demonstrate_interactive_mode():
 
 
 def main():
-    """Main demonstration function."""
     console.print(Panel.fit("🎯 AtomicAgent Hook System Comprehensive Demo", style="bold green"))
 
     console.print(
@@ -346,17 +288,10 @@ def main():
     )
 
     try:
-        # Run basic demonstration
         demonstrate_basic_hooks()
-
         console.print("\n" + "=" * 50)
-
-        # Run validation error demonstration
         demonstrate_validation_errors()
-
         console.print("\n" + "=" * 50)
-
-        # Interactive mode
         demonstrate_interactive_mode()
 
     except KeyboardInterrupt:
@@ -365,7 +300,6 @@ def main():
         console.print(f"\n[bold red]Demo error:[/bold red] {e}")
         logger.error(f"Demo error: {e}", exc_info=True)
     finally:
-        # Final metrics display
         console.print("\n" + "=" * 50)
         console.print(Panel("📊 Final Performance Summary", style="bold green"))
         display_metrics()
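
For reference, the hook wiring this file keeps after the cleanup reduces to a short pattern. A minimal sketch, assuming OPENAI_API_KEY is set and that AtomicAgent and AgentConfig are importable from the top-level atomic_agents package (the diff only shows the ChatHistory and BaseIOSchema imports); the schema fields, model settings, and hook event name come from the diff above:

import os

import instructor
import openai
from pydantic import Field

# Import path assumed; the diff does not show where AtomicAgent/AgentConfig come from.
from atomic_agents import AtomicAgent, AgentConfig
from atomic_agents.context import ChatHistory
from atomic_agents.base.base_io_schema import BaseIOSchema


class UserQuery(BaseIOSchema):
    chat_message: str = Field(..., description="User's question or message")


class AgentResponse(BaseIOSchema):
    chat_message: str = Field(..., description="Agent's response to the user")


def on_parse_error(error):
    # Fires when structured-output parsing or validation fails.
    print(f"parse error: {type(error).__name__}: {error}")


client = instructor.from_openai(openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"]))
config = AgentConfig(
    client=client,
    model="gpt-5-mini",
    model_api_parameters={"reasoning_effort": "low"},
    history=ChatHistory(),
)
agent = AtomicAgent[UserQuery, AgentResponse](config)
agent.register_hook("parse:error", on_parse_error)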
