#!/usr/bin/env python3
"""
- AtomicAgent Hook System Demonstration
+ AtomicAgent Hook System Demo

- This example showcases the powerful hook system integration in AtomicAgent,
- demonstrating monitoring, error handling, and intelligent retry mechanisms.
-
- The hook system leverages Instructor's hook system to provide:
- - Comprehensive monitoring of agent execution
- - Robust error handling and recovery
- - Performance metrics and insights
- - Intelligent retry patterns
-
- Run with: python main.py
+ Shows how to monitor agent execution with hooks.
+ Includes error handling and performance metrics.
"""

import os
import time
import logging

import instructor
import openai
from pydantic import Field, ValidationError
from rich.console import Console
from rich.panel import Panel
from rich.table import Table

from atomic_agents import AtomicAgent, AgentConfig
from atomic_agents.context import ChatHistory
from atomic_agents.base.base_io_schema import BaseIOSchema

- # Configure logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)
-
- # Initialize Rich console for pretty output
console = Console()
-
- # Global metrics storage
metrics = {
    "total_requests": 0,
    "successful_requests": 0,
    "failed_requests": 0,
    "parse_errors": 0,
    "retry_attempts": 0,
    "total_response_time": 0.0,
    "start_time": time.time(),
}

- # Global variable to track request start times
_request_start_time = None


class UserQuery(BaseIOSchema):
-    """User input schema for the agent."""
-
    chat_message: str = Field(..., description="User's question or message")


class AgentResponse(BaseIOSchema):
-    """Agent response schema with confidence scoring."""
-
    chat_message: str = Field(..., description="Agent's response to the user")
    confidence: float = Field(..., ge=0.0, le=1.0, description="Confidence score (0.0-1.0)")
    reasoning: str = Field(..., description="Brief explanation of the reasoning")


class DetailedResponse(BaseIOSchema):
-    """More complex response schema to test validation."""
-
    chat_message: str = Field(..., description="Primary response")
    alternative_suggestions: list[str] = Field(default_factory=list, description="Alternative suggestions")
    confidence_level: str = Field(..., description="Must be 'low', 'medium', or 'high'")
    requires_followup: bool = Field(default=False, description="Whether follow-up is needed")


def setup_api_key() -> str:
-    """Setup and validate API key."""
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        console.print("[bold red]Error: OPENAI_API_KEY environment variable not set.[/bold red]")
@@ -85,10 +64,8 @@ def setup_api_key() -> str:


def display_metrics():
-    """Display current performance metrics."""
    runtime = time.time() - metrics["start_time"]
    avg_response_time = metrics["total_response_time"] / metrics["total_requests"] if metrics["total_requests"] > 0 else 0
-
    success_rate = metrics["successful_requests"] / metrics["total_requests"] * 100 if metrics["total_requests"] > 0 else 0

    table = Table(title="🔍 Hook System Performance Metrics", style="cyan")
@@ -107,14 +84,9 @@ def display_metrics():
    console.print(table)


- # Hook functions for comprehensive monitoring and error handling
-
-
def on_parse_error(error):
-    """Handle parse errors with detailed logging and metrics update."""
    metrics["parse_errors"] += 1
    metrics["failed_requests"] += 1
-
    logger.error(f"🚨 Parse error occurred: {type(error).__name__}: {error}")

    if isinstance(error, ValidationError):
@@ -126,37 +98,24 @@ def on_parse_error(error):
    else:
        console.print(f"[bold red]❌ Parse Error:[/bold red] {error}")

-    # In a production system, you might:
-    # - Send error to monitoring service
-    # - Trigger alerts for high error rates
-    # - Store detailed error context for analysis
-

def on_completion_kwargs(**kwargs):
-    """Monitor outgoing API calls."""
    global _request_start_time
    metrics["total_requests"] += 1
-
-    # Log API call details (without sensitive information)
    model = kwargs.get("model", "unknown")
    messages_count = len(kwargs.get("messages", []))
-
    logger.info(f"🚀 API call starting - Model: {model}, Messages: {messages_count}")
-
-    # Store start time for response time calculation
    _request_start_time = time.time()


def on_completion_response(response, **kwargs):
-    """Monitor API responses and calculate metrics."""
    global _request_start_time
    if _request_start_time:
        response_time = time.time() - _request_start_time
        metrics["total_response_time"] += response_time
        logger.info(f"✅ API call completed in {response_time:.2f}s")
-        _request_start_time = None  # Reset for next request
+        _request_start_time = None

-    # Log response details
    if hasattr(response, "usage"):
        usage = response.usage
        logger.info(
@@ -169,41 +128,31 @@ def on_completion_response(response, **kwargs):


def on_completion_error(error, **kwargs):
-    """Handle API errors with retry logic."""
    global _request_start_time
    metrics["failed_requests"] += 1
    metrics["retry_attempts"] += 1

-    # Reset start time on error
    if _request_start_time:
        _request_start_time = None

    logger.error(f"🔥 API error: {type(error).__name__}: {error}")
    console.print(f"[bold red]🔥 API Error:[/bold red] {error}")

-    # In a production system, you might implement:
-    # - Exponential backoff retry logic
-    # - Fallback to different models
-    # - Circuit breaker patterns
-    # - Dead letter queues for failed requests
-

def create_agent_with_hooks(schema_type: type, system_prompt: str = None) -> AtomicAgent:
-    """Create an AtomicAgent with comprehensive hook setup."""
    api_key = setup_api_key()
    client = instructor.from_openai(openai.OpenAI(api_key=api_key))

-    # Create agent with configuration
    config = AgentConfig(
        client=client,
-        model="gpt-4o-mini",  # Using a reliable model for demonstrations
+        model="gpt-5-mini",
+        model_api_parameters={"reasoning_effort": "low"},
        history=ChatHistory(),
        system_prompt=system_prompt,
    )

    agent = AtomicAgent[UserQuery, schema_type](config)

-    # Register comprehensive hook suite
    agent.register_hook("parse:error", on_parse_error)
    agent.register_hook("completion:kwargs", on_completion_kwargs)
    agent.register_hook("completion:response", on_completion_response)
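
The hook names registered above are Instructor's hook events; per the module docstring, `AtomicAgent.register_hook` rides on Instructor's hook system. A minimal sketch of the same wiring on a bare Instructor client, independent of AtomicAgent (the print handlers are illustrative):

```python
import instructor
import openai

client = instructor.from_openai(openai.OpenAI())

def log_kwargs(**kwargs):
    # Fired before each completion call with the outgoing arguments.
    print(f"calling model={kwargs.get('model')}")

def log_error(error, **kwargs):
    # Fired when the completion call raises.
    print(f"call failed: {type(error).__name__}: {error}")

client.on("completion:kwargs", log_kwargs)
client.on("completion:error", log_error)
```
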
@@ -214,10 +163,8 @@ def create_agent_with_hooks(schema_type: type, system_prompt: str = None) -> AtomicAgent:


def demonstrate_basic_hooks():
-    """Demonstrate basic hook functionality with simple responses."""
    console.print(Panel("🔧 Basic Hook System Demonstration", style="bold blue"))

-    # Create agent with basic schema
    agent = create_agent_with_hooks(
        AgentResponse, "You are a helpful assistant. Always provide confident, well-reasoned responses."
    )
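
The query loop that drives this agent is collapsed in the hunk above. A sketch of what it presumably looks like, assuming `AtomicAgent.run` and the `AgentResponse` fields defined earlier (the queries themselves are hypothetical):

```python
queries = ["What is the capital of France?", "Explain recursion in one sentence."]
for query in queries:
    # run() validates the response against AgentResponse via Instructor,
    # firing the registered hooks along the way.
    response = agent.run(UserQuery(chat_message=query))
    console.print(f"{response.chat_message} (confidence: {response.confidence:.2f})")
```
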
@@ -246,10 +193,8 @@ def demonstrate_basic_hooks():


def demonstrate_validation_errors():
-    """Demonstrate hook system handling validation errors."""
    console.print(Panel("🚨 Validation Error Handling Demonstration", style="bold red"))

-    # Create agent with strict validation schema
    agent = create_agent_with_hooks(
        DetailedResponse,
        """You are a helpful assistant. You must respond with:
@@ -261,7 +206,6 @@ def demonstrate_validation_errors():
        Be very strict about the confidence_level field - it must be exactly one of the three allowed values.""",
    )

-    # Queries designed to potentially trigger validation errors
    validation_test_queries = [
        "Give me a simple yes or no answer about whether the sky is blue.",
        "Provide a complex analysis of climate change with multiple perspectives.",
@@ -286,7 +230,6 @@ def demonstrate_validation_errors():


def demonstrate_interactive_mode():
-    """Interactive demonstration where users can test the hook system."""
    console.print(Panel("🎮 Interactive Hook System Testing", style="bold magenta"))

    agent = create_agent_with_hooks(
@@ -329,7 +272,6 @@ def demonstrate_interactive_mode():


def main():
-    """Main demonstration function."""
    console.print(Panel.fit("🎯 AtomicAgent Hook System Comprehensive Demo", style="bold green"))

    console.print(
@@ -346,17 +288,10 @@ def main():
    )

    try:
-        # Run basic demonstration
        demonstrate_basic_hooks()
-
        console.print("\n" + "=" * 50)
-
-        # Run validation error demonstration
        demonstrate_validation_errors()
-
        console.print("\n" + "=" * 50)
-
-        # Interactive mode
        demonstrate_interactive_mode()

    except KeyboardInterrupt:
@@ -365,7 +300,6 @@ def main():
        console.print(f"\n[bold red]Demo error:[/bold red] {e}")
        logger.error(f"Demo error: {e}", exc_info=True)
    finally:
-        # Final metrics display
        console.print("\n" + "=" * 50)
        console.print(Panel("📊 Final Performance Summary", style="bold green"))
        display_metrics()
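
The deleted comments gestured at production patterns (exponential backoff, model fallback, circuit breakers) without showing one. A compact sketch of jittered exponential backoff around `agent.run`, illustrative only and not part of this commit:

```python
import random
import time

def run_with_backoff(agent, query: str, max_attempts: int = 3, base_delay: float = 1.0):
    """Retry agent.run with jittered exponential backoff (illustrative)."""
    for attempt in range(max_attempts):
        try:
            return agent.run(UserQuery(chat_message=query))
        except Exception as error:  # narrow to API/validation errors in real code
            if attempt == max_attempts - 1:
                raise
            delay = base_delay * 2**attempt + random.uniform(0, 0.5)
            logger.warning(f"Attempt {attempt + 1} failed ({error}); retrying in {delay:.1f}s")
            time.sleep(delay)
```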