diff --git a/01_ai_agents_first/09_dynamic_instructions/readme.md b/01_ai_agents_first/09_dynamic_instructions/readme.md
index d889f867..34d6b4e0 100644
--- a/01_ai_agents_first/09_dynamic_instructions/readme.md
+++ b/01_ai_agents_first/09_dynamic_instructions/readme.md
@@ -29,7 +29,7 @@ You can use a **function** that changes the instructions:
 ```python
 from agents import RunContextWrapper, Agent
 
-def dynamic_instructions(context: RunContextWrapper, agent: Agent) -> str:
+def dynamic_instructions(ctx: RunContextWrapper, agent: Agent) -> str:
     return f"You are {agent.name}. Adapt to the user's needs."
 
 agent = Agent(
@@ -47,15 +47,15 @@ Dynamic instruction functions receive two parameters:
 ```python
 from agents import RunContextWrapper, Agent
 
-def dynamic_instructions(context: RunContextWrapper, agent: Agent) -> str:
-    return f"The user's name is {context.context.name}. Help them with their questions."
+def dynamic_instructions(ctx: RunContextWrapper, agent: Agent) -> str:
+    return f"The user's name is {ctx.context.name}. Help them with their questions."
 ```
 
 ### 📋 Parameters Explained
 
 | Parameter | Type | What It Contains |
 |-----------|------|------------------|
-| **`context`** | `RunContextWrapper` | The conversation context, user data, messages |
+| **`ctx`** | `RunContextWrapper` | Wrapper exposing `context` (your data) and `usage` (LLM usage) |
 | **`agent`** | `Agent` | The agent object with name, tools, settings |
 | **Returns** | `str` | The instructions string for the agent |
 
@@ -68,7 +68,7 @@ def dynamic_instructions(context: RunContextWrapper, agent: Agent) -> str:
 ```python
 from agents import RunContextWrapper, Agent
 
-def basic_dynamic(context: RunContextWrapper, agent: Agent) -> str:
+def basic_dynamic(ctx: RunContextWrapper, agent: Agent) -> str:
     return f"You are {agent.name}. Be helpful and friendly."
 
 agent = Agent(
@@ -80,9 +80,9 @@ agent = Agent(
 ### 2. **Context-Aware Instructions**
 
 ```python
-def context_aware(context: RunContextWrapper, agent: Agent) -> str:
+def context_aware(ctx: RunContextWrapper, agent: Agent) -> str:
     # Check how many messages in the conversation
-    message_count = len(getattr(context, 'messages', []))
+    message_count = len(getattr(ctx.context, 'messages', []))
 
     if message_count == 0:
         return "You are a welcoming assistant. Introduce yourself!"
@@ -102,7 +102,7 @@ agent = Agent(
 ```python
 import datetime
 
-def time_based(context: RunContextWrapper, agent: Agent) -> str:
+def time_based(ctx: RunContextWrapper, agent: Agent) -> str:
     current_hour = datetime.datetime.now().hour
 
     if 6 <= current_hour < 12:
@@ -129,7 +129,7 @@ class StatefulInstructions:
     def __init__(self):
         self.interaction_count = 0
 
-    def __call__(self, context: RunContextWrapper, agent: Agent) -> str:
+    def __call__(self, ctx: RunContextWrapper, agent: Agent) -> str:
         self.interaction_count += 1
 
         if self.interaction_count == 1:
@@ -152,7 +152,7 @@ agent = Agent(
 ```python
 import asyncio
 
-async def async_instructions(context: RunContextWrapper, agent: Agent) -> str:
+async def async_instructions(ctx: RunContextWrapper, agent: Agent) -> str:
     # Simulate fetching data from database
     await asyncio.sleep(0.1)
     current_time = datetime.datetime.now()
@@ -169,36 +169,43 @@ agent = Agent(
 ---
 
-## 🔍 Understanding Context and Agent
-
-### **Context Parameter**
-The `context` contains:
-- **Messages**: Conversation history
-- **User data**: Custom user information
-- **Run state**: Current execution state
-- **Metadata**: Additional information
-
+## 🔍 Understanding Ctx and Agent
+
+### **Ctx Parameter**
+The `ctx` contains context and usage:
+1. **Context** can contain any data you provide:
+   - **Messages**: Conversation history
+   - **User data**: Custom user information
+   - **Run state**: Current execution state
+   - **Metadata**: Additional information
+2. **Usage** contains model usage:
+   - **requests**: Total requests made to the LLM API
+   - **input_tokens**: Total input tokens sent, across all requests
+   - **input_tokens_details**: Details about the input tokens
+   - **output_tokens**: Total output tokens received
+   - **more**: `output_tokens_details`, `total_tokens`, and the `add()` method
 ```python
-def explore_context(context: RunContextWrapper, agent: Agent) -> str:
+def explore_context(ctx: RunContextWrapper, agent: Agent) -> str:
     # Access conversation messages
-    messages = getattr(context, 'messages', [])
+    messages = getattr(ctx.context, 'messages', [])
     message_count = len(messages)
 
     # Access user context (if available)
-    user_name = getattr(context.context, 'name', 'User')
+    user_name = getattr(ctx.context, 'name', 'User')
 
     return f"You are {agent.name}. Talking to {user_name}. Message #{message_count}."
 ```
 
 ### **Agent Parameter**
 The `agent` contains:
-- **Name**: Agent's identity
-- **Tools**: Available tools
-- **Settings**: Model settings
-- **Configuration**: Agent configuration
+- **name**: Agent's identity
+- **tools**: Available tools
+- **model_settings**: Model settings
+- **handoffs**: Agent handoffs
+- **many more**: `hooks`, `input_guardrails`, `output_type`, etc.
 
 ```python
-def explore_agent(context: RunContextWrapper, agent: Agent) -> str:
+def explore_agent(ctx: RunContextWrapper, agent: Agent) -> str:
     # Access agent properties
     agent_name = agent.name
     tool_count = len(agent.tools)
@@ -227,7 +234,7 @@ def explore_agent(context: RunContextWrapper, agent: Agent) -> str:
 ```python
 from agents import RunContextWrapper, Agent
 
-def my_dynamic_instructions(context: RunContextWrapper, agent: Agent) -> str:
+def my_dynamic_instructions(ctx: RunContextWrapper, agent: Agent) -> str:
     return f"You are {agent.name}. You love helping people learn Python!"
 agent = Agent(
@@ -242,8 +249,8 @@ print(result.final_output)
 ### Exercise 2: Message Count Aware
 
 ```python
-def message_count_aware(context: RunContextWrapper, agent: Agent) -> str:
-    message_count = len(getattr(context, 'messages', []))
+def message_count_aware(ctx: RunContextWrapper, agent: Agent) -> str:
+    message_count = len(getattr(ctx.context, 'messages', []))
 
     if message_count == 0:
         return "You are a welcoming assistant. Say hello!"
@@ -282,8 +289,8 @@ agent = Agent(
 - Try the examples in the `hello_agent/` folder
 - Experiment with your own dynamic instructions
-- Learn about [Context Management](../10_context_management/)
-- Explore [Advanced Agent Patterns](../11_advanced_patterns/)
+- Learn about [Context Management](../08_local_context/readme.md)
+- Explore [Context Engineering](../28_context_engineering/readme.md)
 
 ---
diff --git a/01_ai_agents_first/15_advanced_tools/README.md b/01_ai_agents_first/15_advanced_tools/README.md
index c8f233b0..838695a7 100644
--- a/01_ai_agents_first/15_advanced_tools/README.md
+++ b/01_ai_agents_first/15_advanced_tools/README.md
@@ -246,11 +246,10 @@ asyncio.run(main())
 #### ✅ Expected Outcome
 ```
 --- Running as a regular user ---
-Final Output: User client_456 has been deleted.
+Final Output: The deletion of users is not supported by the current system. However, I can help you retrieve user data if you'd like. Could you clarify if you need information about user client_456?
 
 --- Running as an admin ---
 Final Output: User user_123 has been deleted.
----
 ```
 
 ## 🏁 Wrap-Up
diff --git a/01_ai_agents_first/15_advanced_tools/tools_masterclass/part4.py b/01_ai_agents_first/15_advanced_tools/tools_masterclass/part4.py
index 6e24b2db..45a10292 100644
--- a/01_ai_agents_first/15_advanced_tools/tools_masterclass/part4.py
+++ b/01_ai_agents_first/15_advanced_tools/tools_masterclass/part4.py
@@ -23,7 +23,7 @@ openai_client=external_client
 )
 
-@function_tool(description_override="", failure_error_function=)
+@function_tool(description_override="", failure_error_function=None)
 def get_weather(city: str) -> str:
     try:
         # If Call Fails Call another service i.e get_weather_alternative
@@ -35,7 +35,7 @@ def get_weather(city: str) -> str:
     except Exception as e:
         raise Exception(f"An unexpected error occurred: {str(e)}")
 
-base_agent: Agent = Agent(name="WeatherAgent", instructions="" model=llm_model, tools=[get_weather])
+base_agent: Agent = Agent(name="WeatherAgent", instructions="", model=llm_model, tools=[get_weather])
 
 async def main():
     res = await Runner.run(base_agent, "What is weather in Lahore")
diff --git a/01_ai_agents_first/19_agent_lifecycle/README.md b/01_ai_agents_first/19_agent_lifecycle/README.md
index f96c50cd..1a49c9a9 100644
--- a/01_ai_agents_first/19_agent_lifecycle/README.md
+++ b/01_ai_agents_first/19_agent_lifecycle/README.md
@@ -68,7 +68,7 @@ async def on_llm_start(context, agent, system_prompt, input_items):
 **When this happens:**
 - The agent needs to "think" or generate text
 - The agent calls the Large Language Model (LLM) for reasoning
-- This can happen **multiple times** during one agent's turn
+- This occurs **exactly once** during each agent's turn
 - The agent is asking: "How should I respond?" or "What should I do?"
 
 **Real Example:**
@@ -106,7 +106,7 @@ async def on_tool_start(context, agent, tool):
 **When this happens:**
 - The agent needs to perform a specific action
 - The agent calls an external function/API/database
-- This can happen **multiple times** during one agent's turn
+- This occurs once **per tool call**; a single turn may include several tool calls, or none
 - Common tools: database_lookup, send_email, web_search, calculator
 
 **Real Example:**
@@ -195,7 +195,7 @@ This is the **most confusing part** for beginners! Let's clarify:
 - Like clocking in at work
 
 **📞 `on_llm_start` - Agent Needs to Think**
-- Can happen **multiple times** during agent's turn
+- Can happen **multiple times** during agent's execution
 - The agent asks the AI brain for help with reasoning
 - Like calling an expert consultant for advice
 
@@ -205,6 +205,7 @@ This is the **most confusing part** for beginners! Let's clarify:
 📞 on_llm_start: "AI, how should I respond to this?"
 ✅ on_llm_end: AI gives advice
 🔧 on_tool_start: Use a tool based on AI advice
+🔧 on_tool_end: Finished using the tool
 📞 on_llm_start: "AI, how should I format the results?"
 ✅ on_llm_end: AI gives formatting advice
 🏁 on_end: "Task complete!"
@@ -215,7 +216,7 @@ This is the **most confusing part** for beginners! Let's clarify:
 ## Simple Example
 
 ```python
-from openai_agents import Agent, AgentHooksBase
+from agents import Agent, AgentHooksBase
 
 # Create a custom hook class for our agent
 class MyAgentHooks(AgentHooksBase):
@@ -250,7 +251,7 @@ my_agent.hooks = MyAgentHooks()
 ## Advanced Example with Detailed Tracking
 
 ```python
-from openai_agents import Agent, AgentHooksBase
+from agents import Agent, AgentHooksBase
 import time
 from datetime import datetime
@@ -374,7 +375,7 @@ async def on_start(self, context, agent):  # Proper async
     print("Started")
 
 async def on_tool_end(self, context, agent, tool, result):
-    # Just observe and log, don't try to modify
+    # Just observe and log, don't try to return a value
     print(f"Tool {tool.name} returned: {result}")
 ```
diff --git a/01_ai_agents_first/20_run_lifecycle/README.md b/01_ai_agents_first/20_run_lifecycle/README.md
index f66ef44f..7244a5af 100644
--- a/01_ai_agents_first/20_run_lifecycle/README.md
+++ b/01_ai_agents_first/20_run_lifecycle/README.md
@@ -172,7 +172,6 @@ User: "My premium account isn't working and I need a refund"
 📞 on_llm_start: CustomerService asks AI about escalation
 🧠✨ on_llm_end: AI suggests escalating to TechnicalSupport
 🏃‍♂️➡️🏃‍♀️ on_handoff: CustomerService → TechnicalSupport
-✅ on_agent_end: CustomerService finished with "Escalating to tech support"
 
 🌅 on_agent_start: TechnicalSupport becomes active
 📞 on_llm_start: TechnicalSupport asks AI about account issues
@@ -182,7 +181,6 @@ User: "My premium account isn't working and I need a refund"
 📞 on_llm_start: TechnicalSupport asks AI about refunds
 🧠✨ on_llm_end: AI suggests escalating to BillingManager
 🏃‍♂️➡️🏃‍♀️ on_handoff: TechnicalSupport → BillingManager
-✅ on_agent_end: TechnicalSupport finished with "Issue confirmed, escalating"
 
 🌅 on_agent_start: BillingManager becomes active
 📞 on_llm_start: BillingManager asks AI about refund process
@@ -223,8 +221,7 @@ Run Hooks See: Agent Hooks See:
 ## Simple Example
 
 ```python
-from openai_agents import Agent, RunHooksBase
-from openai_agents.orchestration import run
+from agents import Agent, RunHooksBase, Runner
 
 # Create a system-wide monitoring class
 class SystemMonitor(RunHooksBase):
@@ -271,10 +268,10 @@ billing_manager = Agent(name="BillingManager")
 system_monitor = SystemMonitor()
 
 # Run with system-wide monitoring
-result = await run(
-    agents=customer_service,
+result = Runner.run_sync(
+    starting_agent=customer_service,
     input="I need help with my account",
-    run_hooks=system_monitor,  # This monitors EVERYTHING
+    hooks=system_monitor,  # This monitors EVERYTHING
 )
 ```
@@ -372,7 +369,6 @@ Agent A:
 4. 🔨 on_tool_start (Agent A uses tool)
 5. ✅🔨 on_tool_end (Agent A tool completes)
 6. 🏃‍♂️➡️🏃‍♀️ on_handoff (A → B)
-7. ✅ on_agent_end (Agent A finishes)
 
 Agent B:
 8. 🌅 on_agent_start (Agent B becomes active)
@@ -449,17 +445,17 @@ class UserExperienceTracker(RunHooksBase):
 # Confusing run hooks with agent hooks
 agent.hooks = RunHooksBase()  # Wrong! Use AgentHooksBase for agents
 
-# Forgetting to pass run_hooks to run()
-result = run(agents=agent1)  # No monitoring!
+# Forgetting to pass run hooks to Runner methods
+result = Runner.run_sync(starting_agent=agent1)  # No monitoring!
 ```
 
 ### ✅ Do This Instead:
 ```python
 # Correct setup for run hooks
 system_monitor = MyRunHooks()
-result = await run(
-    agents=agent1
-    run_hooks=system_monitor  # Correct!
+result = await Runner.run(
+    starting_agent=agent1,
+    hooks=system_monitor  # Correct!
 )
 
 # Agent hooks are separate
diff --git a/01_ai_agents_first/28_context_engineering/readme.md b/01_ai_agents_first/28_context_engineering/readme.md
index 8012cb7b..c601a79a 100644
--- a/01_ai_agents_first/28_context_engineering/readme.md
+++ b/01_ai_agents_first/28_context_engineering/readme.md
@@ -77,7 +77,7 @@ Think of it like a stage play:
 
 ---
 
-Sure! Here's a **comprehensive tutorial on Context Engineering**, ideal for developers, product designers, and AI enthusiasts working with language models.
+Here's a **comprehensive tutorial on Context Engineering**, ideal for developers, product designers, and AI enthusiasts working with language models.
 
 ---
 
diff --git a/01_ai_agents_first/projects/Agentic-rag/basic/agent/agent_with_custom_session.py b/01_ai_agents_first/projects/Agentic-rag/basic/agent/agent_with_custom_session.py
index 7f84a498..37dad00c 100644
--- a/01_ai_agents_first/projects/Agentic-rag/basic/agent/agent_with_custom_session.py
+++ b/01_ai_agents_first/projects/Agentic-rag/basic/agent/agent_with_custom_session.py
@@ -11,7 +11,7 @@ import os
 
 from custom_sessions.redis_session import RedisSession
-from openai_agents import Agent, Runner, Tool
+from agents import Agent, Runner, Tool
 
 # from custom_sessions.supabase_session import SupabaseSessionMinimal
 # from custom_sessions.postgres_session import PostgresSession