
Commit 3145b2c

fix: Sync with the latest example notebooks (#1089)
* sync with the latest example notebooks
* set correct google genai example doc
* thanks Copilot
1 parent 210dac7 commit 3145b2c


15 files changed: +510 −500 lines changed


docs/v2/examples/autogen.mdx

Lines changed: 35 additions & 24 deletions
@@ -1,6 +1,6 @@
 ---
-title: 'Autogen Example'
-description: 'Using Autogen with AgentOps for agent chat monitoring'
+title: 'AutoGen'
+description: 'Microsoft Autogen Chat Example'
 ---
 {/* SOURCE_FILE: examples/autogen/AgentChat.ipynb */}

@@ -10,25 +10,26 @@ _View Notebook on <a href={'https://github.com/AgentOps-AI/agentops/blob/main/ex

 AgentOps automatically configures itself when it's initialized meaning your agent run data will be tracked and logged to your AgentOps dashboard right away.

-First let's install the required packages.
+First let's install the required packages
+
+

 ## Installation
 <CodeGroup>
 ```bash pip
-pip install -U autogen-agentchat "autogen-ext[openai]" agentops python-dotenv
+pip install "autogen-ext[openai]" -U agentops autogen-agentchat python-dotenv
 ```
 ```bash poetry
-poetry add autogen-agentchat autogen-ext agentops python-dotenv
-# For autogen-ext[openai] with poetry, you might need to specify openai as an extra:
-# poetry add autogen-ext -E openai
+poetry add "autogen-ext[openai]" -U agentops autogen-agentchat python-dotenv
 ```
 ```bash uv
-uv add autogen-agentchat "autogen-ext[openai]" agentops python-dotenv
+uv add "autogen-ext[openai]" -U agentops autogen-agentchat python-dotenv
 ```
 </CodeGroup>

-## Setup
-Then import them.
+Then import them
+
+
 ```python
 import os
 from dotenv import load_dotenv
@@ -51,23 +52,27 @@ Next, we'll set our API keys. There are several ways to do this, the code below

 [Get an AgentOps API key](https://agentops.ai/settings/projects)

-1. Create an environment variable in a `.env` file or other method. By default, the AgentOps `init()` function will look for an environment variable named `AGENTOPS_API_KEY`. Or...
+1. Create an environment variable in a .env file or other method. By default, the AgentOps `init()` function will look for an environment variable named `AGENTOPS_API_KEY`. Or...

 2. Replace `<your_agentops_key>` below and pass in the optional `api_key` parameter to the AgentOps `init(api_key=...)` function. Remember not to commit your API key to a public repo!

+
 ```python
 load_dotenv()
-os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY", "your_agentops_api_key_here")
+os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY", "your_api_key_here")
 os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "your_openai_api_key_here")
 ```

+
 ```python
 # When initializing AgentOps, you can pass in optional tags to help filter sessions
 agentops.init(auto_start_session=False)
-tracer = agentops.start_trace(trace_name="Microsoft Agent Chat Example", tags=["autogen-chat", "microsoft-autogen", "agentops-example"])
+tracer = agentops.start_trace(
+    trace_name="Microsoft Agent Chat Example", tags=["autogen-chat", "microsoft-autogen", "agentops-example"]
+)
 ```

-AutoGen will now start automatically tracking:
+AutoGen will now start automatically tracking

 * LLM prompts and completions
 * Token usage and costs
@@ -77,9 +82,11 @@ AutoGen will now start automatically tracking:
 * Errors

 # Simple Chat Example
+
+
 ```python
 # Define model and API key
-model_name = "gpt-4-turbo" # Or "gpt-4o" / "gpt-4o-mini" as per migration guide examples
+model_name = "gpt-4-turbo"  # Or "gpt-4o" / "gpt-4o-mini" as per migration guide examples
 api_key = os.getenv("OPENAI_API_KEY")

 # Create the model client
@@ -88,26 +95,27 @@ model_client = OpenAIChatCompletionClient(model=model_name, api_key=api_key)
 # Create the agent that uses the LLM.
 assistant = AssistantAgent(
     name="assistant",
-    system_message="You are a helpful assistant.", # Added system message for clarity
-    model_client=model_client
+    system_message="You are a helpful assistant.",  # Added system message for clarity
+    model_client=model_client,
 )

 user_proxy_initiator = UserProxyAgent("user_initiator")

+
 async def main():
-    termination = MaxMessageTermination(max_messages=2)
+    termination = MaxMessageTermination(max_messages=2)

     group_chat = RoundRobinGroupChat(
-        [user_proxy_initiator, assistant], # Corrected: agents as positional argument
-        termination_condition=termination
+        [user_proxy_initiator, assistant],  # Corrected: agents as positional argument
+        termination_condition=termination,
     )
-
+
     chat_task = "How can I help you today?"
     print(f"User Initiator: {chat_task}")

     try:
         stream = group_chat.run_stream(task=chat_task)
-        await Console().run(stream)
+        await Console().run(stream)
         agentops.end_trace(tracer, end_state="Success")

     except StdinNotImplementedError:
@@ -120,14 +128,16 @@ async def main():
     finally:
         await model_client.close()

-if __name__ == '__main__':
+
+if __name__ == "__main__":
     try:
         loop = asyncio.get_running_loop()
     except RuntimeError:
         loop = None

     if loop and loop.is_running():
         import nest_asyncio
+
         nest_asyncio.apply()
         asyncio.run(main())
     else:
@@ -138,7 +148,8 @@ You can view data on this run at [app.agentops.ai](https://app.agentops.ai).

 The dashboard will display LLM events for each message sent by each agent, including those made by the human user.

+
 <script type="module" src="/scripts/github_stars.js"></script>
 <script type="module" src="/scripts/scroll-img-fadein-animation.js"></script>
 <script type="module" src="/scripts/button_heartbeat_animation.js"></script>
-<script type="module" src="/scripts/adjust_api_dynamically.js"></script>
+<script type="module" src="/scripts/adjust_api_dynamically.js"></script>
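
The updated AutoGen doc follows a manual-trace pattern: initialize AgentOps with `auto_start_session=False`, open a named trace, and close it explicitly. Below is a minimal standalone sketch of that pattern; the trace name, tags, and the explicit error branch are illustrative assumptions, not part of this commit.

```python
# Minimal sketch of the init/start_trace/end_trace pattern used in the updated
# AutoGen example. Assumes AGENTOPS_API_KEY is available via the environment or
# a .env file; the trace name and tags below are illustrative.
import os

from dotenv import load_dotenv

import agentops

load_dotenv()
os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY", "your_api_key_here")

# Initialize without auto-starting a session, then manage the trace explicitly.
agentops.init(auto_start_session=False)
tracer = agentops.start_trace(
    trace_name="Manual Trace Sketch", tags=["autogen-chat", "agentops-example"]
)

try:
    # ... run your AutoGen agents here ...
    agentops.end_trace(tracer, end_state="Success")
except Exception:
    # Ending with an error state is an assumption; the doc only shows the "Success" path.
    agentops.end_trace(tracer, end_state="Error")
```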

docs/v2/examples/examples.mdx

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@ description: 'Examples of AgentOps with various integrations'
 Claude integration with tool usage and advanced features
 </Card>

-<Card title="Google Generative AI" icon={<img src="https://www.github.com/agentops-ai/agentops/blob/main/docs/images/external/deepmind/gemini-logo.png?raw=true" alt="Gemini" />} iconType="image" href="/v2/examples/google_generative_ai">
+<Card title="Google Generative AI" icon={<img src="https://www.github.com/agentops-ai/agentops/blob/main/docs/images/external/deepmind/gemini-logo.png?raw=true" alt="Gemini" />} iconType="image" href="/v2/examples/google_genai">
 Google Gemini models and their examples
 </Card>


docs/v2/examples/google_adk.mdx

Lines changed: 25 additions & 36 deletions
@@ -1,5 +1,5 @@
 ---
-title: 'Google Adk'
+title: 'Google ADK'
 description: 'Google ADK Example: Human Approval Workflow with AgentOps'
 ---
 {/* SOURCE_FILE: examples/google_adk/human_approval.ipynb */}
@@ -74,7 +74,7 @@ APP_NAME = "human_approval_app_notebook"
 USER_ID = "test_user_notebook_123"
 SESSION_ID = "approval_session_notebook_456"
 MODEL_NAME = "gemini-1.5-flash"
-agentops.start_trace(trace_name=APP_NAME, tags=["google_adk","notebook"])
+agentops.start_trace(trace_name=APP_NAME, tags=["google_adk", "notebook"])
 ```

 ## 3. Define Schemas
@@ -86,6 +86,8 @@ Pydantic models are used to define the structure of data for approval requests a
 class ApprovalRequest(BaseModel):
     amount: float = Field(description="The amount requiring approval")
     reason: str = Field(description="The reason for the request")
+
+
 class ApprovalDecision(BaseModel):
     decision: str = Field(description="The approval decision: 'approved' or 'rejected'")
     comments: str = Field(description="Additional comments from the approver")
@@ -98,10 +100,10 @@ This tool now directly prompts the user for an approval decision. In a real-worl

 ```python
 async def external_approval_tool(amount: float, reason: str) -> str:
-    """
+    """
     Prompts for human approval and returns the decision as a JSON string.
     """
-    print(f"🔔 HUMAN APPROVAL REQUIRED:")
+    print("🔔 HUMAN APPROVAL REQUIRED:")
     print(f" Amount: ${amount:,.2f}")
     print(f" Reason: {reason}")
     decision = ""
@@ -112,12 +114,8 @@ async def external_approval_tool(amount: float, reason: str) -> str:
     comments = input(" Enter comments (optional): ").strip()
     print(f" Decision: {decision.upper()}")
     print(f" Comments: {comments if comments else 'N/A'}")
-    return json.dumps({
-        "decision": decision,
-        "comments": comments,
-        "amount": amount,
-        "reason": reason
-    })
+    return json.dumps({"decision": decision, "comments": comments, "amount": amount, "reason": reason})
+

 # Create the approval tool instance
 approval_tool = FunctionTool(func=external_approval_tool)
@@ -145,13 +143,13 @@ prepare_request = LlmAgent(
     4. Respond with a summary of what will be submitted for approval
     If the user input is missing amount or reason, ask for clarification.
     """,
-    output_key="request_prepared"
+    output_key="request_prepared",
 )

 # Agent 2: Request human approval using the tool
 request_approval = LlmAgent(
     model=MODEL_NAME,
-    name="RequestHumanApprovalAgent",
+    name="RequestHumanApprovalAgent",
     description="Calls the external approval system with prepared request details",
     instruction="""You are a human approval request agent.
     Your task:
@@ -162,7 +160,7 @@ request_approval = LlmAgent(
     Always use the exact values from the session state for the tool call.
     """,
     tools=[approval_tool],
-    output_key="approval_requested"
+    output_key="approval_requested",
 )

 # Agent 3: Process the approval decision
@@ -180,7 +178,7 @@ process_decision = LlmAgent(

     Be professional and helpful in your response.
     """,
-    output_key="final_decision"
+    output_key="final_decision",
 )
 ```

@@ -193,7 +191,7 @@ Combine the agents into a sequential workflow. The `SequentialAgent` ensures tha
 approval_workflow = SequentialAgent(
     name="HumanApprovalWorkflowNotebook",
     description="Complete workflow for processing approval requests with human oversight",
-    sub_agents=[prepare_request, request_approval, process_decision]
+    sub_agents=[prepare_request, request_approval, process_decision],
 )
 ```

@@ -205,11 +203,7 @@ Set up an in-memory session service and the workflow runner.
 ```python
 session_service = InMemorySessionService()
 # Create runner
-workflow_runner = Runner(
-    agent=approval_workflow,
-    app_name=APP_NAME,
-    session_service=session_service
-)
+workflow_runner = Runner(agent=approval_workflow, app_name=APP_NAME, session_service=session_service)
 ```

 ## 8. Helper Function to Run Workflow
@@ -220,15 +214,12 @@ This function encapsulates the logic to run the workflow for a given user reques
 ```python
 async def run_approval_workflow_notebook(user_request: str, session_id: str):
     """Run the complete approval workflow with a user request in the notebook environment"""
-    print(f"{'='*60}")
+    print(f"{'=' * 60}")
     print(f" Starting Approval Workflow for Session: {session_id}")
-    print(f"{'='*60}")
+    print(f"{'=' * 60}")
     print(f"User Request: {user_request}")
     # Create user message
-    user_content = types.Content(
-        role='user',
-        parts=[types.Part(text=user_request)]
-    )
+    user_content = types.Content(role="user", parts=[types.Part(text=user_request)])
     step_count = 0
     final_response = "No response received"
     # Run the workflow
@@ -247,12 +238,12 @@ async def run_approval_workflow_notebook(user_request: str, session_id: str):
         final_response = response_text
     session = await session_service.get_session(
         app_name=APP_NAME,
-        user_id=USER_ID,
+        user_id=USER_ID,
         session_id=session_id,
     )
-    print(f"{'='*60}")
+    print(f"{'=' * 60}")
     print(f"📊 Workflow Complete - Session State ({session_id}):")
-    print(f"{'='*60}")
+    print(f"{'=' * 60}")
     for key, value in session.state.items():
         print(f" {key}: {value}")
     print(f"🎯 Final Response: {final_response}")
@@ -269,18 +260,16 @@ async def main_notebook():
     test_requests = [
         "I need approval for $750 for team lunch and celebrations",
         "Please approve $3,000 for a conference ticket and travel expenses",
-        "I need $12,000 approved for critical software licenses renewal"
+        "I need $12,000 approved for critical software licenses renewal",
     ]
     for i, request in enumerate(test_requests, 1):
-        current_session_id = f"approval_session_notebook_{456 + i -1}"
+        current_session_id = f"approval_session_notebook_{456 + i - 1}"
         # Create the session before running the workflow
-        await session_service.create_session(
-            app_name=APP_NAME,
-            user_id=USER_ID,
-            session_id=current_session_id
-        )
+        await session_service.create_session(app_name=APP_NAME, user_id=USER_ID, session_id=current_session_id)
         print(f"Created session: {current_session_id}")
         await run_approval_workflow_notebook(request, current_session_id)
+
+
 try:
     asyncio.run(main_notebook())
     agentops.end_trace(end_state="Success")
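
The main code refactor in this file collapses the approval tool's multi-line `json.dumps` call into a single line. The sketch below isolates just that return shape and checks it against the `ApprovalDecision` schema; it is a synchronous, non-interactive stand-in (the committed tool is async and prompts via `input()`), and the default arguments are assumptions for illustration.

```python
# Illustrative stand-in for the flattened approval-tool return value in this diff.
# Synchronous and non-interactive for brevity; the real tool is async and prompts the user.
import json

from pydantic import BaseModel, Field


class ApprovalDecision(BaseModel):
    decision: str = Field(description="The approval decision: 'approved' or 'rejected'")
    comments: str = Field(description="Additional comments from the approver")


def external_approval_tool(amount: float, reason: str, decision: str = "approved", comments: str = "") -> str:
    """Return the approval outcome as a single-line JSON string, matching the updated doc."""
    return json.dumps({"decision": decision, "comments": comments, "amount": amount, "reason": reason})


if __name__ == "__main__":
    raw = external_approval_tool(750.0, "team lunch and celebrations")
    payload = json.loads(raw)
    # Validate only the fields the ApprovalDecision schema defines.
    parsed = ApprovalDecision(decision=payload["decision"], comments=payload["comments"])
    print(parsed)
```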
