Skip to content

Commit d67fc9c

Browse files
author
wangjiaju.716
committed
Merge branch 'main' of https://github.com/doraemonlove/veadk-python into fix/builtintooltrace
2 parents 1959962 + 17b2fca commit d67fc9c

File tree

9 files changed

+289
-87
lines changed

9 files changed

+289
-87
lines changed

config.yaml.full

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,15 @@ tool:
5454
dataset_id: #dataset name
5555
mcp_router:
5656
url: #mcp sse/streamable-http url
57+
code_sandbox:
58+
url: #mcp sse/streamable-http url
59+
api_key: #mcp api key
60+
browser_sandbox:
61+
url: #mcp sse/streamable-http url
62+
api_key: #mcp api key
63+
computer_sandbox:
64+
url: #mcp sse/streamable-http url
65+
api_key: #mcp api key
5766

5867

5968
observability:

veadk/runner.py

Lines changed: 88 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
from typing import Union
1515

1616
from google.adk.agents import RunConfig
17+
from google.adk.agents.invocation_context import LlmCallsLimitExceededError
1718
from google.adk.agents.run_config import StreamingMode
1819
from google.adk.plugins.base_plugin import BasePlugin
1920
from google.adk.runners import Runner as ADKRunner
@@ -49,20 +50,25 @@ class Runner:
4950
def __init__(
5051
self,
5152
agent: VeAgent,
52-
short_term_memory: ShortTermMemory,
53+
short_term_memory: ShortTermMemory | None = None,
5354
plugins: list[BasePlugin] | None = None,
5455
app_name: str = "veadk_default_app",
5556
user_id: str = "veadk_default_user",
5657
):
57-
# basic settings
5858
self.app_name = app_name
5959
self.user_id = user_id
6060

61-
# agent settings
6261
self.agent = agent
6362

64-
self.short_term_memory = short_term_memory
65-
self.session_service = short_term_memory.session_service
63+
if not short_term_memory:
64+
logger.info(
65+
"No short term memory provided, using a in-memory memory by default."
66+
)
67+
self.short_term_memory = ShortTermMemory()
68+
else:
69+
self.short_term_memory = short_term_memory
70+
71+
self.session_service = self.short_term_memory.session_service
6672

6773
# prevent VeRemoteAgent has no long-term memory attr
6874
if isinstance(self.agent, Agent):
@@ -114,35 +120,44 @@ async def _run(
114120
self,
115121
session_id: str,
116122
message: types.Content,
123+
run_config: RunConfig | None = None,
117124
stream: bool = False,
118125
):
119126
stream_mode = StreamingMode.SSE if stream else StreamingMode.NONE
120127

121-
async def event_generator():
122-
async for event in self.runner.run_async(
123-
user_id=self.user_id,
124-
session_id=session_id,
125-
new_message=message,
126-
run_config=RunConfig(streaming_mode=stream_mode),
127-
):
128-
if event.get_function_calls():
129-
for function_call in event.get_function_calls():
130-
logger.debug(f"Function call: {function_call}")
131-
elif (
132-
event.content is not None
133-
and event.content.parts
134-
and event.content.parts[0].text is not None
135-
and len(event.content.parts[0].text.strip()) > 0
136-
):
137-
yield event.content.parts[0].text
128+
if run_config is not None:
129+
stream_mode = run_config.streaming_mode
130+
else:
131+
run_config = RunConfig(streaming_mode=stream_mode)
132+
try:
138133

139-
final_output = ""
140-
async for chunk in event_generator():
134+
async def event_generator():
135+
async for event in self.runner.run_async(
136+
user_id=self.user_id,
137+
session_id=session_id,
138+
new_message=message,
139+
run_config=run_config,
140+
):
141+
if event.get_function_calls():
142+
for function_call in event.get_function_calls():
143+
logger.debug(f"Function call: {function_call}")
144+
elif (
145+
event.content is not None
146+
and event.content.parts
147+
and event.content.parts[0].text is not None
148+
and len(event.content.parts[0].text.strip()) > 0
149+
):
150+
yield event.content.parts[0].text
151+
152+
final_output = ""
153+
async for chunk in event_generator():
154+
if stream:
155+
print(chunk, end="", flush=True)
156+
final_output += chunk
141157
if stream:
142-
print(chunk, end="", flush=True)
143-
final_output += chunk
144-
if stream:
145-
print() # end with a new line
158+
print() # end with a new line
159+
except LlmCallsLimitExceededError as e:
160+
logger.warning(f"Max number of llm calls limit exceeded: {e}")
146161

147162
return final_output
148163

@@ -151,6 +166,7 @@ async def run(
151166
messages: RunnerMessage,
152167
session_id: str,
153168
stream: bool = False,
169+
run_config: RunConfig | None = None,
154170
save_tracing_data: bool = False,
155171
):
156172
converted_messages: list = self._convert_messages(messages)
@@ -163,7 +179,9 @@ async def run(
163179

164180
final_output = ""
165181
for converted_message in converted_messages:
166-
final_output = await self._run(session_id, converted_message, stream)
182+
final_output = await self._run(
183+
session_id, converted_message, run_config, stream
184+
)
167185

168186
# try to save tracing file
169187
if save_tracing_data:
@@ -193,6 +211,47 @@ def get_trace_id(self) -> str:
193211
logger.warning(f"Get tracer id failed as {e}")
194212
return "<unknown_trace_id>"
195213

214+
async def run_with_raw_message(
215+
self,
216+
message: types.Content,
217+
session_id: str,
218+
run_config: RunConfig | None = None,
219+
):
220+
run_config = RunConfig() if not run_config else run_config
221+
222+
await self.short_term_memory.create_session(
223+
app_name=self.app_name, user_id=self.user_id, session_id=session_id
224+
)
225+
226+
try:
227+
228+
async def event_generator():
229+
async for event in self.runner.run_async(
230+
user_id=self.user_id,
231+
session_id=session_id,
232+
new_message=message,
233+
run_config=run_config,
234+
):
235+
if event.get_function_calls():
236+
for function_call in event.get_function_calls():
237+
logger.debug(f"Function call: {function_call}")
238+
elif (
239+
event.content is not None
240+
and event.content.parts
241+
and event.content.parts[0].text is not None
242+
and len(event.content.parts[0].text.strip()) > 0
243+
):
244+
yield event.content.parts[0].text
245+
246+
final_output = ""
247+
248+
async for chunk in event_generator():
249+
final_output += chunk
250+
except LlmCallsLimitExceededError as e:
251+
logger.warning(f"Max number of llm calls limit exceeded: {e}")
252+
253+
return final_output
254+
196255
def _print_trace_id(self) -> None:
197256
if not isinstance(self.agent, Agent):
198257
logger.warning(

veadk/tools/sandbox/browser_sandbox.py

Lines changed: 19 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -12,16 +12,26 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
browser_sandbox = ...
15+
from google.adk.tools.mcp_tool.mcp_toolset import MCPToolset
1616

17+
from veadk.config import getenv
18+
from veadk.utils.mcp_utils import get_mcp_params
1719

18-
def browser_use(prompt: str) -> str:
19-
"""Using the remote browser sandbox to according to the prompt.
20+
url = getenv("TOOL_BROWSER_SANDBOX_URL")
2021

21-
Args:
22-
prompt (str): The prompt to be used.
2322

24-
Returns:
25-
str: The response from the sandbox.
26-
"""
27-
...
23+
browser_sandbox = MCPToolset(connection_params=get_mcp_params(url=url))
24+
25+
# browser_sandbox = ...
26+
27+
28+
# def browser_use(prompt: str) -> str:
29+
# """Use the remote browser sandbox according to the prompt.
30+
31+
# Args:
32+
# prompt (str): The prompt to be used.
33+
34+
# Returns:
35+
# str: The response from the sandbox.
36+
# """
37+
# ...

veadk/tools/sandbox/code_sandbox.py

Lines changed: 21 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -12,19 +12,29 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
code_sandbox = ...
15+
from google.adk.tools.mcp_tool.mcp_toolset import MCPToolset
1616

17+
from veadk.config import getenv
18+
from veadk.utils.mcp_utils import get_mcp_params
1719

18-
def code_execution(code: str, language: str) -> str:
19-
"""Execute code in sandbox.
20+
url = getenv("TOOL_CODE_SANDBOX_URL")
2021

21-
Args:
22-
code (str): The code to be executed.
23-
language (str): The language of the code.
2422

25-
Returns:
26-
str: The response from the sandbox.
27-
"""
23+
code_sandbox = MCPToolset(connection_params=get_mcp_params(url=url))
2824

29-
res = code_sandbox(code, language)
30-
return res
25+
# code_sandbox = ...
26+
27+
28+
# def code_execution(code: str, language: str) -> str:
29+
# """Execute code in sandbox.
30+
31+
# Args:
32+
# code (str): The code to be executed.
33+
# language (str): The language of the code.
34+
35+
# Returns:
36+
# str: The response from the sandbox.
37+
# """
38+
39+
# res = code_sandbox(code, language)
40+
# return res

veadk/tracing/base_tracer.py

Lines changed: 0 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -19,25 +19,6 @@
1919
logger = get_logger(__name__)
2020

2121

22-
def replace_bytes_with_empty(data):
23-
"""
24-
Recursively traverse the data structure and replace all bytes types with empty strings.
25-
Supports handling any nested structure of lists and dictionaries.
26-
"""
27-
if isinstance(data, dict):
28-
# Handle dictionary: Recursively process each value
29-
return {k: replace_bytes_with_empty(v) for k, v in data.items()}
30-
elif isinstance(data, list):
31-
# Handle list: Recursively process each element
32-
return [replace_bytes_with_empty(item) for item in data]
33-
elif isinstance(data, bytes):
34-
# When encountering the bytes type, replace it with an empty string
35-
return "<image data>"
36-
else:
37-
# Keep other types unchanged
38-
return data
39-
40-
4122
class BaseTracer(ABC):
4223
def __init__(self, name: str):
4324
self.name = name

veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py

Lines changed: 27 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -371,13 +371,38 @@ def llm_output_value(params: LLMAttributesParams) -> ExtractorResponse:
371371
)
372372

373373

374+
def llm_gen_ai_request_functions(params: LLMAttributesParams) -> ExtractorResponse:
375+
functions = []
376+
377+
for idx, (tool_name, tool_instance) in enumerate(
378+
params.llm_request.tools_dict.items()
379+
):
380+
functions.append(
381+
{
382+
f"gen_ai.request.functions.{idx}.name": tool_instance.name,
383+
f"gen_ai.request.functions.{idx}.description": tool_instance.description,
384+
f"gen_ai.request.functions.{idx}.parameters": str(
385+
tool_instance._get_declaration().parameters.model_dump( # type: ignore
386+
exclude_none=True
387+
)
388+
if tool_instance._get_declaration()
389+
and tool_instance._get_declaration().parameters # type: ignore
390+
else {}
391+
),
392+
}
393+
)
394+
395+
return ExtractorResponse(content=functions)
396+
397+
374398
LLM_ATTRIBUTES = {
375399
# ===== request attributes =====
376400
"gen_ai.request.model": llm_gen_ai_request_model,
377401
"gen_ai.request.type": llm_gen_ai_request_type,
378402
"gen_ai.request.max_tokens": llm_gen_ai_request_max_tokens,
379403
"gen_ai.request.temperature": llm_gen_ai_request_temperature,
380404
"gen_ai.request.top_p": llm_gen_ai_request_top_p,
405+
"gen_ai.request.functions": llm_gen_ai_request_functions,
381406
# ===== response attributes =====
382407
"gen_ai.response.model": llm_gen_ai_response_model,
383408
"gen_ai.response.stop_reason": llm_gen_ai_response_stop_reason,
@@ -395,8 +420,8 @@ def llm_output_value(params: LLMAttributesParams) -> ExtractorResponse:
395420
# attributes
396421
"gen_ai.prompt": llm_gen_ai_prompt,
397422
"gen_ai.completion": llm_gen_ai_completion,
398-
"input.value": llm_input_value, # TLS required
399-
"output.value": llm_output_value, # TLS required
423+
# "input.value": llm_input_value,
424+
# "output.value": llm_output_value,
400425
# ===== usage =====
401426
"gen_ai.usage.input_tokens": llm_gen_ai_usage_input_tokens,
402427
"gen_ai.usage.output_tokens": llm_gen_ai_usage_output_tokens,

veadk/tracing/telemetry/attributes/extractors/tool_attributes_extractors.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ def tool_gen_ai_tool_message(params: ToolAttributesParams) -> ExtractorResponse:
3838
return ExtractorResponse(type="event", content=tool_input)
3939

4040

41-
def tool_cozeloop_input(params: ToolAttributesParams) -> ExtractorResponse:
41+
def tool_gen_ai_tool_input(params: ToolAttributesParams) -> ExtractorResponse:
4242
tool_input = {
4343
"name": params.tool.name,
4444
"description": params.tool.description,
@@ -53,7 +53,7 @@ def tool_gen_ai_tool_name(params: ToolAttributesParams) -> ExtractorResponse:
5353
return ExtractorResponse(content=params.tool.name or "<unknown_tool_name>")
5454

5555

56-
def tool_cozeloop_output(params: ToolAttributesParams) -> ExtractorResponse:
56+
def tool_gen_ai_tool_output(params: ToolAttributesParams) -> ExtractorResponse:
5757
function_response = params.function_response_event.get_function_responses()[
5858
0
5959
].model_dump()
@@ -70,6 +70,8 @@ def tool_cozeloop_output(params: ToolAttributesParams) -> ExtractorResponse:
7070
TOOL_ATTRIBUTES = {
7171
"gen_ai.operation.name": tool_gen_ai_operation_name,
7272
"gen_ai.tool.name": tool_gen_ai_tool_name, # TLS required
73-
"cozeloop.input": tool_cozeloop_input, # CozeLoop required
74-
"cozeloop.output": tool_cozeloop_output, # CozeLoop required
73+
"gen_ai.tool.input": tool_gen_ai_tool_input, # TLS required
74+
"gen_ai.tool.output": tool_gen_ai_tool_output, # TLS required
75+
"cozeloop.input": tool_gen_ai_tool_input, # CozeLoop required
76+
"cozeloop.output": tool_gen_ai_tool_output, # CozeLoop required
7577
}

0 commit comments

Comments
 (0)