Skip to content

Commit 9375eeb

Browse files
committed
docs: Google style docstrings
1 parent 42bb618 commit 9375eeb

File tree

12 files changed

+403
-108
lines changed

12 files changed

+403
-108
lines changed

agents_mcp_usage/basic_mcp/basic_mcp_use/adk_mcp.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,11 +20,14 @@
2020

2121

2222
async def main(query: str = "Greet Andrew and give him the current time") -> None:
23-
"""
24-
Main function to run the agent
23+
"""Runs the agent with a given query.
24+
25+
This function sets up the MCP server, creates an LLM agent, and runs it
26+
with a specified query. It also handles the cleanup of the MCP server
27+
connection.
2528
2629
Args:
27-
query (str): The query to run the agent with
30+
query: The query to run the agent with.
2831
"""
2932
# Set up MCP server connection
3033
server_params = StdioServerParameters(

agents_mcp_usage/basic_mcp/basic_mcp_use/langgraph_mcp.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121
# Create server parameters for stdio connection
2222
server = StdioServerParameters(
2323
command="uv",
24-
args=["run", "mcp_servers/example_server.py", "stdio"],
24+
args=["run", "mcp_servers/example_server.py", "stdio"],
2525
)
2626

2727
model = ChatGoogleGenerativeAI(
@@ -30,11 +30,13 @@
3030

3131

3232
async def main(query: str = "Greet Andrew and give him the current time") -> None:
33-
"""
34-
Main function to run the agent
33+
"""Runs the LangGraph agent with a given query.
34+
35+
This function connects to the MCP server, loads the tools, creates a
36+
LangGraph agent, and invokes it with the provided query.
3537
3638
Args:
37-
query (str): The query to run the agent with
39+
query: The query to run the agent with.
3840
"""
3941
async with stdio_client(server) as (read, write):
4042
async with ClientSession(read, write) as session:

agents_mcp_usage/basic_mcp/basic_mcp_use/oai-agent_mcp.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -14,11 +14,13 @@
1414

1515

1616
async def main(query: str = "Greet Andrew and give him the current time") -> None:
17-
"""
18-
Main function to run the agent
17+
"""Runs the OpenAI agent with a given query.
18+
19+
This function creates an MCP server, initializes an OpenAI agent with the
20+
server, and runs the agent with the provided query.
1921
2022
Args:
21-
query (str): The query to run the agent with
23+
query: The query to run the agent with.
2224
"""
2325
# Create and use the MCP server in an async context
2426
async with MCPServerStdio(

agents_mcp_usage/basic_mcp/basic_mcp_use/pydantic_mcp.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,11 +25,13 @@
2525

2626

2727
async def main(query: str = "Greet Andrew and give him the current time") -> None:
28-
"""
29-
Main function to run the agent
28+
"""Runs the Pydantic agent with a given query.
29+
30+
This function runs the Pydantic agent with the provided query and prints the
31+
output.
3032
3133
Args:
32-
query (str): The query to run the agent with
34+
query: The query to run the agent with.
3335
"""
3436
async with agent.run_mcp_servers():
3537
result = await agent.run(query)

agents_mcp_usage/multi_mcp/eval_multi_mcp/evals_pydantic_mcp.py

Lines changed: 107 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,17 @@
6868

6969

7070
def is_retryable_error(exception: Exception) -> bool:
71-
"""Check if an exception should be retried."""
71+
"""Checks if an exception is retryable.
72+
73+
This function checks if the given exception is a retryable HTTP error or a
74+
general connection error.
75+
76+
Args:
77+
exception: The exception to check.
78+
79+
Returns:
80+
True if the exception is retryable, False otherwise.
81+
"""
7282
if isinstance(exception, ModelHTTPError):
7383
return exception.status_code in RETRYABLE_HTTP_STATUS_CODES
7484

@@ -80,27 +90,29 @@ def is_retryable_error(exception: Exception) -> bool:
8090

8191

8292
async def exponential_backoff_retry(
83-
func_call,
93+
func_call: callable,
8494
max_attempts: int = MAX_RETRY_ATTEMPTS,
8595
base_delay: float = BASE_RETRY_DELAY,
8696
max_delay: float = MAX_RETRY_DELAY,
8797
jitter: bool = True,
8898
) -> Any:
89-
"""
90-
Execute a function with exponential backoff retry logic.
99+
"""Executes a function with exponential backoff retry logic.
100+
101+
This function attempts to execute the given asynchronous function call,
102+
retrying with an exponential backoff delay if a retryable error occurs.
91103
92104
Args:
93-
func_call: Async function to retry
94-
max_attempts: Maximum number of retry attempts
95-
base_delay: Base delay between retries in seconds
96-
max_delay: Maximum delay between retries in seconds
97-
jitter: Whether to add random jitter to delays
105+
func_call: The async function to retry.
106+
max_attempts: The maximum number of retry attempts.
107+
base_delay: The base delay between retries in seconds.
108+
max_delay: The maximum delay between retries in seconds.
109+
jitter: Whether to add random jitter to the delays.
98110
99111
Returns:
100-
Result of the function call
112+
The result of the function call.
101113
102114
Raises:
103-
The last exception if all retries are exhausted
115+
The last exception if all retries are exhausted.
104116
"""
105117
last_exception = None
106118

@@ -158,7 +170,14 @@ async def exponential_backoff_retry(
158170

159171

160172
def get_mcp_servers() -> List[MCPServerStdio]:
161-
"""Get the configured MCP servers for the evaluation."""
173+
"""Gets the configured MCP servers for the evaluation.
174+
175+
This function returns a list of MCP servers required for the evaluation,
176+
including the local example server and the mermaid validator server.
177+
178+
Returns:
179+
A list of configured MCP servers.
180+
"""
162181
local_server = MCPServerStdio(
163182
command="uv",
164183
args=[
@@ -180,14 +199,17 @@ def get_mcp_servers() -> List[MCPServerStdio]:
180199
def create_agent(
181200
model: str = DEFAULT_MODEL, model_settings: Dict[str, Any] = None
182201
) -> Agent:
183-
"""Create an agent with MCP servers for the specified model.
202+
"""Creates an agent with MCP servers for the specified model.
203+
204+
This function initializes and returns an agent with the necessary MCP
205+
servers and model settings.
184206
185207
Args:
186-
model: The model to use for the agent
187-
model_settings: Optional model-specific settings
208+
model: The model to use for the agent.
209+
model_settings: Optional model-specific settings.
188210
189211
Returns:
190-
Configured Agent instance
212+
A configured Agent instance.
191213
"""
192214
if model_settings is None:
193215
model_settings = {}
@@ -230,6 +252,18 @@ class UsedBothMCPTools(Evaluator[MermaidInput, MermaidOutput]):
230252
async def evaluate(
231253
self, ctx: EvaluatorContext[MermaidInput, MermaidOutput]
232254
) -> float:
255+
"""Evaluates if both MCP tools were used in the given context.
256+
257+
This method checks the tools used in the output and returns a score
258+
based on whether tools from both MCP servers were utilized.
259+
260+
Args:
261+
ctx: The evaluator context containing the input and output.
262+
263+
Returns:
264+
A score of 1.0 if both tools were used, 0.5 if one was used,
265+
and 0.0 otherwise.
266+
"""
233267
if not ctx.output or not ctx.output.tools_used:
234268
return 0.0
235269

@@ -257,7 +291,17 @@ class UsageLimitNotExceeded(Evaluator[MermaidInput, MermaidOutput]):
257291
async def evaluate(
258292
self, ctx: EvaluatorContext[MermaidInput, MermaidOutput]
259293
) -> float:
260-
"""Check if the case failed due to usage limits being exceeded."""
294+
"""Checks if the case failed due to usage limits being exceeded.
295+
296+
This method examines the output for a usage limit failure reason and
297+
returns a score accordingly.
298+
299+
Args:
300+
ctx: The evaluator context.
301+
302+
Returns:
303+
0.0 if a usage limit failure occurred, 1.0 otherwise.
304+
"""
261305
if ctx.output and ctx.output.failure_reason == "usage_limit_exceeded":
262306
logfire.warning(
263307
"Case failed due to usage limit exceeded",
@@ -274,6 +318,17 @@ class MermaidDiagramValid(Evaluator[MermaidInput, MermaidOutput]):
274318
async def evaluate(
275319
self, ctx: EvaluatorContext[MermaidInput, MermaidOutput]
276320
) -> float:
321+
"""Evaluates if the generated mermaid diagram is valid.
322+
323+
This method validates the mermaid diagram in the output, handling
324+
retries and logging the results.
325+
326+
Args:
327+
ctx: The evaluator context.
328+
329+
Returns:
330+
1.0 if the diagram is valid, 0.0 otherwise.
331+
"""
277332
# Skip validation if there was a failure
278333
if ctx.output and ctx.output.failure_reason:
279334
logfire.info(
@@ -331,14 +386,17 @@ async def evaluate(
331386
async def fix_mermaid_diagram(
332387
inputs: MermaidInput, model: str = DEFAULT_MODEL
333388
) -> MermaidOutput:
334-
"""Fix an invalid mermaid diagram using the agent with multiple MCP servers.
389+
"""Fixes an invalid mermaid diagram using an agent with multiple MCP servers.
390+
391+
This function runs an agent to fix a given mermaid diagram, handling
392+
various exceptions and capturing metrics.
335393
336394
Args:
337-
inputs: The input containing the invalid diagram
338-
model: The model to use for the agent
395+
inputs: The input containing the invalid diagram.
396+
model: The model to use for the agent.
339397
340398
Returns:
341-
MermaidOutput with the fixed diagram and captured metrics
399+
A MermaidOutput object with the fixed diagram and captured metrics.
342400
"""
343401
query = f"Add the current time and fix the mermaid diagram syntax using the validator: {inputs.invalid_diagram}. Return only the fixed mermaid diagram between backticks."
344402

@@ -477,13 +535,16 @@ async def _run_agent():
477535
def create_evaluation_dataset(
478536
judge_model: str = DEFAULT_MODEL,
479537
) -> Dataset[MermaidInput, MermaidOutput, Any]:
480-
"""Create the dataset for evaluating mermaid diagram fixing.
538+
"""Creates the dataset for evaluating mermaid diagram fixing.
539+
540+
This function constructs a dataset with test cases of varying difficulty
541+
and a set of evaluators for judging the results.
481542
482543
Args:
483-
judge_model: The model to use for LLM judging
544+
judge_model: The model to use for LLM judging.
484545
485546
Returns:
486-
The evaluation dataset
547+
The evaluation dataset.
487548
"""
488549
return Dataset[MermaidInput, MermaidOutput, Any](
489550
# Construct 3 tests, each asks the LLM to fix an invalid mermaid diagram of increasing difficulty
@@ -546,23 +607,30 @@ def create_evaluation_dataset(
546607

547608

548609
def get_timestamp_prefix() -> str:
549-
"""Get a timestamp prefix in the format yyyy-mm-dd_H-M-s."""
610+
"""Gets a timestamp prefix in the format yyyy-mm-dd_H-M-s.
611+
612+
Returns:
613+
A string representing the current timestamp.
614+
"""
550615
now = datetime.now()
551616
return now.strftime("%Y-%m-%d_%H-%M-%S")
552617

553618

554619
def write_mermaid_results_to_csv(
555620
report: EvaluationReport, model: str, output_dir: str = "./mermaid_results"
556621
) -> str:
557-
"""Write mermaid evaluation results with metrics to a CSV file.
622+
"""Writes mermaid evaluation results with metrics to a CSV file.
623+
624+
This function takes an evaluation report and writes the results to a CSV
625+
file, including scores and metrics.
558626
559627
Args:
560-
report: The evaluation report from pydantic_evals
561-
model: The model name used for evaluation
562-
output_dir: Directory to write the CSV file
628+
report: The evaluation report from pydantic_evals.
629+
model: The model name used for evaluation.
630+
output_dir: The directory to write the CSV file to.
563631
564632
Returns:
565-
Path to the created CSV file
633+
The path to the created CSV file.
566634
"""
567635
os.makedirs(output_dir, exist_ok=True)
568636

@@ -655,16 +723,19 @@ async def run_evaluations(
655723
export_csv: bool = True,
656724
output_dir: str = "./mermaid_results",
657725
) -> EvaluationReport:
658-
"""Run the evaluations on the mermaid diagram fixing task.
726+
"""Runs the evaluations on the mermaid diagram fixing task.
727+
728+
This function sets up the evaluation dataset, runs the evaluation for a
729+
given model, and exports the results to a CSV file.
659730
660731
Args:
661-
model: The model to use for the agent
662-
judge_model: The model to use for LLM judging
663-
export_csv: Whether to export results to CSV
664-
output_dir: Directory to save results
732+
model: The model to use for the agent.
733+
judge_model: The model to use for LLM judging.
734+
export_csv: Whether to export the results to a CSV file.
735+
output_dir: The directory to save the results to.
665736
666737
Returns:
667-
The evaluation report
738+
The evaluation report.
668739
"""
669740
dataset = create_evaluation_dataset(judge_model)
670741

0 commit comments

Comments (0)