
Commit 42bb618

chore: Ruff linting fixes

1 parent 4bc6f87

File tree

4 files changed: +6, −14 lines
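The hunks below are mechanical lint fixes: unused imports are dropped (Ruff rule F401) and redundant f-string prefixes on strings with no placeholders are removed (F541); one logfire.error call is also reworded. Assuming Ruff is installed in the project, fixes of this kind can typically be reproduced from the repo root with `ruff check --select F401,F541 --fix .`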

agents_mcp_usage/multi_mcp/eval_multi_mcp/evals_pydantic_mcp.py

Lines changed: 1 addition & 1 deletion

@@ -18,7 +18,7 @@
 import os
 import random
 from datetime import datetime
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List
 
 import logfire
 from dotenv import load_dotenv

agents_mcp_usage/multi_mcp/eval_multi_mcp/merbench_ui.py

Lines changed: 0 additions & 3 deletions

@@ -3,13 +3,10 @@
 import pandas as pd
 import plotly.graph_objects as go
 import streamlit as st
-import numpy as np
 from typing import List, Dict
 import json
 import re
 from pydantic import ValidationError
-import csv
-import io
 
 from agents_mcp_usage.multi_mcp.eval_multi_mcp.dashboard_config import (
     DEFAULT_CONFIG,

agents_mcp_usage/multi_mcp/eval_multi_mcp/run_multi_evals.py

Lines changed: 4 additions & 9 deletions

@@ -16,10 +16,8 @@
 import csv
 import os
 import statistics
-import sys
 from datetime import datetime
-from pathlib import Path
-from typing import Any, Dict, List, Optional, Set
+from typing import Any, Dict, List, Optional
 
 import logfire
 from dotenv import load_dotenv

@@ -39,9 +37,6 @@
     DEFAULT_MODELS,
     MermaidInput,
     MermaidOutput,
-    UsedBothMCPTools,
-    UsageLimitNotExceeded,
-    MermaidDiagramValid,
     fix_mermaid_diagram,
     create_evaluation_dataset,
     get_timestamp_prefix,

@@ -226,7 +221,7 @@ async def fix_with_model(inputs: MermaidInput) -> MermaidOutput:
 
         error_msg = f"Error during evaluation: {str(e)}"
         logfire.error(
-            "Evaluation error",
+            f"Evaluation error: {error_msg}",
             model=model,
             run_index=run_index,
             error=str(e),

@@ -409,7 +404,7 @@ async def run_all_evaluations(
         self, n_runs: int, parallel: bool = True, timeout: int = 120
     ) -> str:
         """Run evaluations for all models and return path to combined results."""
-        self.console.print(f"[bold green]Starting multi-model evaluation[/bold green]")
+        self.console.print("[bold green]Starting multi-model evaluation[/bold green]")
         self.console.print(f"Models: {', '.join(self.models)}")
         self.console.print(f"Runs per model: {n_runs}")
         self.console.print(f"Parallel execution: {parallel}")

@@ -424,7 +419,7 @@ async def run_all_evaluations(
 
         self.print_final_summary()
 
-        self.console.print(f"\n[bold green]All evaluations complete![/bold green]")
+        self.console.print("\n[bold green]All evaluations complete![/bold green]")
         self.console.print(f"Combined results: {combined_file}")
 
         return combined_file
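The two console.print hunks above are F541 fixes: an f-string with no placeholders is just a plain string carrying a redundant prefix. The logfire.error hunk goes the opposite direction, upgrading a plain string to an f-string that actually interpolates error_msg. A minimal Python sketch of both fix directions (illustrative only, not code from this repo):

# F541: the f prefix does nothing when there is nothing to interpolate
status = "complete"
print(f"All evaluations complete!")    # flagged by Ruff F541
print("All evaluations complete!")     # fix 1: drop the redundant prefix
print(f"All evaluations {status}!")    # fix 2: keep the prefix and interpolate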

agents_mcp_usage/multi_mcp/eval_multi_mcp/schemas.py

Lines changed: 1 addition & 1 deletion

@@ -3,7 +3,7 @@
 """
 
 from pydantic import BaseModel, Field, validator
-from typing import List, Dict, Optional, Any
+from typing import List, Dict, Optional
 
 
 class PrimaryMetricConfig(BaseModel):

0 commit comments