Skip to content

Commit cdd37c3

Browse files
committed
fix: resolve pylint warnings and code formatting issues
- Fix line too long in pairwise_analyzer.py (C0301)
- Remove unnecessary lambda in generator.py (W0108)
- Fix parameter renaming issue in _generate_rubrics method (W0237)
- Remove unused DEFAULT_RUBRICS import
- Apply isort and black formatting fixes
1 parent a8874e5 commit cdd37c3

File tree

7 files changed

+23
-22
lines changed

7 files changed

+23
-22
lines changed

cookbooks/zero_shot_evaluation/schema.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,6 @@
1515
from loguru import logger
1616
from pydantic import BaseModel, Field
1717

18-
1918
# =============================================================================
2019
# Data Models
2120
# =============================================================================

cookbooks/zero_shot_evaluation/zero_shot_pipeline.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@
3333
)
3434

3535
# OpenJudge core components
36-
from openjudge.analyzer import PairwiseAnalyzer, PairwiseAnalysisResult
36+
from openjudge.analyzer import PairwiseAnalysisResult, PairwiseAnalyzer
3737
from openjudge.generator.simple_rubric import TaskBasedRubricGenerator
3838
from openjudge.graders.llm_grader import GraderMode, LLMGrader
3939
from openjudge.graders.schema import GraderResult
@@ -42,7 +42,6 @@
4242
from openjudge.models.schema.prompt_template import PromptTemplate
4343
from openjudge.runner.grading_runner import GraderConfig, GradingRunner
4444

45-
4645
# =============================================================================
4746
# Checkpoint Management (integrated from checkpoint.py)
4847
# =============================================================================

openjudge/analyzer/__init__.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,10 @@
1212
"""
1313

1414
from openjudge.analyzer.base_analyzer import AnalysisResult, BaseAnalyzer
15-
from openjudge.analyzer.pairwise_analyzer import PairwiseAnalysisResult, PairwiseAnalyzer
15+
from openjudge.analyzer.pairwise_analyzer import (
16+
PairwiseAnalysisResult,
17+
PairwiseAnalyzer,
18+
)
1619

1720
__all__ = [
1821
# Base classes

openjudge/analyzer/pairwise_analyzer.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -180,8 +180,8 @@ def analyze(
180180
worst_model=rankings[-1][0] if rankings else "",
181181
metadata={
182182
"num_models": len(self.model_names),
183-
"explanation": f"Analyzed {len(grader_results)} pairwise comparisons across {len(self.model_names)} models",
183+
"explanation": (
184+
f"Analyzed {len(grader_results)} pairwise comparisons " f"across {len(self.model_names)} models"
185+
),
184186
},
185187
)
186-
187-

openjudge/generator/__init__.py

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,14 @@
2323
DEFAULT_RUBRICS: Default fallback rubrics if generation fails
2424
"""
2525

26-
from openjudge.generator.base_generator import BaseGraderGenerator, GraderGeneratorConfig
27-
from openjudge.generator.llm_grader_generator import LLMGraderGenerator, LLMGraderGeneratorConfig
26+
from openjudge.generator.base_generator import (
27+
BaseGraderGenerator,
28+
GraderGeneratorConfig,
29+
)
30+
from openjudge.generator.llm_grader_generator import (
31+
LLMGraderGenerator,
32+
LLMGraderGeneratorConfig,
33+
)
2834

2935
# Simple rubric generation
3036
from openjudge.generator.simple_rubric import (

openjudge/generator/simple_rubric/generator.py

Lines changed: 6 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -35,10 +35,7 @@
3535
LLMGraderGenerator,
3636
LLMGraderGeneratorConfig,
3737
)
38-
from openjudge.generator.simple_rubric.rubric_generator import (
39-
DEFAULT_RUBRICS,
40-
TaskBasedRubricGenerator,
41-
)
38+
from openjudge.generator.simple_rubric.rubric_generator import TaskBasedRubricGenerator
4239
from openjudge.graders.llm_grader import LLMGrader
4340
from openjudge.graders.schema import GraderMode
4441
from openjudge.models.openai_chat_model import OpenAIChatModel
@@ -71,7 +68,7 @@ class SimpleRubricsGeneratorConfig(LLMGraderGeneratorConfig):
7168
task_description: str = ""
7269
scenario: Optional[str] = None
7370
language: LanguageEnum = LanguageEnum.EN
74-
default_rubrics: List[str] = field(default_factory=lambda: DEFAULT_RUBRICS.copy())
71+
default_rubrics: List[str] = field(default_factory=list)
7572
max_retries: int = 3
7673
min_score: int = 0
7774
max_score: int = 1
@@ -168,21 +165,19 @@ async def generate(
168165

169166
async def _generate_rubrics(
170167
self,
171-
sample_queries: Optional[List[str]] = None,
168+
dataset: Optional[List[str]] = None, # pylint: disable=arguments-renamed
172169
) -> str:
173170
"""Generate rubrics from task description.
174171
175172
Args:
176-
sample_queries: Optional list of sample queries for context.
173+
dataset: Optional list of sample queries for context.
177174
178175
Returns:
179176
str: Formatted string containing evaluation rubrics.
180177
"""
181-
rubrics_list = await self._rubric_generator.generate(sample_queries=sample_queries)
178+
rubrics_list = await self._rubric_generator.generate(sample_queries=dataset)
182179

183-
formatted_rubrics = "\n\n".join(
184-
[f"{i + 1}. {rubric}" for i, rubric in enumerate(rubrics_list)]
185-
)
180+
formatted_rubrics = "\n\n".join([f"{i + 1}. {rubric}" for i, rubric in enumerate(rubrics_list)])
186181

187182
logger.info(f"Generated {len(rubrics_list)} rubrics from task description")
188183

tests/generator/test_simple_rubric.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
2. Generate rubrics from task description (no labeled data required)
1010
3. Optionally create a complete LLMGrader for evaluation
1111
12-
Supports both TaskBasedRubricGenerator (rubrics only) and
12+
Supports both TaskBasedRubricGenerator (rubrics only) and
1313
SimpleRubricsGenerator (complete LLMGrader).
1414
1515
Example:
@@ -44,7 +44,6 @@
4444
from openjudge.models.openai_chat_model import OpenAIChatModel
4545
from openjudge.models.schema.prompt_template import LanguageEnum
4646

47-
4847
# =============================================================================
4948
# Test Data
5049
# =============================================================================

0 commit comments

Comments (0)