
Commit 71ebba2

pylints
1 parent 98968e6 commit 71ebba2

File tree

graphgen/models/evaluator/qa/length_evaluator.py
graphgen/models/evaluator/qa/uni_evaluator.py
graphgen/operators/__init__.py
graphgen/operators/evaluate/evaluate_kg.py
graphgen/operators/evaluate/evaluate_service.py
graphgen/templates/evaluation/kg/consistency_evaluation.py
graphgen/utils/help_nltk.py

7 files changed: +29 −33 lines changed

graphgen/models/evaluator/qa/length_evaluator.py

Lines changed: 3 additions & 2 deletions

@@ -5,8 +5,9 @@


 class LengthEvaluator(BaseEvaluator):
-    def __init__(self):
-        self.tokenizer: Tokenizer = Tokenizer(os.environ["TOKENIZER_MODEL"] or "cl100k_base")
+    def __init__(self, tokenizer_name: str = None):
+        tokenizer_model = tokenizer_name or os.environ.get("TOKENIZER_MODEL", "cl100k_base")
+        self.tokenizer: Tokenizer = Tokenizer(tokenizer_model)

     def evaluate(self, pair: QAPair) -> float:
         """

graphgen/models/evaluator/qa/uni_evaluator.py

Lines changed: 2 additions & 2 deletions

@@ -55,9 +55,9 @@ def _build_input_text(dimension: str, question: str, answer: str) -> str:
         """Construct input text for specified dimension."""
         if dimension == "naturalness":
             return f"question: Is this a natural response? </s> response: {answer}"
-        elif dimension == "coherence":
+        if dimension == "coherence":
             return f"question: Is this a coherent response? </s> response: {answer} </s> history: {question}"
-        elif dimension == "understandability":
+        if dimension == "understandability":
             return f"question: Is this an understandable response? </s> response: {answer}"
         raise NotImplementedError(f"Unsupported dimension '{dimension}'")

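Because every branch ends in a return, the elif chain can be flattened into independent if guards with no change in behaviour; this is the refactor pylint's no-else-return check (R1705) nudges toward. A tiny unrelated illustration of the same pattern, assuming nothing from the project code:

def sign_label(n: int) -> str:
    """Each branch returns, so plain `if` guards behave exactly like
    the former `elif` chain."""
    if n < 0:
        return "negative"
    if n == 0:
        return "zero"
    return "positive"


assert sign_label(-3) == "negative"
assert sign_label(0) == "zero"
assert sign_label(7) == "positive"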

graphgen/operators/__init__.py

Lines changed: 0 additions & 1 deletion

@@ -8,7 +8,6 @@
 from .quiz import QuizService
 from .read import read
 from .search import SearchService
-from .evaluate import EvaluateService


 operators = {

graphgen/operators/evaluate/evaluate_kg.py

Lines changed: 5 additions & 5 deletions
(whitespace-only changes: blank lines carrying trailing whitespace become empty lines, the usual fix for pylint's trailing-whitespace check)

@@ -28,30 +28,30 @@ def __init__(
         self.chunk_storage: BaseKVStorage = init_storage(
             backend=kv_backend, working_dir=working_dir, namespace="chunk"
         )
-
+
         # Initialize LLM client
         self.llm_client: BaseLLMWrapper = init_llm("synthesizer")
-
+
         # Initialize individual evaluators
         self.accuracy_evaluator = AccuracyEvaluator(
             graph_storage=self.graph_storage,
             chunk_storage=self.chunk_storage,
             llm_client=self.llm_client,
         )
-
+
         self.consistency_evaluator = ConsistencyEvaluator(
             graph_storage=self.graph_storage,
             chunk_storage=self.chunk_storage,
             llm_client=self.llm_client,
         )
-
+
         # Structure evaluator doesn't need chunk_storage or llm_client
         structure_params = kwargs.get("structure_params", {})
         self.structure_evaluator = StructureEvaluator(
             graph_storage=self.graph_storage,
             **structure_params
         )
-
+
         logger.info("KG evaluators initialized")



graphgen/operators/evaluate/evaluate_service.py

Lines changed: 16 additions & 17 deletions
(mostly whitespace-only fixes: blank lines carrying trailing whitespace become empty lines; the evaluate() hunk also drops a redundant else after return)

@@ -32,15 +32,15 @@ def __init__(
         self.kwargs = kwargs
         self.graph_backend = graph_backend
         self.kv_backend = kv_backend
-
+
         # Separate QA and KG metrics
         self.qa_metrics = [m for m in self.metrics if m.startswith("qa_")]
         self.kg_metrics = [m for m in self.metrics if m.startswith("kg_")]
-
+
         # Initialize evaluators
         self.qa_evaluators = {}
         self.kg_evaluators: Optional[KGEvaluators] = None
-
+
         self._init_evaluators()

     def _init_evaluators(self):

@@ -68,7 +68,7 @@ def _init_evaluators(self):
                 )
             else:
                 raise ValueError(f"Unknown QA metric: {metric}")
-
+
         # Initialize KG evaluators if KG metrics are specified
         if self.kg_metrics:
             kg_params = self.kwargs.get("kg_params", {})

@@ -148,14 +148,14 @@ def _evaluate_kg(self) -> Dict[str, Any]:
             return {}

         results = {}
-
+
         # Map metric names to evaluation functions
         kg_metric_map = {
             "kg_accuracy": evaluate_accuracy,
             "kg_consistency": evaluate_consistency,
             "kg_structure": evaluate_structure,
         }
-
+
         # Run KG evaluations based on metrics
         for metric in self.kg_metrics:
             if metric in kg_metric_map:

@@ -168,12 +168,12 @@ def _evaluate_kg(self) -> Dict[str, Any]:
                     results[metric_key] = {"error": str(e)}
             else:
                 logger.warning("Unknown KG metric: %s, skipping", metric)
-
+
         # If no valid metrics were found, run all evaluations
         if not results:
             logger.info("No valid KG metrics found, running all evaluations")
             results = evaluate_all(self.kg_evaluators)
-
+
         return results

     def evaluate(

@@ -182,39 +182,38 @@ def evaluate(
         # Determine evaluation type
         has_qa_metrics = len(self.qa_metrics) > 0
         has_kg_metrics = len(self.kg_metrics) > 0
-
+
         # If items provided and QA metrics exist, do QA evaluation
         if items is not None and has_qa_metrics:
             return self._evaluate_qa(items)
-
+
         # If KG metrics exist, do KG evaluation
         if has_kg_metrics:
             return self._evaluate_kg()
-
+
         # If no metrics specified, try to infer from context
         if items is not None:
             logger.warning("No QA metrics specified but items provided, skipping evaluation")
             return []
-        else:
-            logger.warning("No metrics specified, skipping evaluation")
-            return {}
+        logger.warning("No metrics specified, skipping evaluation")
+        return {}

     def process(self, batch: pd.DataFrame) -> pd.DataFrame:
         has_qa_metrics = len(self.qa_metrics) > 0
         has_kg_metrics = len(self.kg_metrics) > 0
-
+
         # QA evaluation: process batch items
         if has_qa_metrics:
             items = batch.to_dict(orient="records")
             results = self._evaluate_qa(items)
             return pd.DataFrame(results)
-
+
         # KG evaluation: evaluate from storage
         if has_kg_metrics:
             results = self._evaluate_kg()
             # Convert dict to DataFrame (single row)
             return pd.DataFrame([results])
-
+
         # No metrics specified
         logger.warning("No metrics specified, returning empty DataFrame")
         return pd.DataFrame()
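The process() path wraps its two result shapes differently: _evaluate_qa returns a list of per-item dicts, while _evaluate_kg returns one dict of aggregate scores that has to be wrapped in a list to become a single-row DataFrame. A small standalone pandas sketch of the two constructions (the column names here are made up for illustration):

import pandas as pd

# A list of per-item records becomes one row per record ...
qa_results = [{"qa_length": 120, "qa_naturalness": 0.91},
              {"qa_length": 87, "qa_naturalness": 0.84}]
print(pd.DataFrame(qa_results).shape)    # (2, 2)

# ... while a single dict of aggregate scores must be wrapped in a list
# to yield one row; pd.DataFrame(kg_results) alone would raise
# "If using all scalar values, you must pass an index".
kg_results = {"kg_accuracy": 0.9, "kg_consistency": 0.85}
print(pd.DataFrame([kg_results]).shape)  # (1, 2)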

graphgen/templates/evaluation/kg/consistency_evaluation.py

Lines changed: 0 additions & 1 deletion

@@ -100,4 +100,3 @@
     "en": "",
     "zh": ""
 }
-

graphgen/utils/help_nltk.py

Lines changed: 3 additions & 5 deletions

@@ -3,16 +3,14 @@
 from typing import Dict, List, Final, Optional
 import warnings
 import nltk
+import jieba

 warnings.filterwarnings(
-    "ignore",
+    "ignore",
     category=UserWarning,
-    module="jieba\._compat"
+    module=r"jieba\._compat"
 )

-
-import jieba
-
 class NLTKHelper:
     """
     NLTK helper class
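Two separate pylint findings appear to be addressed here: import jieba moves up with the other top-of-file imports (wrong-import-position, C0413), and the module filter becomes a raw string because "jieba\._compat" contains a backslash escape Python does not recognise (anomalous-backslash-in-string, W1401) while the argument is in fact a regular expression. A small self-contained sketch of the raw-string form, independent of the project code:

import re
import warnings

# `module` in warnings.filterwarnings is a regular expression matched
# against the module that raised the warning, so the dot is escaped.
# The raw string keeps the backslash literal without tripping W1401.
PATTERN = r"jieba\._compat"

assert re.match(PATTERN, "jieba._compat")
assert re.match(PATTERN, "jiebaX_compat") is None

warnings.filterwarnings(
    "ignore",
    category=UserWarning,
    module=PATTERN,
)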
