Commit c7bdcbe

fix: Remove ModelRes references from resume modules
- Remove conditional import of ModelRes (now deleted)
- Use EvalDetail as the only return type
- Update LLMKeywordMatcher to return EvalDetail
- Update LLMResumeOptimizer to return EvalDetail
- Remove USE_EVAL_DETAIL flag and related branching logic
- Simplify code by removing backward compatibility code

This fixes import errors after ModelRes was removed from the codebase.
1 parent 8bf0093 commit c7bdcbe
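
For reference, the success/failure shape both modules now emit condenses into one pattern. This is a minimal sketch, assuming EvalDetail exposes metric, score, status, label, and reason exactly as the diffs below assign them; the helper name make_eval_detail and the default threshold are hypothetical, not part of this commit:

    from dingo.io.output.eval_detail import EvalDetail, QualityLabel

    # Hypothetical helper condensing the pattern this commit standardizes on:
    # status=False + QUALITY_GOOD when the score clears the threshold,
    # status=True + a "QUALITY_BAD.<metric>" label otherwise.
    def make_eval_detail(metric_name: str, score: float, reason: str,
                         threshold: float = 0.6) -> EvalDetail:
        # threshold default is arbitrary, for illustration only
        result = EvalDetail(metric=metric_name)
        result.score = score
        result.reason = [reason]
        if score >= threshold:
            result.status = False
            result.label = [QualityLabel.QUALITY_GOOD]
        else:
            result.status = True
            result.label = [f"QUALITY_BAD.{metric_name}"]
        return result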

File tree

2 files changed: 37 additions & 95 deletions

dingo/model/llm/llm_keyword_matcher.py

Lines changed: 22 additions & 57 deletions
@@ -14,13 +14,7 @@
 from dingo.utils import log
 from dingo.utils.exception import ConvertJsonError

-# Import EvalDetail for dev branch compatibility, fallback to ModelRes for main branch
-try:
-    from dingo.io.output.eval_detail import EvalDetail, QualityLabel
-    USE_EVAL_DETAIL = True
-except ImportError:
-    from dingo.model.modelres import ModelRes
-    USE_EVAL_DETAIL = False
+from dingo.io.output.eval_detail import EvalDetail, QualityLabel

 # Complete synonym mapping for keyword normalization
 SYNONYM_MAP = {
@@ -185,7 +179,7 @@ def _build_prompt(jd_text: str, resume_text: str) -> str:

     @classmethod
     def process_response(cls, response: str):
-        """Process LLM response. Returns EvalDetail (dev) or ModelRes (main)."""
+        """Process LLM response. Returns EvalDetail."""
         log.info(f"Raw LLM response: {response}")

         # Extract think content and clean response
@@ -213,29 +207,16 @@ def process_response(cls, response: str):

         log.info(f"Keyword match score: {score:.1%}, threshold: {cls.threshold:.0%}")

-        # Return appropriate result type based on branch
-        if USE_EVAL_DETAIL:
-            result = EvalDetail(metric=cls.__name__)
-            result.score = score
-            result.reason = [reason]
-            if score >= cls.threshold:
-                result.status = False
-                result.label = [QualityLabel.QUALITY_GOOD]
-            else:
-                result.status = True
-                result.label = [f"QUALITY_BAD.{cls.__name__}"]
+        # Return EvalDetail result
+        result = EvalDetail(metric=cls.__name__)
+        result.score = score
+        result.reason = [reason]
+        if score >= cls.threshold:
+            result.status = False
+            result.label = [QualityLabel.QUALITY_GOOD]
         else:
-            result = ModelRes()
-            result.score = score
-            result.reason = [reason]
-            if score >= cls.threshold:
-                result.error_status = False
-                result.type = "KEYWORD_MATCH_GOOD"
-                result.name = "MATCH_GOOD"
-            else:
-                result.error_status = True
-                result.type = "KEYWORD_MATCH_LOW"
-                result.name = "MATCH_LOW"
+            result.status = True
+            result.label = [f"QUALITY_BAD.{cls.__name__}"]

         return result

@@ -346,38 +327,22 @@ def _generate_reason(cls, jd_analysis: dict, keyword_analysis: List[dict], score

     @classmethod
     def eval(cls, input_data: Data):
-        """Override eval to validate inputs. Returns EvalDetail (dev) or ModelRes (main)."""
+        """Override eval to validate inputs. Returns EvalDetail."""
         # Validate that content (resume) is provided
         if not input_data.content:
-            if USE_EVAL_DETAIL:
-                result = EvalDetail(metric=cls.__name__)
-                result.status = True
-                result.label = [f"QUALITY_BAD.{cls.__name__}"]
-                result.reason = ["Resume text (content) is required but was not provided"]
-                return result
-            else:
-                return ModelRes(
-                    error_status=True,
-                    type="KEYWORD_MATCH_ERROR",
-                    name="MISSING_RESUME",
-                    reason=["Resume text (content) is required but was not provided"]
-                )
+            result = EvalDetail(metric=cls.__name__)
+            result.status = True
+            result.label = [f"QUALITY_BAD.{cls.__name__}"]
+            result.reason = ["Resume text (content) is required but was not provided"]
+            return result

         # Validate that prompt (JD) is provided
         if not input_data.prompt:
-            if USE_EVAL_DETAIL:
-                result = EvalDetail(metric=cls.__name__)
-                result.status = True
-                result.label = [f"QUALITY_BAD.{cls.__name__}"]
-                result.reason = ["Job description (prompt) is required but was not provided"]
-                return result
-            else:
-                return ModelRes(
-                    error_status=True,
-                    type="KEYWORD_MATCH_ERROR",
-                    name="MISSING_JD",
-                    reason=["Job description (prompt) is required but was not provided"]
-                )
+            result = EvalDetail(metric=cls.__name__)
+            result.status = True
+            result.label = [f"QUALITY_BAD.{cls.__name__}"]
+            result.reason = ["Job description (prompt) is required but was not provided"]
+            return result

         # Call parent eval method
         return super().eval(input_data)
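
With the branching gone, callers of LLMKeywordMatcher handle a single result type. A hedged usage sketch follows — the Data import path and constructor fields are assumptions inferred from the eval() validation above, not confirmed by this diff:

    from dingo.io import Data  # assumed import path for Data
    from dingo.model.llm.llm_keyword_matcher import LLMKeywordMatcher

    # Hypothetical invocation; content/prompt mirror the fields eval() validates.
    data = Data(data_id="0", content="<resume text>", prompt="<job description>")
    detail = LLMKeywordMatcher.eval(data)

    if detail.status:  # True means the match fell below cls.threshold
        print("Low keyword match:", detail.reason)
    else:
        print(f"Good match, score={detail.score:.1%}")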

dingo/model/llm/llm_resume_optimizer.py

Lines changed: 15 additions & 38 deletions
@@ -14,13 +14,7 @@
 from dingo.utils import log
 from dingo.utils.exception import ConvertJsonError

-# Import EvalDetail for dev branch compatibility, fallback to ModelRes for main branch
-try:
-    from dingo.io.output.eval_detail import EvalDetail, QualityLabel
-    USE_EVAL_DETAIL = True
-except ImportError:
-    from dingo.model.modelres import ModelRes
-    USE_EVAL_DETAIL = False
+from dingo.io.output.eval_detail import EvalDetail, QualityLabel


 @Model.llm_register("LLMResumeOptimizer")
@@ -191,7 +185,7 @@ def _parse_match_report(cls, match_report) -> Tuple[List[str], List[str], List[s

     @classmethod
     def process_response(cls, response: str):
-        """Process LLM response. Returns EvalDetail (dev) or ModelRes (main)."""
+        """Process LLM response. Returns EvalDetail."""
         log.info(f"Raw LLM response length: {len(response)} chars")

         # Clean response
@@ -210,22 +204,13 @@ def process_response(cls, response: str):
         # Generate reason text
         reason = cls._generate_reason(optimization_summary, section_changes, overall_improvement)

-        # Return appropriate result type based on branch
-        if USE_EVAL_DETAIL:
-            result = EvalDetail(metric=cls.__name__)
-            result.status = False
-            result.label = [QualityLabel.QUALITY_GOOD]
-            result.reason = [reason]
-            # Store full response for downstream use (using extra field)
-            result.optimized_content = response_json
-        else:
-            result = ModelRes()
-            result.error_status = False
-            result.type = "RESUME_OPTIMIZED"
-            result.name = "OPTIMIZATION_COMPLETE"
-            result.reason = [reason]
-            # Store full response for downstream use
-            result.optimized_content = response_json
+        # Return EvalDetail result
+        result = EvalDetail(metric=cls.__name__)
+        result.status = False
+        result.label = [QualityLabel.QUALITY_GOOD]
+        result.reason = [reason]
+        # Store full response for downstream use (using extra field)
+        result.optimized_content = response_json

         return result

@@ -287,22 +272,14 @@ def _generate_reason(cls, summary: dict, changes: List[dict], overall: str) -> s

     @classmethod
     def eval(cls, input_data: Data):
-        """Override eval to validate inputs. Returns EvalDetail (dev) or ModelRes (main)."""
+        """Override eval to validate inputs. Returns EvalDetail."""
         # Validate that content (resume) is provided
         if not input_data.content:
-            if USE_EVAL_DETAIL:
-                result = EvalDetail(metric=cls.__name__)
-                result.status = True
-                result.label = [f"QUALITY_BAD.{cls.__name__}"]
-                result.reason = ["Resume text (content) is required but was not provided"]
-                return result
-            else:
-                return ModelRes(
-                    error_status=True,
-                    type="RESUME_OPTIMIZER_ERROR",
-                    name="MISSING_RESUME",
-                    reason=["Resume text (content) is required but was not provided"]
-                )
+            result = EvalDetail(metric=cls.__name__)
+            result.status = True
+            result.label = [f"QUALITY_BAD.{cls.__name__}"]
+            result.reason = ["Resume text (content) is required but was not provided"]
+            return result

         # Call parent eval method
         return super().eval(input_data)
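
The optimizer keeps the same contract, with one wrinkle: optimized_content is attached to the result dynamically ("using extra field" in the diff's own words), which only works if EvalDetail tolerates undeclared attributes (e.g. a permissive pydantic config). A sketch under that assumption; the Data construction is hypothetical:

    from dingo.io import Data  # assumed import path for Data
    from dingo.model.llm.llm_resume_optimizer import LLMResumeOptimizer

    data = Data(data_id="0", content="<resume text>")  # hypothetical construction
    detail = LLMResumeOptimizer.eval(data)

    if not detail.status:  # status=False signals a completed optimization
        # optimized_content is the extra attribute set in process_response
        optimized = getattr(detail, "optimized_content", None)
        print(detail.reason, optimized)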
