Skip to content

Commit eb02338

Browse files
authored
Merge pull request #328 from Kylie-dot-s/fix/remove-modelres-from-resume-modules
fix: Remove ModelRes references from resume modules
2 parents 176e192 + 8df42d4 commit eb02338

File tree

2 files changed

+37
-97
lines changed

2 files changed

+37
-97
lines changed

dingo/model/llm/llm_keyword_matcher.py

Lines changed: 22 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -9,19 +9,12 @@
99
from typing import List
1010

1111
from dingo.io import Data
12+
from dingo.io.output.eval_detail import EvalDetail, QualityLabel
1213
from dingo.model import Model
1314
from dingo.model.llm.base_openai import BaseOpenAI
1415
from dingo.utils import log
1516
from dingo.utils.exception import ConvertJsonError
1617

17-
# Import EvalDetail for dev branch compatibility, fallback to ModelRes for main branch
18-
try:
19-
from dingo.io.output.eval_detail import EvalDetail, QualityLabel
20-
USE_EVAL_DETAIL = True
21-
except ImportError:
22-
from dingo.model.modelres import ModelRes
23-
USE_EVAL_DETAIL = False
24-
2518
# Complete synonym mapping for keyword normalization
2619
SYNONYM_MAP = {
2720
"k8s": "Kubernetes",
@@ -185,7 +178,7 @@ def _build_prompt(jd_text: str, resume_text: str) -> str:
185178

186179
@classmethod
187180
def process_response(cls, response: str):
188-
"""Process LLM response. Returns EvalDetail (dev) or ModelRes (main)."""
181+
"""Process LLM response. Returns EvalDetail."""
189182
log.info(f"Raw LLM response: {response}")
190183

191184
# Extract think content and clean response
@@ -213,29 +206,16 @@ def process_response(cls, response: str):
213206

214207
log.info(f"Keyword match score: {score:.1%}, threshold: {cls.threshold:.0%}")
215208

216-
# Return appropriate result type based on branch
217-
if USE_EVAL_DETAIL:
218-
result = EvalDetail(metric=cls.__name__)
219-
result.score = score
220-
result.reason = [reason]
221-
if score >= cls.threshold:
222-
result.status = False
223-
result.label = [QualityLabel.QUALITY_GOOD]
224-
else:
225-
result.status = True
226-
result.label = [f"QUALITY_BAD.{cls.__name__}"]
209+
# Return EvalDetail result
210+
result = EvalDetail(metric=cls.__name__)
211+
result.score = score
212+
result.reason = [reason]
213+
if score >= cls.threshold:
214+
result.status = False
215+
result.label = [QualityLabel.QUALITY_GOOD]
227216
else:
228-
result = ModelRes()
229-
result.score = score
230-
result.reason = [reason]
231-
if score >= cls.threshold:
232-
result.error_status = False
233-
result.type = "KEYWORD_MATCH_GOOD"
234-
result.name = "MATCH_GOOD"
235-
else:
236-
result.error_status = True
237-
result.type = "KEYWORD_MATCH_LOW"
238-
result.name = "MATCH_LOW"
217+
result.status = True
218+
result.label = [f"QUALITY_BAD.{cls.__name__}"]
239219

240220
return result
241221

@@ -346,38 +326,22 @@ def _generate_reason(cls, jd_analysis: dict, keyword_analysis: List[dict], score
346326

347327
@classmethod
348328
def eval(cls, input_data: Data):
349-
"""Override eval to validate inputs. Returns EvalDetail (dev) or ModelRes (main)."""
329+
"""Override eval to validate inputs. Returns EvalDetail."""
350330
# Validate that content (resume) is provided
351331
if not input_data.content:
352-
if USE_EVAL_DETAIL:
353-
result = EvalDetail(metric=cls.__name__)
354-
result.status = True
355-
result.label = [f"QUALITY_BAD.{cls.__name__}"]
356-
result.reason = ["Resume text (content) is required but was not provided"]
357-
return result
358-
else:
359-
return ModelRes(
360-
error_status=True,
361-
type="KEYWORD_MATCH_ERROR",
362-
name="MISSING_RESUME",
363-
reason=["Resume text (content) is required but was not provided"]
364-
)
332+
result = EvalDetail(metric=cls.__name__)
333+
result.status = True
334+
result.label = [f"QUALITY_BAD.{cls.__name__}"]
335+
result.reason = ["Resume text (content) is required but was not provided"]
336+
return result
365337

366338
# Validate that prompt (JD) is provided
367339
if not input_data.prompt:
368-
if USE_EVAL_DETAIL:
369-
result = EvalDetail(metric=cls.__name__)
370-
result.status = True
371-
result.label = [f"QUALITY_BAD.{cls.__name__}"]
372-
result.reason = ["Job description (prompt) is required but was not provided"]
373-
return result
374-
else:
375-
return ModelRes(
376-
error_status=True,
377-
type="KEYWORD_MATCH_ERROR",
378-
name="MISSING_JD",
379-
reason=["Job description (prompt) is required but was not provided"]
380-
)
340+
result = EvalDetail(metric=cls.__name__)
341+
result.status = True
342+
result.label = [f"QUALITY_BAD.{cls.__name__}"]
343+
result.reason = ["Job description (prompt) is required but was not provided"]
344+
return result
381345

382346
# Call parent eval method
383347
return super().eval(input_data)

dingo/model/llm/llm_resume_optimizer.py

Lines changed: 15 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -9,19 +9,12 @@
99
from typing import List, Tuple
1010

1111
from dingo.io import Data
12+
from dingo.io.output.eval_detail import EvalDetail, QualityLabel
1213
from dingo.model import Model
1314
from dingo.model.llm.base_openai import BaseOpenAI
1415
from dingo.utils import log
1516
from dingo.utils.exception import ConvertJsonError
1617

17-
# Import EvalDetail for dev branch compatibility, fallback to ModelRes for main branch
18-
try:
19-
from dingo.io.output.eval_detail import EvalDetail, QualityLabel
20-
USE_EVAL_DETAIL = True
21-
except ImportError:
22-
from dingo.model.modelres import ModelRes
23-
USE_EVAL_DETAIL = False
24-
2518

2619
@Model.llm_register("LLMResumeOptimizer")
2720
class LLMResumeOptimizer(BaseOpenAI):
@@ -191,7 +184,7 @@ def _parse_match_report(cls, match_report) -> Tuple[List[str], List[str], List[s
191184

192185
@classmethod
193186
def process_response(cls, response: str):
194-
"""Process LLM response. Returns EvalDetail (dev) or ModelRes (main)."""
187+
"""Process LLM response. Returns EvalDetail."""
195188
log.info(f"Raw LLM response length: {len(response)} chars")
196189

197190
# Clean response
@@ -210,22 +203,13 @@ def process_response(cls, response: str):
210203
# Generate reason text
211204
reason = cls._generate_reason(optimization_summary, section_changes, overall_improvement)
212205

213-
# Return appropriate result type based on branch
214-
if USE_EVAL_DETAIL:
215-
result = EvalDetail(metric=cls.__name__)
216-
result.status = False
217-
result.label = [QualityLabel.QUALITY_GOOD]
218-
result.reason = [reason]
219-
# Store full response for downstream use (using extra field)
220-
result.optimized_content = response_json
221-
else:
222-
result = ModelRes()
223-
result.error_status = False
224-
result.type = "RESUME_OPTIMIZED"
225-
result.name = "OPTIMIZATION_COMPLETE"
226-
result.reason = [reason]
227-
# Store full response for downstream use
228-
result.optimized_content = response_json
206+
# Return EvalDetail result
207+
result = EvalDetail(metric=cls.__name__)
208+
result.status = False
209+
result.label = [QualityLabel.QUALITY_GOOD]
210+
result.reason = [reason]
211+
# Store full response for downstream use (using extra field)
212+
result.optimized_content = response_json
229213

230214
return result
231215

@@ -287,22 +271,14 @@ def _generate_reason(cls, summary: dict, changes: List[dict], overall: str) -> s
287271

288272
@classmethod
289273
def eval(cls, input_data: Data):
290-
"""Override eval to validate inputs. Returns EvalDetail (dev) or ModelRes (main)."""
274+
"""Override eval to validate inputs. Returns EvalDetail."""
291275
# Validate that content (resume) is provided
292276
if not input_data.content:
293-
if USE_EVAL_DETAIL:
294-
result = EvalDetail(metric=cls.__name__)
295-
result.status = True
296-
result.label = [f"QUALITY_BAD.{cls.__name__}"]
297-
result.reason = ["Resume text (content) is required but was not provided"]
298-
return result
299-
else:
300-
return ModelRes(
301-
error_status=True,
302-
type="RESUME_OPTIMIZER_ERROR",
303-
name="MISSING_RESUME",
304-
reason=["Resume text (content) is required but was not provided"]
305-
)
277+
result = EvalDetail(metric=cls.__name__)
278+
result.status = True
279+
result.label = [f"QUALITY_BAD.{cls.__name__}"]
280+
result.reason = ["Resume text (content) is required but was not provided"]
281+
return result
306282

307283
# Call parent eval method
308284
return super().eval(input_data)

0 commit comments

Comments (0)