 from dingo.utils import log
 from dingo.utils.exception import ConvertJsonError

-# Import EvalDetail for dev branch compatibility, fallback to ModelRes for main branch
-try:
-    from dingo.io.output.eval_detail import EvalDetail, QualityLabel
-    USE_EVAL_DETAIL = True
-except ImportError:
-    from dingo.model.modelres import ModelRes
-    USE_EVAL_DETAIL = False
+from dingo.io.output.eval_detail import EvalDetail, QualityLabel

 # Complete synonym mapping for keyword normalization
 SYNONYM_MAP = {
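Note: with the fallback removed, the module now imports the dev-branch result type unconditionally. For reference, a minimal sketch of the EvalDetail surface the rest of this diff relies on (constructor plus the score/reason/status/label attributes); the metric name and values below are illustrative placeholders, not taken from the repo:

    from dingo.io.output.eval_detail import EvalDetail, QualityLabel

    # Build a passing result the same way the hunks below do.
    detail = EvalDetail(metric="KeywordMatchRule")    # hypothetical metric name
    detail.score = 0.8                                # illustrative score
    detail.reason = ["8 of 10 JD keywords matched"]   # illustrative reason text
    detail.status = False                             # False = no quality problem in this convention
    detail.label = [QualityLabel.QUALITY_GOOD]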
@@ -185,7 +179,7 @@ def _build_prompt(jd_text: str, resume_text: str) -> str:

     @classmethod
     def process_response(cls, response: str):
-        """Process LLM response. Returns EvalDetail (dev) or ModelRes (main)."""
+        """Process LLM response. Returns EvalDetail."""
         log.info(f"Raw LLM response: {response}")

         # Extract think content and clean response
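The context line above mentions stripping the model's think content before parsing. The repo's actual helper is not shown in this diff; as a rough sketch of what that step typically does (regex-based, assuming a <think>...</think> wrapper in the raw response):

    import re

    def split_think(response: str) -> tuple[str, str]:
        """Return (think_text, cleaned_response); a sketch, not the repo's helper."""
        match = re.search(r"<think>(.*?)</think>", response, flags=re.DOTALL)
        think = match.group(1).strip() if match else ""
        cleaned = re.sub(r"<think>.*?</think>", "", response, flags=re.DOTALL).strip()
        return think, cleaned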
@@ -213,29 +207,16 @@ def process_response(cls, response: str):

         log.info(f"Keyword match score: {score:.1%}, threshold: {cls.threshold:.0%}")

-        # Return appropriate result type based on branch
-        if USE_EVAL_DETAIL:
-            result = EvalDetail(metric=cls.__name__)
-            result.score = score
-            result.reason = [reason]
-            if score >= cls.threshold:
-                result.status = False
-                result.label = [QualityLabel.QUALITY_GOOD]
-            else:
-                result.status = True
-                result.label = [f"QUALITY_BAD.{cls.__name__}"]
+        # Return EvalDetail result
+        result = EvalDetail(metric=cls.__name__)
+        result.score = score
+        result.reason = [reason]
+        if score >= cls.threshold:
+            result.status = False
+            result.label = [QualityLabel.QUALITY_GOOD]
         else:
-            result = ModelRes()
-            result.score = score
-            result.reason = [reason]
-            if score >= cls.threshold:
-                result.error_status = False
-                result.type = "KEYWORD_MATCH_GOOD"
-                result.name = "MATCH_GOOD"
-            else:
-                result.error_status = True
-                result.type = "KEYWORD_MATCH_LOW"
-                result.name = "MATCH_LOW"
+            result.status = True
+            result.label = [f"QUALITY_BAD.{cls.__name__}"]

         return result

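For reviewers tracing the new single-path logic: this is what a caller would see from process_response after the change, assuming the status/label convention set above (status=True marks a below-threshold match). The rule class name and response variable are placeholders:

    result = KeywordMatchRule.process_response(llm_response)   # hypothetical class name
    if result.status:
        # Score fell below cls.threshold; label carries the dynamic bad-quality tag.
        print("Low match:", result.label, result.reason[0])
    else:
        print(f"Good match at {result.score:.1%}:", result.reason[0])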
@@ -346,38 +327,22 @@ def _generate_reason(cls, jd_analysis: dict, keyword_analysis: List[dict], score

     @classmethod
     def eval(cls, input_data: Data):
-        """Override eval to validate inputs. Returns EvalDetail (dev) or ModelRes (main)."""
+        """Override eval to validate inputs. Returns EvalDetail."""
         # Validate that content (resume) is provided
         if not input_data.content:
-            if USE_EVAL_DETAIL:
-                result = EvalDetail(metric=cls.__name__)
-                result.status = True
-                result.label = [f"QUALITY_BAD.{cls.__name__}"]
-                result.reason = ["Resume text (content) is required but was not provided"]
-                return result
-            else:
-                return ModelRes(
-                    error_status=True,
-                    type="KEYWORD_MATCH_ERROR",
-                    name="MISSING_RESUME",
-                    reason=["Resume text (content) is required but was not provided"]
-                )
+            result = EvalDetail(metric=cls.__name__)
+            result.status = True
+            result.label = [f"QUALITY_BAD.{cls.__name__}"]
+            result.reason = ["Resume text (content) is required but was not provided"]
+            return result

         # Validate that prompt (JD) is provided
         if not input_data.prompt:
-            if USE_EVAL_DETAIL:
-                result = EvalDetail(metric=cls.__name__)
-                result.status = True
-                result.label = [f"QUALITY_BAD.{cls.__name__}"]
-                result.reason = ["Job description (prompt) is required but was not provided"]
-                return result
-            else:
-                return ModelRes(
-                    error_status=True,
-                    type="KEYWORD_MATCH_ERROR",
-                    name="MISSING_JD",
-                    reason=["Job description (prompt) is required but was not provided"]
-                )
+            result = EvalDetail(metric=cls.__name__)
+            result.status = True
+            result.label = [f"QUALITY_BAD.{cls.__name__}"]
+            result.reason = ["Job description (prompt) is required but was not provided"]
+            return result

         # Call parent eval method
         return super().eval(input_data)
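A quick sanity check of the simplified validation path, assuming Data can be constructed with prompt/content keyword fields (only those attributes are read in this diff) and that dingo.io exports it; the import path, the data_id field, and the rule class name are all assumptions:

    from dingo.io import Data   # import path is an assumption

    # Missing JD: eval() should short-circuit with a bad-quality EvalDetail.
    detail = KeywordMatchRule.eval(   # hypothetical class name
        Data(data_id="demo", prompt="", content="Five years of Python and Django experience...")
    )
    assert detail.status is True
    assert detail.reason == ["Job description (prompt) is required but was not provided"]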