9 | 9 | from typing import List |
10 | 10 |
11 | 11 | from dingo.io import Data |
| 12 | +from dingo.io.output.eval_detail import EvalDetail, QualityLabel |
12 | 13 | from dingo.model import Model |
13 | 14 | from dingo.model.llm.base_openai import BaseOpenAI |
14 | 15 | from dingo.utils import log |
15 | 16 | from dingo.utils.exception import ConvertJsonError |
16 | 17 |
17 | | -# Import EvalDetail for dev branch compatibility, fallback to ModelRes for main branch |
18 | | -try: |
19 | | - from dingo.io.output.eval_detail import EvalDetail, QualityLabel |
20 | | - USE_EVAL_DETAIL = True |
21 | | -except ImportError: |
22 | | - from dingo.model.modelres import ModelRes |
23 | | - USE_EVAL_DETAIL = False |
24 | | - |
25 | 18 | # Complete synonym mapping for keyword normalization |
26 | 19 | SYNONYM_MAP = { |
27 | 20 | "k8s": "Kubernetes", |
@@ -185,7 +178,7 @@ def _build_prompt(jd_text: str, resume_text: str) -> str: |
185 | 178 |
186 | 179 | @classmethod |
187 | 180 | def process_response(cls, response: str): |
188 | | - """Process LLM response. Returns EvalDetail (dev) or ModelRes (main).""" |
| 181 | + """Process LLM response. Returns EvalDetail.""" |
189 | 182 | log.info(f"Raw LLM response: {response}") |
190 | 183 |
191 | 184 | # Extract think content and clean response |
@@ -213,29 +206,16 @@ def process_response(cls, response: str): |
213 | 206 |
214 | 207 | log.info(f"Keyword match score: {score:.1%}, threshold: {cls.threshold:.0%}") |
215 | 208 |
216 | | - # Return appropriate result type based on branch |
217 | | - if USE_EVAL_DETAIL: |
218 | | - result = EvalDetail(metric=cls.__name__) |
219 | | - result.score = score |
220 | | - result.reason = [reason] |
221 | | - if score >= cls.threshold: |
222 | | - result.status = False |
223 | | - result.label = [QualityLabel.QUALITY_GOOD] |
224 | | - else: |
225 | | - result.status = True |
226 | | - result.label = [f"QUALITY_BAD.{cls.__name__}"] |
| 209 | + # Return EvalDetail result |
| 210 | + result = EvalDetail(metric=cls.__name__) |
| 211 | + result.score = score |
| 212 | + result.reason = [reason] |
| 213 | + if score >= cls.threshold: |
| 214 | + result.status = False |
| 215 | + result.label = [QualityLabel.QUALITY_GOOD] |
227 | 216 | else: |
228 | | - result = ModelRes() |
229 | | - result.score = score |
230 | | - result.reason = [reason] |
231 | | - if score >= cls.threshold: |
232 | | - result.error_status = False |
233 | | - result.type = "KEYWORD_MATCH_GOOD" |
234 | | - result.name = "MATCH_GOOD" |
235 | | - else: |
236 | | - result.error_status = True |
237 | | - result.type = "KEYWORD_MATCH_LOW" |
238 | | - result.name = "MATCH_LOW" |
| 217 | + result.status = True |
| 218 | + result.label = [f"QUALITY_BAD.{cls.__name__}"] |
239 | 219 |
240 | 220 | return result |
241 | 221 |
@@ -346,38 +326,22 @@ def _generate_reason(cls, jd_analysis: dict, keyword_analysis: List[dict], score |
346 | 326 |
347 | 327 | @classmethod |
348 | 328 | def eval(cls, input_data: Data): |
349 | | - """Override eval to validate inputs. Returns EvalDetail (dev) or ModelRes (main).""" |
| 329 | + """Override eval to validate inputs. Returns EvalDetail.""" |
350 | 330 | # Validate that content (resume) is provided |
351 | 331 | if not input_data.content: |
352 | | - if USE_EVAL_DETAIL: |
353 | | - result = EvalDetail(metric=cls.__name__) |
354 | | - result.status = True |
355 | | - result.label = [f"QUALITY_BAD.{cls.__name__}"] |
356 | | - result.reason = ["Resume text (content) is required but was not provided"] |
357 | | - return result |
358 | | - else: |
359 | | - return ModelRes( |
360 | | - error_status=True, |
361 | | - type="KEYWORD_MATCH_ERROR", |
362 | | - name="MISSING_RESUME", |
363 | | - reason=["Resume text (content) is required but was not provided"] |
364 | | - ) |
| 332 | + result = EvalDetail(metric=cls.__name__) |
| 333 | + result.status = True |
| 334 | + result.label = [f"QUALITY_BAD.{cls.__name__}"] |
| 335 | + result.reason = ["Resume text (content) is required but was not provided"] |
| 336 | + return result |
365 | 337 |
366 | 338 | # Validate that prompt (JD) is provided |
367 | 339 | if not input_data.prompt: |
368 | | - if USE_EVAL_DETAIL: |
369 | | - result = EvalDetail(metric=cls.__name__) |
370 | | - result.status = True |
371 | | - result.label = [f"QUALITY_BAD.{cls.__name__}"] |
372 | | - result.reason = ["Job description (prompt) is required but was not provided"] |
373 | | - return result |
374 | | - else: |
375 | | - return ModelRes( |
376 | | - error_status=True, |
377 | | - type="KEYWORD_MATCH_ERROR", |
378 | | - name="MISSING_JD", |
379 | | - reason=["Job description (prompt) is required but was not provided"] |
380 | | - ) |
| 340 | + result = EvalDetail(metric=cls.__name__) |
| 341 | + result.status = True |
| 342 | + result.label = [f"QUALITY_BAD.{cls.__name__}"] |
| 343 | + result.reason = ["Job description (prompt) is required but was not provided"] |
| 344 | + return result |
381 | 345 |
382 | 346 | # Call parent eval method |
383 | 347 | return super().eval(input_data) |
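
For reference, a minimal usage sketch (not part of this commit) of how the updated class might be exercised end to end. It assumes a hypothetical class name `ResumeKeywordMatchPrompt` (the diff only ever refers to `cls.__name__`) and that dingo's `Data` model accepts `data_id`, `prompt`, and `content` keyword fields; only the fields the diff itself reads (`input_data.content`, `input_data.prompt`) and the `EvalDetail` attributes it sets (`status`, `score`, `label`, `reason`) are used.

```python
# Hedged usage sketch -- not part of the commit above.
# Assumptions: the prompt class is named ResumeKeywordMatchPrompt (hypothetical)
# and Data accepts data_id / prompt / content keyword arguments.
from dingo.io import Data

jd_text = "Senior backend engineer: Kubernetes, Go, PostgreSQL, CI/CD pipelines"
resume_text = "Five years running Go microservices on k8s with Postgres and GitLab CI"

data = Data(
    data_id="resume-001",
    prompt=jd_text,       # eval() reads the job description from input_data.prompt
    content=resume_text,  # eval() reads the resume from input_data.content
)

result = ResumeKeywordMatchPrompt.eval(data)  # returns an EvalDetail

# status=False with a QUALITY_GOOD label -> keyword-match score met the threshold;
# status=True with a "QUALITY_BAD.<ClassName>" label -> below threshold or missing input.
print(result.status, result.score)
print(result.label)
print(result.reason)
```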