@@ -263,8 +263,19 @@ Clean plugin architecture for custom rules, prompts, and models:
 class MyCustomRule(BaseRule):
     @classmethod
     def eval(cls, input_data: Data) -> EvalDetail:
-        # Your logic here
-        return EvalDetail(status=False, label=['QUALITY_GOOD'])
+        # Example: check if content is empty
+        if not input_data.content:
+            return EvalDetail(
+                metric=cls.__name__,
+                status=True,  # Found an issue
+                label=[f'{cls.metric_type}.{cls.__name__}'],
+                reason=["Content is empty"]
+            )
+        return EvalDetail(
+            metric=cls.__name__,
+            status=False,  # No issue found
+            label=['QUALITY_GOOD']
+        )
 ```
 **Why It Matters**: Adapt to domain-specific requirements without forking the codebase.

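A quick sanity check makes the `status` convention in the hunk above concrete: `status=True` means an issue was found, `status=False` means the data passed. This is a minimal sketch, not confirmed API; the `dingo.io` import path and the `Data(content=...)` constructor are assumptions for illustration.

```python
from dingo.io import Data  # assumed import path, for illustration only

# Empty content: the rule should flag it.
detail = MyCustomRule.eval(Data(content=""))
assert detail.status is True        # True = issue found
print(detail.label, detail.reason)  # e.g. ['<metric_type>.MyCustomRule'] ['Content is empty']

# Non-empty content: the rule should pass it.
detail = MyCustomRule.eval(Data(content="Hello, world"))
assert detail.status is False       # False = no issue
```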
@@ -287,9 +298,9 @@ Dingo provides **70+ evaluation metrics** across multiple dimensions, combining
 | **Security** | PII detection, Perspective API toxicity | Privacy and safety |

 📊 **[View Complete Metrics Documentation →](docs/metrics.md)**
-📖 **[RAG Evaluation Guide →](docs/rag_evaluation_metrics_zh.md)**
-🔍 **[Hallucination Detection Guide →](docs/hallucination_guide.md)**
-✅ **[Factuality Assessment Guide →](docs/factcheck_guide.md)**
+📖 **[RAG Evaluation Guide (Chinese) →](docs/rag_evaluation_metrics_zh.md)**
+🔍 **[Hallucination Detection Guide (Chinese) →](docs/hallucination_guide.md)**
+✅ **[Factuality Assessment Guide (Chinese) →](docs/factcheck_guide.md)**

 Most metrics are backed by academic research to ensure scientific rigor.

@@ -451,6 +462,7 @@ class DomainSpecificRule(BaseRule):
         is_valid = your_validation_logic(text)

         return EvalDetail(
+            metric=cls.__name__,
             status=not is_valid,  # False = good, True = bad
             label=['QUALITY_GOOD' if is_valid else 'QUALITY_BAD_CUSTOM'],
             reason=["Validation details..."]
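
`your_validation_logic` above is a placeholder. A concrete, purely hypothetical version might look like the sketch below; note the inversion in the hunk above: the validator answers "is this text good?", while `EvalDetail.status` answers "was a problem found?", hence `status=not is_valid`.

```python
def your_validation_logic(text: str) -> bool:
    """Hypothetical validator: returns True when the text looks good.

    Remember the inversion when wiring this into EvalDetail:
    status = not is_valid, because status=True signals a problem.
    """
    # Example checks: non-empty after stripping, within a made-up length budget.
    return bool(text.strip()) and len(text) <= 10_000
```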