@@ -45,7 +45,10 @@ def jaccard_similarity(a: str, b: str) -> float:
     return {
         "f1_score": f1_score,
         "precision": precision,
-        "recall": recall
+        "recall": recall,
+        "total_correct": total_correct,
+        "total_predicted": total_predicted,
+        "total_ground_truth": total_ground_truth
     }
 
 def term_typing_metrics(y_true: List[Dict[str, List[str]]], y_pred: List[Dict[str, List[str]]]) -> Dict[str, float | int]:
@@ -80,7 +83,10 @@ def term_typing_metrics(y_true: List[Dict[str, List[str]]], y_pred: List[Dict[str, List[str]]]) -> Dict[str, float | int]:
     return {
         "f1_score": f1_score,
         "precision": precision,
-        "recall": recall
+        "recall": recall,
+        "total_correct": total_correct,
+        "total_predicted": total_predicted,
+        "total_ground_truth": total_ground_truth
     }
 
 def taxonomy_discovery_metrics(y_true: List[Dict[str, str]], y_pred: List[Dict[str, str]]) -> Dict[str, float | int]:
@@ -102,7 +108,10 @@ def taxonomy_discovery_metrics(y_true: List[Dict[str, str]], y_pred: List[Dict[str, str]]) -> Dict[str, float | int]:
     return {
         "f1_score": f1_score,
         "precision": precision,
-        "recall": recall
+        "recall": recall,
+        "total_correct": total_correct,
+        "total_predicted": total_predicted,
+        "total_ground_truth": total_ground_truth
     }
 
 
@@ -137,5 +146,8 @@ def expand_symmetric(triples: Set[Tuple[str, str, str]]) -> Set[Tuple[str, str, str]]:
     return {
         "f1_score": f1_score,
         "precision": precision,
-        "recall": recall
+        "recall": recall,
+        "total_correct": total_correct,
+        "total_predicted": total_predicted,
+        "total_ground_truth": total_ground_truth
     }
0 commit comments