@@ -71,7 +71,8 @@ def scoring_log_likelihood(labels: LABELS_VALUE_TYPE, scores: SCORES_VALUE_TYPE,
7171 log_likelihood = labels_array * np .log (scores_array ) + (1 - labels_array ) * np .log (1 - scores_array )
7272 clipped_one = log_likelihood .clip (min = - 100 , max = 100 )
7373 res = clipped_one .mean ()
74- return res # type: ignore[no-any-return]
74+ # test produces different output
75+ return round (float (res ), 6 )
7576
7677
7778def scoring_roc_auc (labels : LABELS_VALUE_TYPE , scores : SCORES_VALUE_TYPE ) -> float :
@@ -96,7 +97,7 @@ def scoring_roc_auc(labels: LABELS_VALUE_TYPE, scores: SCORES_VALUE_TYPE) -> flo
9697 if labels_ .ndim == 1 :
9798 labels_ = (labels_ [:, None ] == np .arange (n_classes )[None , :]).astype (int )
9899
99- return roc_auc_score (labels_ , scores_ , average = "macro" ) # type: ignore[no-any-return]
100+ return float ( roc_auc_score (labels_ , scores_ , average = "macro" ))
100101
101102
102103def _calculate_decision_metric (func : DecisionMetricFn , labels : LABELS_VALUE_TYPE , scores : SCORES_VALUE_TYPE ) -> float :
@@ -206,7 +207,7 @@ def scoring_hit_rate(labels: LABELS_VALUE_TYPE, scores: SCORES_VALUE_TYPE) -> fl
206207 top_ranked_labels = np .argmax (scores_ , axis = 1 )
207208 is_in = labels_ [np .arange (len (labels )), top_ranked_labels ]
208209
209- return np .mean (is_in ) # type: ignore[no-any-return]
210+ return float ( np .mean (is_in ))
210211
211212
212213def scoring_neg_coverage (labels : LABELS_VALUE_TYPE , scores : SCORES_VALUE_TYPE ) -> float :
@@ -242,7 +243,7 @@ def scoring_neg_coverage(labels: LABELS_VALUE_TYPE, scores: SCORES_VALUE_TYPE) -
242243 labels_ , scores_ = transform (labels , scores )
243244
244245 n_classes = scores_ .shape [1 ]
245- return 1 - (coverage_error (labels , scores ) - 1 ) / (n_classes - 1 ) # type: ignore[no-any-return]
246+ return float ( 1 - (coverage_error (labels , scores ) - 1 ) / (n_classes - 1 ))
246247
247248
248249def scoring_neg_ranking_loss (labels : LABELS_VALUE_TYPE , scores : SCORES_VALUE_TYPE ) -> float :
@@ -258,7 +259,7 @@ def scoring_neg_ranking_loss(labels: LABELS_VALUE_TYPE, scores: SCORES_VALUE_TYP
258259 :param scores: for each utterance, this list contains scores for each of `n_classes` classes
259260 :return: Score of the scoring metric
260261 """
261- return - label_ranking_loss (labels , scores ) # type: ignore[no-any-return]
262+ return float ( - label_ranking_loss (labels , scores ))
262263
263264
264265def scoring_map (labels : LABELS_VALUE_TYPE , scores : SCORES_VALUE_TYPE ) -> float :
@@ -274,4 +275,4 @@ def scoring_map(labels: LABELS_VALUE_TYPE, scores: SCORES_VALUE_TYPE) -> float:
274275 :param scores: for each sample, this list contains scores for each of `n_classes` classes
275276 :return: mean average precision score
276277 """
277- return label_ranking_average_precision_score (labels , scores ) # type: ignore[no-any-return]
278+ return float ( label_ranking_average_precision_score (labels , scores ))
0 commit comments