@@ -196,18 +196,19 @@ def scoring_neg_coverage(labels: LABELS_VALUE_TYPE, scores: SCORES_VALUE_TYPE) -
 
 The result is equivalent to executing the following code:
 
-.. code-block:: python
-
-    scores = np.array(scores)
-    labels = np.array(labels)
-
-    n_classes = scores.shape[1]
-    from scipy.stats import rankdata
-    int_ranks = rankdata(scores, axis=1)  # int ranks are from [1, n_classes]
-    filtered_ranks = int_ranks * labels  # guarantee that 0 labels wont have max rank
-    max_ranks = np.max(filtered_ranks, axis=1)
-    float_ranks = (max_ranks - 1) / (n_classes - 1)  # float ranks are from [0,1]
-    res = 1 - np.mean(float_ranks)
+>>> def compute_rank_metric():
+...     import numpy as np
+...     scores = np.array([[1, 2, 3]])
+...     labels = np.array([1, 0, 0])
+...     n_classes = scores.shape[1]
+...     from scipy.stats import rankdata
+...     int_ranks = rankdata(scores, axis=1)
+...     filtered_ranks = int_ranks * labels
+...     max_ranks = np.max(filtered_ranks, axis=1)
+...     float_ranks = (max_ranks - 1) / (n_classes - 1)
+...     return float(1 - np.mean(float_ranks))
+>>> print(f"{compute_rank_metric():.1f}")
+1.0
 
 :param labels: ground truth labels for each utterance
 :param scores: for each utterance, this list contains scores for each of `n_classes` classes
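As a quick cross-check of the ranking logic shown in the docstring, here is a standalone sketch applied to a two-utterance batch. The helper name `neg_coverage` and the sample data are illustrative assumptions, not part of the commit; only the computation mirrors the code above.

    # Standalone sketch of the neg-coverage computation from the docstring.
    # The helper name and the sample batch below are illustrative assumptions.
    import numpy as np
    from scipy.stats import rankdata

    def neg_coverage(labels, scores):
        scores = np.array(scores)
        labels = np.array(labels)
        n_classes = scores.shape[1]
        int_ranks = rankdata(scores, axis=1)             # integer ranks in [1, n_classes]
        filtered_ranks = int_ranks * labels               # zero out ranks of negative labels
        max_ranks = np.max(filtered_ranks, axis=1)        # highest rank among true labels
        float_ranks = (max_ranks - 1) / (n_classes - 1)   # rescale to [0, 1]
        return float(1 - np.mean(float_ranks))

    labels = [[0, 1, 0], [1, 0, 0]]
    scores = [[0.2, 0.5, 0.3], [0.1, 0.2, 0.7]]
    print(neg_coverage(labels, scores))  # 0.5

In this batch the first utterance's true label has the highest rank (float rank 1.0) and the second's has the lowest (float rank 0.0), giving 1 - 0.5 = 0.5; the single-utterance doctest in the diff evaluates to 1.0 by the same arithmetic.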