|
3 | 3 | import pytest |
4 | 4 |
|
5 | 5 | from CollaborativeCoding.load_metric import MetricWrapper |
6 | | -from CollaborativeCoding.metrics import Accuracy, F1Score, Precision, Recall |
| 6 | +from CollaborativeCoding.metrics import Accuracy, F1Score, Precision, Recall, EntropyPrediction |
7 | 7 |
|
8 | 8 |
|
9 | 9 | @pytest.mark.parametrize( |
@@ -34,9 +34,9 @@ def test_metric_wrapper(metric, num_classes, macro_averaging): |
34 | 34 | ) |
35 | 35 |
|
36 | 36 | metrics(y_true, logits) |
37 | | - score = metrics.accumulate() |
38 | | - metrics.reset() |
39 | | - empty_score = metrics.accumulate() |
| 37 | + score = metrics.__getmetrics__() |
| 38 | + metrics.__resetmetrics__() |
| 39 | + empty_score = metrics.__getmetrics__() |
40 | 40 |
|
41 | 41 | assert isinstance(score, dict), "Expected a dictionary output." |
42 | 42 | assert metric in score, f"Expected {metric} metric in the output." |
@@ -129,17 +129,30 @@ def test_precision(): |
129 | 129 |
|
def test_accuracy():
    """Exercise the Accuracy metric: perfect score, reset-to-NaN, micro
    averaging, and macro averaging over accumulated batches."""
    import numpy as np
    import torch

    y_true = torch.tensor([0, 1, 2, 3, 4, 5])
    y_pred = torch.tensor([0, 1, 2, 3, 4, 5])

    accuracy = Accuracy(num_classes=6, macro_averaging=False)

    # Perfect predictions -> accuracy of exactly 1.0.
    accuracy(y_true, y_pred)
    assert accuracy.__returnmetric__() == 1.0, "Expected accuracy to be 1.0"

    # After a reset there is nothing accumulated, so the metric reports NaN.
    # Use np.isnan rather than an `is np.nan` identity check: identity only
    # holds if the metric returns the exact np.nan singleton object.
    accuracy.__reset__()
    assert np.isnan(accuracy.__returnmetric__()), "Expected accuracy to be NaN after reset"

    # One wrong prediction out of six -> micro-averaged accuracy of 5/6.
    expected = 5.0 / 6.0
    y_pred = torch.tensor([0, 1, 2, 3, 4, 4])
    accuracy(y_true, y_pred)
    assert np.abs(accuracy.__returnmetric__() - expected) < 1e-5, (
        f"Expected accuracy to be {expected}"
    )

    # Macro averaging over two identical batches should also yield 5/6.
    accuracy.__reset__()
    accuracy.macro_averaging = True
    accuracy(y_true, y_pred)
    y_true_1 = torch.tensor([0, 1, 2, 3, 4, 5])
    y_pred_1 = torch.tensor([0, 1, 2, 3, 4, 4])
    accuracy(y_true_1, y_pred_1)
    assert np.abs(accuracy.__returnmetric__() - expected) < 1e-5, (
        f"Expected accuracy to be {expected}"
    )

    # Flipping back to micro averaging on the same accumulated batches must
    # give the same score for this data.
    accuracy.macro_averaging = False
    assert np.abs(accuracy.__returnmetric__() - expected) < 1e-5, (
        f"Expected accuracy to be {expected}"
    )
    accuracy.__reset__()
143 | 156 |
|
144 | 157 |
|
145 | 158 | def test_entropypred(): |
|
0 commit comments