Skip to content

Commit 4d809fc

Browse files
committed
Updated test_metrics and utility scripts
1 parent a3b6a87 commit 4d809fc

File tree

7 files changed

+28
-20
lines changed

7 files changed

+28
-20
lines changed

tests/test_metrics.py

Lines changed: 17 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from utils.metrics import Recall
1+
from utils.metrics import Recall, F1Score
22

33

44
def test_recall():
@@ -14,3 +14,19 @@ def test_recall():
1414
assert recall_score.allclose(torch.tensor(0.7143), atol=1e-5), (
1515
f"Recall Score: {recall_score.item()}"
1616
)
17+
18+
19+
def test_f1score():
    """Smoke-test F1Score bookkeeping on a tiny 3-class batch.

    After one `update` with predictions that hit some classes and miss
    others, the metric's running true-positive / false-positive /
    false-negative counters must all be non-zero.
    """
    import torch

    metric = F1Score(num_classes=3)

    # Four samples over three classes; argmax gives [0, 1, 2, 2].
    logits = torch.tensor(
        [
            [0.8, 0.1, 0.1],
            [0.2, 0.7, 0.1],
            [0.2, 0.3, 0.5],
            [0.1, 0.2, 0.7],
        ]
    )
    labels = torch.tensor([0, 1, 0, 2])

    metric.update(logits, labels)

    assert metric.tp.sum().item() > 0, "Expected some true positives."
    assert metric.fp.sum().item() > 0, "Expected some false positives."
    assert metric.fn.sum().item() > 0, "Expected some false negatives."

utils/dataloaders/__init__.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1-
__all__ = ["USPSDataset0_6"]
1+
__all__ = ["USPSDataset0_6", "USPSH5_Digit_7_9_Dataset"]
22

33
from .usps_0_6 import USPSDataset0_6
4+
from .uspsh5_7_9 import USPSH5_Digit_7_9_Dataset

utils/load_data.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,13 @@
11
from torch.utils.data import Dataset
22

3-
from .dataloaders import USPSDataset0_6
3+
from .dataloaders import USPSDataset0_6, USPSH5_Digit_7_9_Dataset
44

55

66
def load_data(dataset: str, *args, **kwargs) -> Dataset:
77
match dataset.lower():
88
case "usps_0-6":
99
return USPSDataset0_6(*args, **kwargs)
10+
case "usps_7-9":
11+
return USPSH5_Digit_7_9_Dataset(*args, **kwargs)
1012
case _:
1113
raise ValueError(f"Dataset: {dataset} not implemented.")

utils/load_metric.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
import numpy as np
44
import torch.nn as nn
55

6-
from .metrics import EntropyPrediction
6+
from .metrics import EntropyPrediction, F1Score
77

88

99
class MetricWrapper(nn.Module):
@@ -35,7 +35,7 @@ def _get_metric(self, key):
3535
case "entropy":
3636
return EntropyPrediction()
3737
case "f1":
38-
raise NotImplementedError("F1 score not implemented yet")
38+
return F1Score()
3939
case "recall":
4040
raise NotImplementedError("Recall score not implemented yet")
4141
case "precision":

utils/load_model.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import torch.nn as nn
22

3-
from .models import ChristianModel, MagnusModel
3+
from .models import ChristianModel, MagnusModel, SolveigModel
44

55

66
def load_model(modelname: str, *args, **kwargs) -> nn.Module:
@@ -9,6 +9,8 @@ def load_model(modelname: str, *args, **kwargs) -> nn.Module:
99
return MagnusModel(*args, **kwargs)
1010
case "christianmodel":
1111
return ChristianModel(*args, **kwargs)
12+
case "solveigmodel":
13+
return SolveigModel(*args, **kwargs)
1214
case _:
1315
raise ValueError(
1416
f"Model: {modelname} has not been implemented. \nCheck the documentation for implemented metrics, or check your spelling"

utils/metrics/F1.py

Lines changed: 0 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -85,16 +85,3 @@ def compute(self):
8585

8686
return f1_score
8787

88-
89-
def test_f1score():
90-
f1_metric = F1Score(num_classes=3)
91-
preds = torch.tensor(
92-
[[0.8, 0.1, 0.1], [0.2, 0.7, 0.1], [0.2, 0.3, 0.5], [0.1, 0.2, 0.7]]
93-
)
94-
95-
target = torch.tensor([0, 1, 0, 2])
96-
97-
f1_metric.update(preds, target)
98-
assert f1_metric.tp.sum().item() > 0, "Expected some true positives."
99-
assert f1_metric.fp.sum().item() > 0, "Expected some false positives."
100-
assert f1_metric.fn.sum().item() > 0, "Expected some false negatives."

utils/metrics/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
__all__ = ["EntropyPrediction", "Recall", "F1"]
1+
__all__ = ["EntropyPrediction", "Recall", "F1Score"]
22

33
from .EntropyPred import EntropyPrediction
44
from .recall import Recall

0 commit comments

Comments
 (0)