
Commit ee9f548

Merge branch 'main' of github.com:SFI-Visual-Intelligence/Collaborative-Coding-Exam into johan/devbranch
2 parents de6d161 + 4d51869 commit ee9f548

9 files changed: +161 −172 lines changed

Lines changed: 29 additions & 0 deletions
@@ -0,0 +1,29 @@
+on:
+  push:
+    # Sequence of patterns matched against refs/tags
+    tags:
+      - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10
+
+name: Create Release
+
+jobs:
+  build:
+    name: Create Release
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@master
+      - name: Create Release
+        id: create_release
+        uses: actions/create-release@latest
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token
+        with:
+          tag_name: ${{ github.ref }}
+          release_name: Release ${{ github.ref }}
+          body: |
+            Changes in this Release
+            - First Change
+            - Second Change
+          draft: false
+          prerelease: false

CollaborativeCoding/dataloaders/download.py

Lines changed: 3 additions & 3 deletions
@@ -98,11 +98,11 @@ def download_svhn(path, train: bool = True):
     train_data = parent_path / "train_32x32.mat"
     test_data = parent_path / "test_32x32.mat"

-    if not train_data.is_file():
+    if not train_data.exists():
         download_svhn(parent_path, train=True)
-    if not test_data.is_file():
+    if not test_data.exists():
         download_svhn(parent_path, train=False)
-    print(test_data)
+
     train_labels = loadmat(train_data)["y"]
     test_labels = loadmat(test_data)["y"]
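For readers unfamiliar with the pathlib distinction this hunk turns on: Path.is_file() is true only for a regular file, while Path.exists() is true for any filesystem entry (file, directory, or symlink). A minimal standalone sketch, not part of the commit:

import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    mat = Path(tmp) / "train_32x32.mat"
    print(mat.exists(), mat.is_file())  # False False -> a download would trigger either way

    mat.touch()                         # an (empty) file at the expected path
    print(mat.exists(), mat.is_file())  # True True

    squatter = Path(tmp) / "test_32x32.mat"
    squatter.mkdir()                    # a directory occupying the file name
    print(squatter.exists(), squatter.is_file())  # True False -> exists() is the looser check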

CollaborativeCoding/dataloaders/svhn.py

Lines changed: 3 additions & 5 deletions
@@ -6,7 +6,6 @@
 from PIL import Image
 from scipy.io import loadmat
 from torch.utils.data import Dataset
-from torchvision.datasets import SVHN


 class SVHNDataset(Dataset):
@@ -31,7 +30,7 @@ def __init__(
         """
         super().__init__()

-        self.data_path = data_path
+        self.data_path = data_path / "SVHN"
         self.indexes = sample_ids
         self.split = "train" if train else "test"

@@ -41,7 +40,7 @@ def __init__(
         if not os.path.exists(
             os.path.join(self.data_path, f"svhn_{self.split}data.h5")
         ):
-            self._download_data(self.data_path)
+            self._create_h5py(self.data_path)

         assert os.path.exists(
             os.path.join(self.data_path, f"svhn_{self.split}data.h5")
@@ -53,15 +52,14 @@ def __init__(

         self.num_classes = len(np.unique(self.labels))

-    def _download_data(self, path: str):
+    def _create_h5py(self, path: str):
         """
         Downloads the SVHN dataset to the specified directory.
         Args:
             path (str): The directory where the dataset will be downloaded.
         """
         print(f"Downloading SVHN data into {path}")

-        SVHN(path, split=self.split, download=True)
         data = loadmat(os.path.join(path, f"{self.split}_32x32.mat"))

         images, labels = data["X"], data["y"]
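Taken together, these hunks re-root the dataset under data_path / "SVHN" and replace torchvision's downloader with an HDF5 cache built from a pre-fetched .mat file. A sketch of the assumed calling pattern (directory name and constructor arguments as in the diff; any other parameters the class takes are omitted):

from pathlib import Path

from CollaborativeCoding.dataloaders.svhn import SVHNDataset

# Assumes data/SVHN/train_32x32.mat has already been fetched, e.g. by the
# download_svhn() helper touched in the previous file.
dataset = SVHNDataset(
    data_path=Path("data"),       # the class now appends "SVHN" itself
    sample_ids=list(range(100)),  # hypothetical subset of sample indices
    train=True,
)
print(dataset.split, dataset.num_classes)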

CollaborativeCoding/dataloaders/uspsh5_7_9.py

Lines changed: 1 addition & 33 deletions
@@ -2,10 +2,8 @@

 import h5py
 import numpy as np
-import torch
 from PIL import Image
 from torch.utils.data import Dataset
-from torchvision import transforms


 class USPSH5_Digit_7_9_Dataset(Dataset):
@@ -55,6 +53,7 @@ def __init__(
         self.h5_path = data_path / self.filename
         self.sample_ids = sample_ids
         self.nr_channels = nr_channels
+        self.num_classes = 3

         # Load the dataset from the HDF5 file
         with h5py.File(self.filepath, "r") as hf:
@@ -103,34 +102,3 @@ def __getitem__(self, id):
         image = self.transform(image)

         return image, label
-
-
-def main():
-    # Example Usage:
-    transform = transforms.Compose(
-        [
-            transforms.Resize((16, 16)),  # Ensure images are 16x16
-            transforms.ToTensor(),
-            transforms.Normalize((0.5,), (0.5,)),  # Normalize to [-1, 1]
-        ]
-    )
-    indices = np.array([7, 8, 9])
-    # Load the dataset
-    dataset = USPSH5_Digit_7_9_Dataset(
-        data_path="C:/Users/Solveig/OneDrive/Dokumente/UiT PhD/Courses/Git",
-        sample_ids=indices,
-        train=False,
-        transform=transform,
-    )
-    data_loader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True)
-    batch = next(iter(data_loader))  # grab a batch from the dataloader
-    img, label = batch
-    print(img.shape)
-    print(label.shape)
-
-    # Check dataset size
-    print(f"Dataset size: {len(dataset)}")
-
-
-if __name__ == "__main__":
-    main()
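The deleted main() doubled as usage documentation for the class. For reference, a trimmed equivalent, with the user-specific path swapped for a placeholder but otherwise as in the removed code:

import numpy as np
import torch
from torchvision import transforms

from CollaborativeCoding.dataloaders.uspsh5_7_9 import USPSH5_Digit_7_9_Dataset

transform = transforms.Compose(
    [
        transforms.Resize((16, 16)),           # ensure images are 16x16
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,)),  # normalize to [-1, 1]
    ]
)
dataset = USPSH5_Digit_7_9_Dataset(
    data_path="path/to/usps_data",  # placeholder (the removed demo passed a local directory here)
    sample_ids=np.array([7, 8, 9]),
    train=False,
    transform=transform,
)
loader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True)
img, label = next(iter(loader))  # grab a batch from the dataloader
print(img.shape, label.shape, len(dataset))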

CollaborativeCoding/metrics/F1.py

Lines changed: 72 additions & 117 deletions
@@ -6,166 +6,121 @@
 class F1Score(nn.Module):
     """
     F1 Score implementation with support for both macro and micro averaging.
-
     This class computes the F1 score during training using either macro or micro averaging.
-    The F1 score is calculated based on the true positives (TP), false positives (FP),
-    and false negatives (FN) for each class.
-
     Parameters
     ----------
     num_classes : int
         The number of classes in the classification task.

-    macro_averaging : bool, optional, default=False
+    macro_averaging : bool, default=False
         If True, computes the macro-averaged F1 score. If False, computes the micro-averaged F1 score.
-
-    Attributes
-    ----------
-    num_classes : int
-        The number of classes in the classification task.
-
-    tp : torch.Tensor
-        Tensor storing the count of True Positives (TP) for each class.
-
-    fp : torch.Tensor
-        Tensor storing the count of False Positives (FP) for each class.
-
-    fn : torch.Tensor
-        Tensor storing the count of False Negatives (FN) for each class.
-
-    macro_averaging : bool
-        A flag indicating whether to compute the macro-averaged F1 score or not.
     """

     def __init__(self, num_classes, macro_averaging=False):
-        """
-        Initializes the F1Score object, setting up the necessary state variables.
-
-        Parameters
-        ----------
-        num_classes : int
-            The number of classes in the classification task.
-
-        macro_averaging : bool, optional, default=False
-            If True, computes the macro-averaged F1 score. If False, computes the micro-averaged F1 score.
-        """
         super().__init__()
-
         self.num_classes = num_classes
         self.macro_averaging = macro_averaging
         self.y_true = []
         self.y_pred = []
-        # Initialize variables for True Positives (TP), False Positives (FP), and False Negatives (FN)
-        self.tp = torch.zeros(num_classes)
-        self.fp = torch.zeros(num_classes)
-        self.fn = torch.zeros(num_classes)

-    def _micro_F1(self, target, preds):
+    def forward(self, target, preds):
         """
-        Compute the Micro F1 score by aggregating TP, FP, and FN across all classes.
+        Stores predictions and targets for computing the F1 score.

-        Micro F1 score is calculated globally by considering all predictions together, regardless of class.
+        Parameters
+        ----------
+        preds : torch.Tensor
+            Predicted logits (shape: [batch_size, num_classes]).
+        target : torch.Tensor
+            True labels (shape: [batch_size]).
+        """
+        preds = torch.argmax(preds, dim=-1)  # Convert logits to class indices
+        self.y_true.append(target.detach())
+        if preds.dim() == 0:  # Scalar (e.g., single class prediction)
+            preds = preds.unsqueeze(0)  # Add batch dimension
+        self.y_pred.append(preds.detach())
+
+    def compute_f1(self):
+        """
+        Computes the F1 score (Micro or Macro).

         Returns
         -------
         torch.Tensor
-            The micro-averaged F1 score.
+            The computed F1 score.
         """
-        for i in range(self.num_classes):
-            self.tp[i] += torch.sum((preds == i) & (target == i)).float()
-            self.fp[i] += torch.sum((preds == i) & (target != i)).float()
-            self.fn[i] += torch.sum((preds != i) & (target == i)).float()
+        if not self.y_true or not self.y_pred:  # Check if empty
+            return torch.tensor(np.nan)
+
+        # Convert lists to tensors
+        y_true = torch.cat(self.y_true)
+        y_pred = torch.cat(self.y_pred)

-        tp = torch.sum(self.tp)
-        fp = torch.sum(self.fp)
-        fn = torch.sum(self.fn)
+        return (
+            self._macro_F1(y_true, y_pred)
+            if self.macro_averaging
+            else self._micro_F1(y_true, y_pred)
+        )

-        precision = tp / (tp + fp + 1e-8)  # Avoid division by zero
-        recall = tp / (tp + fn + 1e-8)  # Avoid division by zero
+    def _micro_F1(self, target, preds):
+        """Computes Micro F1 Score (global TP, FP, FN)."""
+        tp = torch.sum(preds == target).float()
+        fp = torch.sum(preds != target).float()
+        fn = fp  # Since all errors are either FP or FN
+
+        precision = tp / (tp + fp + 1e-8)
+        recall = tp / (tp + fn + 1e-8)
+        f1 = 2 * (precision * recall) / (precision + recall + 1e-8)

-        f1 = (
-            2 * precision * recall / (precision + recall + 1e-8)
-        )  # Avoid division by zero
         return f1

     def _macro_F1(self, target, preds):
-        """
-        Compute the Macro F1 score by calculating the F1 score per class and averaging.
-
-        Macro F1 score is calculated as the average of per-class F1 scores. This approach treats all classes equally,
-        regardless of their frequency.
+        """Computes Macro F1 Score in a vectorized way (no loops)."""
+        num_classes = self.num_classes
+        target = target.long()  # Ensure target is a LongTensor
+        preds = preds.long()
+        # Create one-hot encodings of the true and predicted labels
+        target_one_hot = torch.nn.functional.one_hot(target, num_classes=num_classes)
+        preds_one_hot = torch.nn.functional.one_hot(preds, num_classes=num_classes)

-        Returns
-        -------
-        torch.Tensor
-            The macro-averaged F1 score.
-        """
-        # Calculate True Positives (TP), False Positives (FP), and False Negatives (FN) per class
-        for i in range(self.num_classes):
-            self.tp[i] += torch.sum((preds == i) & (target == i)).float()
-            self.fp[i] += torch.sum((preds == i) & (target != i)).float()
-            self.fn[i] += torch.sum((preds != i) & (target == i)).float()
-
-        precision_per_class = self.tp / (
-            self.tp + self.fp + 1e-8
-        )  # Avoid division by zero
-        recall_per_class = self.tp / (
-            self.tp + self.fn + 1e-8
-        )  # Avoid division by zero
-        f1_per_class = (
-            2
-            * precision_per_class
-            * recall_per_class
-            / (precision_per_class + recall_per_class + 1e-8)
-        )  # Avoid division by zero
-
-        # Take the average of F1 scores across all classes
-        f1_score = torch.mean(f1_per_class)
-        return f1_score
-
-    def forward(self, preds, target):
-        """
+        # Compute TP, FP, FN for each class
+        tp = torch.sum(target_one_hot * preds_one_hot, dim=0).float()
+        fp = torch.sum(preds_one_hot * (1 - target_one_hot), dim=0).float()
+        fn = torch.sum(target_one_hot * (1 - preds_one_hot), dim=0).float()

-        Update the True Positives, False Positives, and False Negatives, and compute the F1 score.
+        # Compute precision and recall per class
+        precision = tp / (tp + fp + 1e-8)
+        recall = tp / (tp + fn + 1e-8)

-        This method computes the F1 score based on the predictions and true labels. It can compute either the
-        macro-averaged or micro-averaged F1 score, depending on the `macro_averaging` flag.
+        # Compute per-class F1 score
+        f1_per_class = 2 * (precision * recall) / (precision + recall + 1e-8)

-        Parameters
-        ----------
-        preds : torch.Tensor
-            Predicted logits or class indices (shape: [batch_size, num_classes]).
-            These logits are typically the output of a softmax or sigmoid activation.
+        # Compute Macro F1 (mean over all classes)
+        return torch.mean(f1_per_class)

-        target : torch.Tensor
-            True labels (shape: [batch_size]), where each element is an integer representing the true class.
+    def __returnmetric__(self):
+        """
+        Computes and returns the F1 score (Micro or Macro).

         Returns
         -------
         torch.Tensor
-            The computed F1 score (either micro or macro, based on `macro_averaging`).
+            The computed F1 score.
         """
-        preds = torch.argmax(preds, dim=-1)
-        self.y_true.append(target)
-        self.y_pred.append(preds)
+        if not self.y_true or not self.y_pred:  # Check if empty
+            return torch.tensor(np.nan)
+
+        # Convert lists to tensors
+        y_true = torch.cat([t.unsqueeze(0) if t.dim() == 0 else t for t in self.y_true])
+        y_pred = torch.cat([t.unsqueeze(0) if t.dim() == 0 else t for t in self.y_pred])

         return (
-            self._micro_F1(self.y_true, self.y_pred)
-            if not self.macro_averaging
-            else self._macro_F1(self.y_true, self.y_pred)
+            self._macro_F1(y_true, y_pred)
+            if self.macro_averaging
+            else self._micro_F1(y_true, y_pred)
         )

     def __reset__(self):
+        """Resets stored predictions and targets."""
         self.y_true = []
         self.y_pred = []
-        return None
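The rewritten metric accumulates batches in forward() and defers the actual computation to compute_f1() / __returnmetric__(). A minimal usage sketch with random tensors (not from the repo); note that for single-label multiclass classification the micro F1 equals plain accuracy (up to the 1e-8 smoothing), since the total FP and total FN both equal the number of misclassifications:

import torch

from CollaborativeCoding.metrics.F1 import F1Score

metric = F1Score(num_classes=10, macro_averaging=False)

for _ in range(3):                        # e.g. three validation batches
    logits = torch.randn(32, 10)          # fake model outputs
    labels = torch.randint(0, 10, (32,))  # fake targets
    metric(labels, logits)                # forward(target, preds) stores the batch

print(metric.__returnmetric__())          # micro F1; equals accuracy here
metric.__reset__()                        # clear state between epochs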
