-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathperformencer.py
More file actions
127 lines (107 loc) · 4.07 KB
/
performencer.py
File metadata and controls
127 lines (107 loc) · 4.07 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
import json
import torch
from torch import nn
class Performencer:
    """Track running accuracy and loss for a named model/phase.

    Usage: call add_acc()/add_loss() once per batch, then pinpoint() at the
    end of an epoch to snapshot the running averages into the history lists
    and reset the per-epoch counters.
    """

    def __init__(self, name, output_size):
        # Display name used in printed reports (e.g. "Train", "Test").
        self.name = name
        # Running sample counters for accuracy; reset by pinpoint().
        self.total = 0
        self.true = 0
        # Running loss accumulators; reset by pinpoint().
        self.loss_sum = 0.0
        self.loss_batches = 0
        # Per-epoch history of averaged accuracy/loss.
        self.accuracies = []
        self.losses = []
        # NOTE(review): output_size is stored but never read in this class —
        # presumably kept for external callers; confirm before removing.
        self.output_size = output_size
        self.criterion = nn.CrossEntropyLoss()

    def pinpoint(self, should_print=True):
        """Snapshot the running averages and reset the counters.

        Fix: guard against ZeroDivisionError when pinpoint() is called
        before any batch was recorded — 0.0 is appended in that case
        instead of raising.
        """
        self.accuracies.append(self.true / self.total if self.total else 0.0)
        self.total = 0
        self.true = 0
        self.losses.append(
            self.loss_sum / self.loss_batches if self.loss_batches else 0.0
        )
        self.loss_sum = 0.0
        self.loss_batches = 0
        if should_print:
            print("-----------------")
            print("%s Accuracy: %f" % (self.name, self.accuracies[-1]))
            print("%s Loss: %f" % (self.name, self.losses[-1]))
            print()

    def add_acc(self, outputs, label):
        """Accumulate (total, correct) counts for one batch of predictions."""
        total, count = self.categorical_accuracy(outputs, label)
        self.total += total
        self.true += count

    def add_loss(self, outputs, label):
        """Compute cross-entropy loss for a batch, accumulate it, and return
        the loss tensor (still attached to the graph for backward())."""
        loss = self.criterion(outputs, label)
        self.loss_sum += loss.item()
        self.loss_batches += 1
        return loss

    def categorical_accuracy(self, outputs, label):
        """Return (total, correct) counts for an argmax prediction batch.

        Assumes outputs is (batch, num_classes) scores and label is (batch,)
        class indices — TODO confirm against callers.
        """
        outputs = outputs.cpu()
        label = label.cpu()
        # Argmax over the class dimension gives the predicted class per sample.
        preds = outputs.argmax(dim=1)
        total = label.size(0)
        # Vectorized comparison replaces the original per-sample Python loop
        # (same counts, C-speed instead of O(batch) Python iteration).
        correct = (preds == label).sum().item()
        return total, correct

    def log_to_file(self, file_name):
        """Write accuracy history and loss history as two JSON lines to
        ./accuracies/<file_name> (the directory must already exist)."""
        with open("./accuracies/%s" % file_name, "w+") as file:
            file.write(json.dumps(self.accuracies))
            file.write('\n')
            file.write(json.dumps(self.losses))
# def categorical_accuracy(self, outputs, label):
# outputs = outputs.cpu()
# label = label.cpu()
#
# # Argmax output
# max_preds = outputs.argmax(dim=1, keepdim=True)
#
# # Filter values
# filter_count = 0
# if self.filter_value != -1:
# for o, l in zip(max_preds.squeeze(1), label):
# if o.item() == self.filter_value and l.item() == self.filter_value:
# filter_count += 1
#
# # Get all non pad elements
# non_pad_elements = (label != self.tag_pad_index).nonzero()
#
# # Get all correct predictions
# correct = max_preds[non_pad_elements].squeeze(1).eq(label[non_pad_elements])
#
# # Calculate accuracy
# count = (correct.sum().item() - filter_count)
# total = torch.FloatTensor([label[non_pad_elements].shape[0]]).item() - filter_count
#
# return total, count
#
# def categorical_accuracy(self, preds, y):
# max_preds = preds.argmax(dim=1, keepdim=True) # get the index of the max probability
# non_pad_elements = (y != self.tag_pad_index).nonzero()
# correct = max_preds[non_pad_elements].squeeze(1).eq(y[non_pad_elements])
# return correct.sum() / torch.FloatTensor([y[non_pad_elements].shape[0]])
#
# def get_acc(self, outputs, label):
# outputs = outputs.cpu()
# label = label.cpu()
#
# outputs = outputs.argmax(dim=1, keepdim=True)
#
# if self.filter_value is not None:
# count_filter = sum(1 for a, b in zip(outputs, label)
# if a.item() == self.filter_value and b.item() == self.filter_value)
# else:
# count_filter = 0
#
# ignore_index_count = sum(1 for b in label if b.item() == self.tag_pad_index)
#
# count = sum(1 for a, b in zip(outputs, label) if a.item() == b.item()) - count_filter
# total = len(outputs) - count_filter - ignore_index_count
# if total != 0:
# return count / total
# else:
# return 0