
Commit f6efcc3

refactor: clarify the generation of metrics per class
1 parent ca20621 commit f6efcc3

File tree

3 files changed (+20, -35 lines changed)

scripts/metrics/compute_per_class_map.py

Lines changed: 1 addition & 5 deletions

@@ -47,9 +47,6 @@
 from pycocotools.coco import COCO
 from pycocotools.cocoeval import COCOeval
 
-CLASSES = ["CLASS-AB", "CLASS-C", "CLASS-D", "CLASS-AB*"]
-
-
 SEQUENCE_TO_OFFSET = {
     "Traffic_2560x1600_30": 10000,
     "Kimono_1920x1080_24": 20000,
@@ -147,10 +144,9 @@ def coco_evaluation(ann_file, detections):
     coco_eval.accumulate()
     coco_eval.summarize()
 
-
     headers = ["AP", "AP50", "AP75", "APS", "APM", "APL"]
     npstat = np.array(coco_eval.stats[:6])
     npstat = npstat * 100  # Percent
     data_frame = pd.DataFrame([npstat], columns=headers)
 
-    return data_frame
+    return data_frame
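For reference, the surrounding coco_evaluation helper follows the standard pycocotools evaluation flow visible in this hunk. A minimal sketch of that pattern, assuming a "bbox" iouType and that detections is something loadRes accepts (a results JSON path or a list of detection dicts); this is an illustration, not the file's exact code:

# Sketch only: mirrors the flow shown in the diff; ground-truth loading and the
# "bbox" iouType are assumptions, not confirmed by this commit.
import numpy as np
import pandas as pd
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval


def coco_evaluation_sketch(ann_file, detections):
    coco_gt = COCO(ann_file)               # ground-truth annotations
    coco_dt = coco_gt.loadRes(detections)  # results JSON path or list of dicts

    coco_eval = COCOeval(coco_gt, coco_dt, iouType="bbox")
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()

    # The first six entries of coco_eval.stats are AP, AP50, AP75, APS, APM, APL.
    headers = ["AP", "AP50", "AP75", "APS", "APM", "APL"]
    npstat = np.array(coco_eval.stats[:6]) * 100  # percent
    return pd.DataFrame([npstat], columns=headers)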

scripts/metrics/compute_per_class_miou.py

Lines changed: 1 addition & 0 deletions

@@ -38,6 +38,7 @@
 
 import json
 
+
 def compute_per_class_mIoU(items):
     miou_acc = 0.0
     for item in items:
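This file only gains a separating blank line. From the two visible body lines, compute_per_class_mIoU accumulates a per-item mIoU and presumably averages it; a minimal sketch under that assumption (the "miou" key and the final division are hypothetical, not shown in the diff):

# Sketch only: everything past the first two body lines is assumed.
def compute_per_class_mIoU(items):
    miou_acc = 0.0
    for item in items:
        miou_acc += item["miou"]  # hypothetical per-sequence mIoU field
    return miou_acc / len(items) if items else 0.0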

scripts/metrics/gen_mpeg_cttc_csv.py

Lines changed: 18 additions & 30 deletions

@@ -103,44 +103,37 @@ def df_append(df1, df2):
 
 
 def generate_class_df(result_df, classes: dict):
-    class_data = pd.DataFrame(columns=result_df.columns)
-    class_data.drop(columns=["fps", "num_of_coded_frame"], inplace=True)
-
-    for tag, item in classes.items():
-        output = compute_per_class_results(result_df, tag, item)
-        classwise_df = df_append(class_data, output)
+    assert (
+        len(classes) == 1
+    ), "generate_class_df is expected to be called with a single class entry"
 
-    return classwise_df
+    ((tag, sequences),) = classes.items()
+    return compute_per_class_results(result_df, tag, sequences)
 
 
 def compute_per_class_results(result_df, name, sequences):
-    samples = None
-    num_points = prev_num_points = -1
-    output = pd.DataFrame(columns=result_df.columns)
-    output.drop(columns=["fps", "num_of_coded_frame"], inplace=True)
+    per_sequence_frames = []
+    num_points = None
 
     for seq in sequences:
-        d = result_df.loc[(result_df["Dataset"] == seq)]
+        seq_frames = result_df.loc[result_df["Dataset"] == seq]
 
-        if samples is None:
-            samples = d
+        if num_points is None:
+            num_points = seq_frames.shape[0]
         else:
-            samples = df_append(samples, d)
+            assert num_points == seq_frames.shape[0]
 
-        if prev_num_points == -1:
-            num_points = prev_num_points = d.shape[0]
-        else:
-            assert prev_num_points == d.shape[0]
+        per_sequence_frames.append(seq_frames)
 
+    samples = pd.concat(per_sequence_frames, ignore_index=True)
     samples["length"] = samples["num_of_coded_frame"] / samples["fps"]
 
+    output = result_df.drop(columns=["fps", "num_of_coded_frame"]).head(0).copy()
+
     for i in range(num_points):
-        # print(f"Set - {i}")
         points = samples.iloc[range(i, samples.shape[0], num_points)]
         total_length = points["length"].sum()
 
-        # print(points)
-
         new_row = {
             output.columns[0]: [
                 name,
@@ -176,8 +169,7 @@ def generate_csv_classwise_video_map(
     seq_prefix: str = None,
     dataset_prefix: str = None,
 ):
-    seq_list = []
-    [seq_list.extend(sequences) for sequences in dict_of_class_seq.values()]
+    seq_list = [seq for sequences in dict_of_class_seq.values() for seq in sequences]
 
     opts_metrics = {"AP": 0, "AP50": 1, "AP75": 2, "APS": 3, "APM": 4, "APL": 5}
     results_df = read_df_rec(result_path, dataset_prefix, seq_list, nb_operation_points)
@@ -212,9 +204,7 @@ def generate_csv_classwise_video_map(
     ), "No evaluation information found in provided result directories..."
 
     if not skip_classwise:
-        summary = compute_per_class_mAP(
-            dict_of_class_seq[class_name], items
-        )
+        summary = compute_per_class_mAP(dict_of_class_seq[class_name], items)
         maps = summary.values[0][opts_metrics[metric]]
         class_wise_maps.append(maps)
 
@@ -327,9 +317,7 @@ def generate_csv_classwise_video_miou(
         name, _, _ = get_seq_info(seq_info[utils.SEQ_INFO_KEY])
         matched_seq_names.append(name)
 
-        class_wise_results_df = generate_class_df(
-            results_df, {class_name: class_seqs}
-        )
+        class_wise_results_df = generate_class_df(results_df, {class_name: class_seqs})
 
         class_wise_results_df["end_accuracy"] = class_wise_mious
 
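The reworked compute_per_class_results concatenates the per-sequence frames with pd.concat, asserts that every sequence contributes the same number of operation points, and then groups the i-th operation point of every sequence with a strided iloc slice. A toy illustration of that slicing (the column names and values below are made up, not taken from the dataset, and the real code aggregates length-weighted metrics rather than summing bitrate):

import pandas as pd

# Two sequences, two operation points each, stacked the way pd.concat builds `samples`.
samples = pd.DataFrame(
    {
        "Dataset": ["SeqA", "SeqA", "SeqB", "SeqB"],
        "qp": [22, 27, 22, 27],
        "bitrate": [100.0, 60.0, 200.0, 120.0],
    }
)
num_points = 2  # operation points per sequence (asserted equal across sequences)

for i in range(num_points):
    # The strided slice picks the i-th row of every sequence: indices i, i + num_points, ...
    points = samples.iloc[range(i, samples.shape[0], num_points)]
    print(i, list(points["Dataset"]), points["bitrate"].sum())
# 0 ['SeqA', 'SeqB'] 300.0
# 1 ['SeqA', 'SeqB'] 180.0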
