 
 from __future__ import annotations
 
-import argparse
-import csv
 import json
 import os
 
-from typing import Any, List
-
 import numpy as np
 import pandas as pd
 import utils
 from pycocotools.coco import COCO
 from pycocotools.cocoeval import COCOeval
 
-from compressai_vision.evaluators.evaluators import BaseEvaluator
-
-CLASSES = ["CLASS-AB", "CLASS-C", "CLASS-D", "CLASS-AB*"]
-
-SEQS_BY_CLASS = {
-    CLASSES[0]: [
-        "Traffic",
-        "Kimono",
-        "ParkScene",
-        "Cactus",
-        "BasketballDrive",
-        "BQTerrace",
-    ],
-    CLASSES[1]: ["BasketballDrill", "BQMall", "PartyScene", "RaceHorses_832x480"],
-    CLASSES[2]: ["BasketballPass", "BQSquare", "BlowingBubbles", "RaceHorses"],
-    CLASSES[3]: ["Traffic", "BQTerrace"],
-}
-
 SEQUENCE_TO_OFFSET = {
-    "Traffic": 10000,
-    "Kimono": 20000,
-    "ParkScene": 30000,
-    "Cactus": 40000,
-    "BasketballDrive": 50000,
-    "BQTerrace": 60000,
-    "BasketballDrill": 70000,
-    "BQMall": 80000,
-    "PartyScene": 90000,
-    "RaceHorses_832x480": 100000,
-    "BasketballPass": 110000,
-    "BQSquare": 120000,
-    "BlowingBubbles": 130000,
-    "RaceHorses": 140000,
+    "Traffic_2560x1600_30": 10000,
+    "Kimono_1920x1080_24": 20000,
+    "ParkScene_1920x1080_24": 30000,
+    "Cactus_1920x1080_50": 40000,
+    "BasketballDrive_1920x1080_50": 50000,
+    "BQTerrace_1920x1080_60": 60000,
+    "BasketballDrill_832x480_50": 70000,
+    "BQMall_832x480_60": 80000,
+    "PartyScene_832x480_50": 90000,
+    "RaceHorses_832x480_30": 100000,
+    "BasketballPass_416x240_50": 110000,
+    "BQSquare_416x240_60": 120000,
+    "BlowingBubbles_416x240_50": 130000,
+    "RaceHorses_416x240_30": 140000,
 }
 
 TMP_EVAL_FILE = "tmp_eval.json"
 TMP_ANCH_FILE = "tmp_anch.json"
 
+NS_SEQ_PREFIX = "ns_"  # Prefix of non-scaled sequences
 
-def compute_overall_mAP(class_name, items, no_cactus=False):
-    seq_root_names = SEQS_BY_CLASS[class_name]
-
-    if no_cactus and class_name == "CLASS-AB":
-        if "Cactus" in seq_root_names:
-            seq_root_names.remove("Cactus")
 
+def compute_per_class_mAP(seq_root_names, items):
     classwise_instances_results = []
     classwise_anchor_images = []
     classwise_annotation = []
     categories = None
     annotation_id = 0
     for e, (item, root_name) in enumerate(zip(items, seq_root_names)):
-        assert root_name in item[utils.SEQ_NAME_KEY]
+        assert (
+            root_name in item[utils.SEQ_NAME_KEY]
+        ), f"Not found {root_name} in {item[utils.SEQ_NAME_KEY]} {utils.SEQ_NAME_KEY}"
+
+        root_name = root_name.replace(NS_SEQ_PREFIX, "")
 
         seq_img_id_offset = SEQUENCE_TO_OFFSET[root_name]
 
@@ -150,10 +128,6 @@ def compute_overall_mAP(class_name, items, no_cactus=False):
     os.remove(TMP_EVAL_FILE)
     os.remove(TMP_ANCH_FILE)
 
-    # print("\n")
-    # print(summary)
-    # print("\n")
-
     return summary
 
 
@@ -171,89 +145,9 @@ def coco_evaluation(ann_file, detections):
     coco_eval.accumulate()
     coco_eval.summarize()
 
-    import logging
-
-    class dummyclass:
-        def __init__(self):
-            self._logger = logging.getLogger(__name__)
-
-    # things = [i["name"] for i in coco_eval.cocoGt.cats.values()]
-    # out_all = COCOEvaluator._derive_coco_results(
-    #     dummyclass(), coco_eval, iou_type="bbox", class_names=things
-    # )
-
     headers = ["AP", "AP50", "AP75", "APS", "APM", "APL"]
     npstat = np.array(coco_eval.stats[:6])
     npstat = npstat * 100  # Percent
-    # npstat = np.around(npstat, 2)
     data_frame = pd.DataFrame([npstat], columns=headers)
 
     return data_frame
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-
-    parser.add_argument(
-        "-r",
-        "--result_path",
-        required=True,
-        help="For example, '.../logs/runs/[pipeline]/[codec]/[datacatalog]/'",
-    )
-    parser.add_argument(
-        "-q",
-        "--quality_index",
-        required=False,
-        default=-1,
-        type=int,
-        help="Provide the index of the quality folders under the `result_path'. quality_index only points to the folders ordered by qp names, because different ranges of qps may be used for different sequences",
-    )
-    parser.add_argument(
-        "-a",
-        "--all_qualities",
-        action="store_true",
-        help="run all 6 rate points in MPEG CTCs",
-    )
-    parser.add_argument(
-        "-d",
-        "--dataset_path",
-        required=True,
-        help="For example, '.../vcm_testdata/[dataset]'",
-    )
-    parser.add_argument(
-        "-c",
-        "--class_to_compute",
-        type=str,
-        choices=CLASSES,
-        required=True,
-    )
-
-    args = parser.parse_args()
-    if args.all_qualities:
-        qualities = range(0, 6)
-    else:
-        qualities = [args.quality_index]
-
-    with open(
-        f"{args.result_path}/{args.class_to_compute}.csv", "w", newline=""
-    ) as file:
-        writer = csv.writer(file)
-        for q in qualities:
-            items = utils.search_items(
-                args.result_path,
-                args.dataset_path,
-                q,
-                SEQS_BY_CLASS[args.class_to_compute],
-                BaseEvaluator.get_coco_eval_info_name,
-            )
-
-            assert (
-                len(items) > 0
-            ), "No relevant information found in the given directories..."
-
-            summary = compute_overall_mAP(args.class_to_compute, items)
-
-            writer.writerow([f"{q}", f"{summary['AP'][0]:.4f}"])
-            print(f"{'=' * 10} FINAL OVERALL mAP SUMMARY {'=' * 10}")
-            print(f"{'-' * 32} AP : {summary['AP'][0]:.4f}")
-            print("\n\n")
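
Aside from the renamed helper, the evaluation path these hunks keep is a plain pycocotools bbox evaluation over the merged per-class files, with the first six COCOeval stats tabulated as AP/AP50/AP75/APS/APM/APL. The following standalone sketch mirrors that flow; the function name bbox_map_summary and the idea of passing the two merged JSON paths directly are illustrative assumptions, not part of the module.

# Minimal sketch of the evaluation step, assuming merged ground-truth and detection
# JSON files whose image ids already carry the per-sequence SEQUENCE_TO_OFFSET offsets
# (e.g. the temporary tmp_anch.json / tmp_eval.json written before evaluation).
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import pandas as pd

def bbox_map_summary(ann_file: str, det_file: str) -> pd.DataFrame:
    coco_gt = COCO(ann_file)            # merged annotations
    coco_dt = coco_gt.loadRes(det_file) # merged detections

    coco_eval = COCOeval(coco_gt, coco_dt, iouType="bbox")
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()

    # COCOeval.stats[:6] = [AP, AP50, AP75, APS, APM, APL]; reported in percent.
    headers = ["AP", "AP50", "AP75", "APS", "APM", "APL"]
    return pd.DataFrame([np.array(coco_eval.stats[:6]) * 100], columns=headers)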