-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathtest.py
More file actions
129 lines (119 loc) · 5.06 KB
/
test.py
File metadata and controls
129 lines (119 loc) · 5.06 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
import json
import torch
from tqdm import tqdm
from train import GaussianTrainTask
from my_ext import utils
from datasets.base import DynamceSceneDataset
class SuperpointGaussianTestTask(GaussianTrainTask):
    """Evaluation task for a trained superpoint-Gaussian scene model.

    Renders every frame of the selected dataset split, computes image metrics
    (PSNR, plus optional SSIM / MS-SSIM / LPIPS variants), writes the rendered
    frames to ``<output>/test``, and optionally benchmarks pure rendering FPS.
    Results are dumped to ``<output>/results.json``.
    """

    def extra_config(self, parser):
        """Register evaluation-only command-line options on top of the training ones.

        Args:
            parser: the argparse parser being assembled by the task framework.
        """
        super().extra_config(parser)
        # --fps enables the standalone 1000-frame rendering benchmark in run().
        utils.add_bool_option(parser, '--fps', default=False)
        # Metric toggles; each one appends a component to cfg.metrics below.
        utils.add_cfg_option(parser, '--lpips', default=True)
        utils.add_cfg_option(parser, '--lpips-alex', default=False)
        utils.add_cfg_option(parser, '--lpips-vgg', default=False)
        utils.add_cfg_option(parser, '--ssim', default=True)
        utils.add_cfg_option(parser, '--ms-ssim', default=True)
        # Optional model stage forwarded to model.render via test_kwargs.
        utils.add_cfg_option(parser, '--stage', default=None)
        # This task evaluates by default, so force the test split unless overridden.
        parser.set_defaults(test=True)

    def step_2_environment(self, *args, **kwargs):
        """Assemble the ``cfg.metrics`` specification from the enabled metric flags.

        The metric string is a '/'-separated list consumed by the metric manager
        (PSNR is always included).
        """
        parts = ['image/PSNR']
        if self.cfg.ssim:
            parts.append('SSIM')
        if self.cfg.ms_ssim:
            parts.append('MS_SSIM')
        # --lpips and --lpips-alex both map to the same 'LPIPS' metric entry.
        if self.cfg.lpips or self.cfg.lpips_alex:
            parts.append('LPIPS')
        if self.cfg.lpips_vgg:
            parts.append('LPIPS_VGG')
        self.cfg.metrics = '/'.join(parts)
        super().step_2_environment(*args, **kwargs)

    @torch.no_grad()
    def run(self):
        """Render the chosen split, evaluate image metrics, and save results.

        Side effects:
            - writes rendered frames to ``<output>/test/NNNN.png``
              (and ``NNNN_c.png`` when the model also emits ``images_c``);
            - writes a ``results.json`` with the summarized metrics (+ FPS).
        """
        self.model.eval()
        save_images = []
        save_images_c = []
        # Pick the dataset split according to the configured mode.
        if self.cfg.test:
            db = self.test_db  # noqa
            self.logger.info('using test split')
        elif self.cfg.eval:
            db = self.eval_db  # noqa
            self.logger.info('using eval split')
        else:
            db = self.train_db  # noqa
            self.logger.info('using train split')
        db: DynamceSceneDataset
        # Move all per-camera data to GPU once, outside the render loop.
        times = db.times.cuda()
        Tw2v = db.Tw2v.cuda()
        Tv2c = db.Tv2c.cuda().expand_as(Tw2v)
        campos = db.Tv2w[:, :3, 3].cuda()
        # Background color matches the dataset convention (white or black).
        background = torch.zeros((1, 1, 3)).cuda() if db.background_type != 'white' else torch.ones((1, 1, 3)).cuda()
        # CUDA events time the GPU-side rendering of the whole split.
        start_time = torch.cuda.Event(enable_timing=True)
        start_time.synchronize()
        end_time = torch.cuda.Event(enable_timing=True)
        start_time.record()
        for i in tqdm(range(len(db))):
            # Multi-camera datasets index camera params by camera id; otherwise
            # frame index and camera index coincide.
            if hasattr(db, 'camera_ids') and db.num_cameras > 1:
                cid = db.camera_ids[i].item()
            else:
                cid = i
            outputs = self.model.render(
                info={
                    'Tw2v': Tw2v[cid],
                    'Tv2c': Tv2c[cid],
                    'campos': campos[cid],
                    'size': db.image_size,
                    # FoV may be shared across cameras (1-D) or per-camera (2-D).
                    'FoV': db.FoV[cid] if db.FoV.ndim == 2 else db.FoV,
                },
                t=times[i], background=background,
                **utils.merge_dict(self.cfg.test_kwargs, stage=self.cfg.stage)
            )
            save_images.append(outputs['images'].clamp(0, 1))
            if 'images_c' in outputs:
                save_images_c.append(outputs['images_c'])
        end_time.record()
        end_time.synchronize()
        self.logger.info(f"[red]FPS: {len(times) * 1000 / start_time.elapsed_time(end_time)}")
        self.logger.info('Begin evaluation')  # fixed typo: 'evalution'
        save_dir = self.output.joinpath('test')
        utils.dir_create_and_clear(save_dir, "*.png")
        for i in tqdm(range(len(times))):
            gt_img = db.get_image(i)[..., :3]
            pd_img = save_images[i]
            self.metric_manager.update('image', pd_img, gt_img)
            utils.save_image(save_dir.joinpath(f"{i:04d}.png"), pd_img)
        results = self.metric_manager.summarize()['image']
        self.logger.info(results)
        if len(save_images_c) > 0:
            self.metric_manager.reset()
            for i in tqdm(range(len(times))):
                gt_img = db.images[i, :, :, :3]
                pd_img = save_images_c[i]
                self.metric_manager.update('image', pd_img, gt_img)
                # BUGFIX: use a distinct suffix so the canonical renderings
                # saved above are not silently overwritten on disk.
                utils.save_image(save_dir.joinpath(f"{i:04d}_c.png"), pd_img)
            self.logger.info(f"images_c: {self.metric_manager.summarize()}")
        if self.cfg.fps:
            # Standalone throughput benchmark: render 1000 frames sweeping t
            # over [0, 1) at a fixed camera, timing only GPU work.
            start_time.synchronize()
            start_time.record()
            for i in tqdm(range(1000)):
                outputs = self.model.render(
                    info={
                        'Tw2v': Tw2v[0],
                        'Tv2c': Tv2c[0],
                        'campos': campos[0],
                        'size': db.image_size,
                        'FoV': db.FoV[0] if db.FoV.ndim == 2 else db.FoV,
                    },
                    t=times.new_tensor([i / 1000]),
                    background=background,
                    **utils.merge_dict(self.cfg.test_kwargs, stage=self.cfg.stage, ),
                )
                # NOTE: benchmark outputs are intentionally discarded; the
                # original appended them to save_images, holding ~1000 unused
                # frames on the GPU for no benefit.
            end_time.record()
            end_time.synchronize()
            FPS = 1000 * 1000 / start_time.elapsed_time(end_time)
            self.logger.info(f"[red]FPS: {FPS}")
            results['FPS'] = FPS
        with self.output.joinpath('results.json').open('w') as f:
            json.dump(results, f, indent=2)
def _main():
    """Build the test task and execute a full evaluation pass."""
    task = SuperpointGaussianTestTask()
    task.run()


if __name__ == '__main__':
    _main()