-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathval.py
More file actions
77 lines (68 loc) · 4.25 KB
/
val.py
File metadata and controls
77 lines (68 loc) · 4.25 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
import warnings
warnings.filterwarnings('ignore')
import os
import numpy as np
from prettytable import PrettyTable
from ultralytics import YOLO
from ultralytics.utils.torch_utils import model_info
def get_weight_size(path: str) -> str:
    """Return the on-disk size of the file at *path*, in MiB.

    Args:
        path: Filesystem path to the weight (checkpoint) file.

    Returns:
        The size formatted to one decimal place, e.g. ``'12.3'``.
        Note: no unit suffix is included — the caller appends ``'MB'``.
    """
    # os.path.getsize is the idiomatic shortcut for os.stat(path).st_size.
    return f'{os.path.getsize(path) / (1024 * 1024):.1f}'
if __name__ == '__main__': # exp2 exp15 3 [exp17 exp28] exp34 exp4 exp35
    # Evaluate a trained YOLO checkpoint on the VisDrone test split and write
    # results under runs/test/reexp.  The commented-out section below builds
    # PrettyTable reports (per-class metrics, timing/FPS, model size) and can
    # be re-enabled for paper-ready output.
    model_path = 'runs/train/exp6/weights/best.pt' # if all test metrics come out as 0, fall back to the fp32 weights
    model = YOLO(model_path) # load the trained weights from model_path
    result = model.val(data='/home/lenovo/data/liujiaji/ultralytics-yolo11-main/dataset/VisDrone.yaml',
                        split='test', # split can be 'train', 'val' or 'test' — choose based on your dataset layout
                        imgsz=640,
                        batch=4, # batch size has little effect on the reported metrics
                        # iou=0.5,
                        # rect=False,
                        # save_json=True, # if you need to cal coco metrice
                        project='runs/test',
                        name='reexp',
                        )
    # NOTE(review): everything below is optional reporting code, kept for
    # re-enabling; it assumes a detection task (result.box.* present) —
    # confirm before uncommenting for segment/pose models.
    # print(result.results_dict)
    # if model.task == 'detect':
    #     length = result.box.p.size
    #     model_names = list(result.names.values())
    #     preprocess_time_per_image = result.speed['preprocess']
    #     inference_time_per_image = result.speed['inference']
    #     postprocess_time_per_image = result.speed['postprocess']
    #     all_time_per_image = preprocess_time_per_image + inference_time_per_image + postprocess_time_per_image
    #     n_l, n_p, n_g, flops = model_info(model.model)
    #     model_info_table = PrettyTable()
    #     model_info_table.title = "Model Info"
    #     model_info_table.field_names = ["GFLOPs", "Parameters", "前处理时间/一张图", "推理时间/一张图", "后处理时间/一张图", "FPS(前处理+模型推理+后处理)", "FPS(推理)", "Model File Size"]
    #     model_info_table.add_row([f'{flops:.1f}', f'{n_p:,}', 
    #                             f'{preprocess_time_per_image / 1000:.6f}s', f'{inference_time_per_image / 1000:.6f}s', 
    #                             f'{postprocess_time_per_image / 1000:.6f}s', f'{1000 / all_time_per_image:.2f}', 
    #                             f'{1000 / inference_time_per_image:.2f}', f'{get_weight_size(model_path)}MB'])
    #     print(model_info_table)
    #     model_metrice_table = PrettyTable()
    #     model_metrice_table.title = "Model Metrice"
    #     model_metrice_table.field_names = ["Class Name", "Precision", "Recall", "F1-Score", "mAP50", "mAP75", "mAP50-95"]
    #     for idx in range(length):
    #         model_metrice_table.add_row([
    #                                     model_names[idx], 
    #                                     f"{result.box.p[idx]:.4f}", 
    #                                     f"{result.box.r[idx]:.4f}", 
    #                                     f"{result.box.f1[idx]:.4f}", 
    #                                     f"{result.box.ap50[idx]:.4f}", 
    #                                     f"{result.box.all_ap[idx, 5]:.4f}", # 50 55 60 65 70 75 80 85 90 95
    #                                     f"{result.box.ap[idx]:.4f}"
    #                                     ])
    #     model_metrice_table.add_row([
    #                                 "all(平均数据)", 
    #                                 f"{result.results_dict['metrics/precision(B)']:.4f}", 
    #                                 f"{result.results_dict['metrics/recall(B)']:.4f}", 
    #                                 f"{np.mean(result.box.f1[:length]):.4f}", 
    #                                 f"{result.results_dict['metrics/mAP50(B)']:.4f}", 
    #                                 f"{np.mean(result.box.all_ap[:length, 5]):.4f}", # 50 55 60 65 70 75 80 85 90 95
    #                                 f"{result.results_dict['metrics/mAP50-95(B)']:.4f}"
    #                                 ])
    #     print(model_metrice_table)
    #     with open(result.save_dir / 'paper_data.txt', 'w+', errors="ignore", encoding="utf-8") as f:
    #         f.write(str(model_info_table))
    #         f.write('\n')
    #         f.write(str(model_metrice_table))
    #     print('-'*20, f'结果已保存至{result.save_dir}/paper_data.txt...', '-'*20)