-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathinput_size_test.py
More file actions
231 lines (179 loc) · 8.53 KB
/
input_size_test.py
File metadata and controls
231 lines (179 loc) · 8.53 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
#!/usr/bin/env python3
"""
Input Size Testing Script for Face Parsing Models
Tests the effect of different input sizes on model performance
"""
import os
import time
import torch
import torch.nn.functional as F
import numpy as np
from pathlib import Path
import argparse
from typing import Dict, List, Tuple
# Import model components
from models.bisenet import BiSeNet
from models.resnet import ResNetBackbone
from models.efficientnet import EfficientNetBackbone
def get_model_size_mb(model_path: str) -> float:
    """Return the on-disk size of *model_path* in megabytes (0.0 if missing)."""
    if not os.path.exists(model_path):
        # Untrained/undownloaded weights simply report zero size.
        return 0.0
    return os.path.getsize(model_path) / (1024 * 1024)
def get_input_size_mb(input_size: Tuple[int, int]) -> float:
    """Return the memory footprint in MB of one RGB float32 image of this size."""
    height, width = input_size
    num_bytes = height * width * 3 * 4  # 3 channels x 4 bytes per float32
    return num_bytes / (1024 * 1024)
def test_model_performance(model_name: str, input_sizes: List[Tuple[int, int]],
                           device: str = 'cuda') -> Dict:
    """Benchmark one BiSeNet variant across several input resolutions.

    Args:
        model_name: Backbone identifier, e.g. 'resnet18' or 'efficientnet_b0'.
        input_sizes: (height, width) tuples to benchmark.
        device: 'cuda' or 'cpu'.

    Returns:
        Dict keyed by (height, width) tuple with 'speed_ms', 'fps' and
        'memory_increase_mb' entries.

    Raises:
        ValueError: if model_name matches no known backbone family.
    """
    print(f"📊 Testing {model_name.upper()}...")
    # Initialize model from the requested backbone family
    if 'resnet' in model_name:
        backbone = ResNetBackbone(model_name)
    elif 'efficientnet' in model_name:
        backbone = EfficientNetBackbone(model_name)
    else:
        raise ValueError(f"Unknown model: {model_name}")
    model = BiSeNet(backbone=backbone, n_classes=19)
    model.to(device)
    model.eval()
    results = {}
    for input_size in input_sizes:
        print(f" Testing input size: {input_size[0]}x{input_size[1]}")
        # Create dummy input
        dummy_input = torch.randn(1, 3, input_size[0], input_size[1]).to(device)
        # Warmup (also lets cuDNN pick kernels for this shape)
        with torch.no_grad():
            for _ in range(10):
                _ = model(dummy_input)
        if device == 'cuda':
            # CUDA launches are asynchronous: wait for warmup kernels to
            # finish so they are not charged to the first timed iteration.
            torch.cuda.synchronize()
        # Measure inference time
        times = []
        with torch.no_grad():
            for _ in range(100):
                start_time = time.time()
                _ = model(dummy_input)
                if device == 'cuda':
                    # Synchronize BEFORE reading the clock, otherwise the
                    # wall-clock delta measures only kernel launch overhead.
                    torch.cuda.synchronize()
                end_time = time.time()
                times.append((end_time - start_time) * 1000)  # Convert to ms
        avg_time = float(np.mean(times))
        fps = 1000 / avg_time
        # Measure memory usage: report the PEAK extra allocation of one
        # forward pass (max_memory_allocated), not the post-call delta,
        # which misses transient activation buffers freed on return.
        if device == 'cuda':
            torch.cuda.empty_cache()
            torch.cuda.reset_peak_memory_stats()
            start_memory = torch.cuda.memory_allocated()
            with torch.no_grad():
                _ = model(dummy_input)
            torch.cuda.synchronize()
            peak_memory = torch.cuda.max_memory_allocated()
            memory_increase = (peak_memory - start_memory) / (1024 * 1024)  # MB
        else:
            # No reliable per-op accounting on CPU; reported as zero.
            memory_increase = 0.0
        results[input_size] = {
            'speed_ms': avg_time,
            'fps': fps,
            'memory_increase_mb': memory_increase
        }
        print(f" Model Size: {get_model_size_mb(f'./weights/{model_name}.pt'):.1f}MB")
        print(f" Speed: {avg_time:.2f}ms ({fps:.1f} FPS)")
        print(f" Memory: {memory_increase:.1f}MB")
    return results
def print_comprehensive_table(all_results: Dict, input_sizes: List[Tuple[int, int]]):
    """Print one table row per (model, input size) combination.

    Args:
        all_results: model name -> {(h, w) -> metrics dict} as produced by
            test_model_performance.
        input_sizes: ordered (height, width) tuples; rows follow this order.
    """
    print("\n" + "="*120)
    print("COMPREHENSIVE INPUT SIZE COMPARISON TABLE")
    print("="*120)
    # Header
    header = f"{'Model':<15} {'Input Size':<12} {'Model Size(MB)':<15} {'Speed(ms)':<12} {'FPS':<10} {'Memory Increase(MB)':<20}"
    print(header)
    print("-" * 120)
    # Data rows
    for model_name, model_results in all_results.items():
        # Model size is constant across input sizes, so compute it once per model.
        model_size = get_model_size_mb(f'./weights/{model_name}.pt')
        for input_size in input_sizes:
            if input_size in model_results:
                result = model_results[input_size]
                input_size_str = f"{input_size[0]}x{input_size[1]}"
                # (removed dead `input_size_mb` computation — value was never printed)
                row = f"{model_name:<15} {input_size_str:<12} {model_size:<15.1f} {result['speed_ms']:<12.2f} {result['fps']:<10.1f} {result['memory_increase_mb']:<20.1f}"
                print(row)
    print("="*120)
def print_analysis(all_results: Dict, input_sizes: List[Tuple[int, int]]):
    """Print detailed analysis"""
    print("\n📈 ANALYSIS BY MODEL:")
    for model_name, model_results in all_results.items():
        print(f"\n🔍 {model_name.upper()}:")
        # Per-model scaling summary needs results at both extremes.
        if len(model_results) < 2:
            continue
        first_size = input_sizes[0]
        last_size = input_sizes[-1]
        if first_size not in model_results or last_size not in model_results:
            continue
        first_result = model_results[first_size]
        last_result = model_results[last_size]
        speed_ratio = last_result['speed_ms'] / first_result['speed_ms']
        input_ratio = (last_size[0] * last_size[1]) / (first_size[0] * first_size[1])
        print(f" ✅ Model size: {get_model_size_mb(f'./weights/{model_name}.pt'):.1f}MB (consistent)")
        print(f" 📊 Speed: {first_result['speed_ms']:.2f}ms → {last_result['speed_ms']:.2f}ms ({speed_ratio:.2f}x slower)")
        print(f" 📊 Input: {first_size[0]}x{first_size[1]} → {last_size[0]}x{last_size[1]} ({input_ratio:.1f}x larger)")
    print("\n📊 OVERALL ANALYSIS:")
    # Fastest model per input size; min() returns the first minimum, which
    # matches the original first-wins tie behaviour.
    for input_size in input_sizes:
        contenders = [name for name in all_results if input_size in all_results[name]]
        if contenders:
            fastest_model = min(
                contenders,
                key=lambda name: all_results[name][input_size]['speed_ms'])
            fastest_time = all_results[fastest_model][input_size]['speed_ms']
            print(f" 🏃 Fastest at {input_size[0]}x{input_size[1]}: {fastest_model} ({fastest_time:.2f}ms)")
    # Model size ranking (stable sort keeps insertion order for equal sizes)
    ranked = sorted(
        ((name, get_model_size_mb(f'./weights/{name}.pt')) for name in all_results),
        key=lambda entry: entry[1])
    print("\n📦 Model sizes (smallest to largest):")
    for model_name, size in ranked:
        print(f" 📦 {model_name}: {size:.1f}MB")
def main():
    """CLI entry point: benchmark every requested model at every input size."""
    parser = argparse.ArgumentParser(description='Test input size effects on model performance')
    parser.add_argument('--models', nargs='+',
                        default=['resnet18', 'resnet34', 'efficientnet_b0', 'efficientnet_b1', 'efficientnet_b2'],
                        help='Models to test')
    parser.add_argument('--input-sizes', nargs='+', type=int,
                        default=[128, 256, 512, 1024],
                        help='Input sizes to test')
    parser.add_argument('--device', default='cuda', choices=['cuda', 'cpu'],
                        help='Device to use for testing')
    args = parser.parse_args()
    # Fall back to CPU when CUDA was requested but is unavailable.
    cuda_requested = args.device == 'cuda'
    if cuda_requested and not torch.cuda.is_available():
        print("⚠️ CUDA not available, switching to CPU")
        args.device = 'cpu'
    # Benchmarks run on square inputs only.
    square_sizes = [(edge, edge) for edge in args.input_sizes]
    print("🔍 TESTING INPUT SIZE EFFECTS ON ALL MODELS")
    print("="*80)
    all_results = {}
    # Benchmark each model; a failure in one must not abort the rest.
    for model_name in args.models:
        try:
            all_results[model_name] = test_model_performance(
                model_name, square_sizes, args.device)
        except Exception as exc:
            print(f"❌ Error testing {model_name}: {exc}")
            continue
    # Print comprehensive table
    print_comprehensive_table(all_results, square_sizes)
    # Print analysis
    print_analysis(all_results, square_sizes)
    print("\n💡 KEY INSIGHTS:")
    print(" • Model size stays constant regardless of input size")
    print(" • Input size affects memory usage and inference speed")
    print(" • Larger inputs require more memory and are slower")
    print(" • Speed scales roughly with input area (width × height)")
    print(" • Memory usage scales with input size")
    print("\n🎯 RECOMMENDATIONS:")
    print(" • Use 256x256 for good balance of speed and accuracy")
    print(" • Use 512x512 for higher accuracy (slower)")
    print(" • Use 128x128 for maximum speed (lower accuracy)")
    print(" • Consider your hardware memory limits")
# Run the benchmark CLI only when executed as a script, not when imported.
if __name__ == "__main__":
    main()