Skip to content

Commit a9625a6

Browse files
committed
Create separate function for executing benchmark_app
1 parent af07577 commit a9625a6

File tree

13 files changed

+54
-174
lines changed

13 files changed

+54
-174
lines changed

examples/__init__.py

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,3 +8,20 @@
88
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
99
# See the License for the specific language governing permissions and
1010
# limitations under the License.
11+
12+
from pathlib import Path
from re import compile
from subprocess import check_output  # nosec B404: used only for executing benchmark_app

# Pre-compiled pattern that extracts the FPS figure from benchmark_app's
# final "Throughput: <value> FPS" summary line.
throughput_pattern = compile(r"Throughput\: (.+?) FPS")


def execute_benchmark_on_cpu(model_path: Path, time: int, shape=None) -> float:
    """Run OpenVINO benchmark_app on CPU and return the measured throughput.

    Launches ``benchmark_app`` in async mode against the given model, prints
    the last few lines of its report for visibility, and parses the
    "Throughput: ... FPS" line from the captured output.

    :param model_path: Path to the model file (IR ``.xml`` or ONNX).
    :param time: Benchmark duration in seconds, passed to ``-t``.
    :param shape: Optional input shape passed to ``-shape``
        (e.g. ``[1, 3, 224, 224]``); omitted from the command when ``None``.
    :return: Throughput in frames per second.
    :raises RuntimeError: If the throughput line is absent from the output
        (previously this surfaced as an opaque ``AttributeError``).
    """
    command = ["benchmark_app", "-m", model_path.as_posix(), "-d", "CPU", "-api", "async", "-t", str(time)]
    if shape is not None:
        command += ["-shape", str(shape)]

    cmd_output = check_output(command, text=True)  # nosec B603: used only for executing benchmark_app
    # Show only the tail of the report — the summary block with latency/throughput.
    print(*cmd_output.splitlines()[-8:], sep="\n")

    match = throughput_pattern.search(cmd_output)
    if match is None:
        # Fail loudly with context instead of AttributeError on `None.group(1)`.
        raise RuntimeError(f"Could not find 'Throughput: ... FPS' in benchmark_app output for {model_path}.")
    return float(match.group(1))

examples/post_training_quantization/onnx/mobilenet_v2/main.py

Lines changed: 3 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,6 @@
99
# See the License for the specific language governing permissions and
1010
# limitations under the License.
1111

12-
import re
13-
import subprocess
1412
from pathlib import Path
1513

1614
import numpy as np
@@ -25,6 +23,7 @@
2523
from torchvision import transforms
2624

2725
import nncf
26+
from examples import execute_benchmark_on_cpu
2827

2928
ROOT = Path(__file__).parent.resolve()
3029
MODEL_URL = "https://huggingface.co/alexsu52/mobilenet_v2_imagenette/resolve/main/mobilenet_v2_imagenette.onnx"
@@ -61,21 +60,6 @@ def validate(path_to_model: Path, validation_loader: torch.utils.data.DataLoader
6160
return accuracy_score(predictions, references)
6261

6362

64-
def run_benchmark(path_to_model: Path, shape: list[int]) -> float:
65-
command = [
66-
"benchmark_app",
67-
"-m", path_to_model.as_posix(),
68-
"-d", "CPU",
69-
"-api", "async",
70-
"-t", "15",
71-
"-shape", str(shape),
72-
] # fmt: skip
73-
cmd_output = subprocess.check_output(command, text=True) # nosec
74-
print(*cmd_output.splitlines()[-8:], sep="\n")
75-
match = re.search(r"Throughput\: (.+?) FPS", str(cmd_output))
76-
return float(match.group(1))
77-
78-
7963
def get_model_size(path: Path, m_type: str = "Mb") -> float:
8064
model_size = path.stat().st_size
8165
for t in ["bytes", "Kb", "Mb"]:
@@ -152,9 +136,9 @@ def transform_fn(data_item):
152136
int8_model_size = get_model_size(int8_model_path)
153137

154138
print("[3/7] Benchmark FP32 model:")
155-
fp32_fps = run_benchmark(fp32_model_path, shape=[1, 3, 224, 224])
139+
fp32_fps = execute_benchmark_on_cpu(fp32_model_path, time=15, shape=[1, 3, 224, 224])
156140
print("[4/7] Benchmark INT8 model:")
157-
int8_fps = run_benchmark(int8_model_path, shape=[1, 3, 224, 224])
141+
int8_fps = execute_benchmark_on_cpu(int8_model_path, time=15, shape=[1, 3, 224, 224])
158142

159143
print("[5/7] Validate ONNX FP32 model in OpenVINO:")
160144
fp32_top1 = validate(fp32_model_path, val_loader)

examples/post_training_quantization/onnx/yolov8_quantize_with_accuracy_control/deploy.py

Lines changed: 3 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,6 @@
99
# See the License for the specific language governing permissions and
1010
# limitations under the License.
1111

12-
import re
13-
import subprocess
1412
from pathlib import Path
1513

1614
import openvino as ov
@@ -22,6 +20,7 @@
2220
from ultralytics.utils import DEFAULT_CFG
2321
from ultralytics.utils.metrics import ConfusionMatrix
2422

23+
from examples import execute_benchmark_on_cpu
2524
from examples.post_training_quantization.onnx.yolov8_quantize_with_accuracy_control.main import prepare_validation
2625
from examples.post_training_quantization.onnx.yolov8_quantize_with_accuracy_control.main import print_statistics
2726

@@ -64,20 +63,6 @@ def validate_ov_model(
6463
return stats, validator.seen, validator.metrics.nt_per_class.sum()
6564

6665

67-
def run_benchmark(model_path: Path, config) -> float:
68-
command = [
69-
"benchmark_app",
70-
"-m", model_path.as_posix(),
71-
"-d", "CPU",
72-
"-api", "async",
73-
"-t", "30",
74-
"-shape", str([1, 3, config.imgsz, config.imgsz]),
75-
] # fmt: skip
76-
cmd_output = subprocess.check_output(command, text=True) # nosec
77-
match = re.search(r"Throughput\: (.+?) FPS", cmd_output)
78-
return float(match.group(1))
79-
80-
8166
args = get_cfg(cfg=DEFAULT_CFG)
8267
args.data = "coco128-seg.yaml"
8368

@@ -90,11 +75,11 @@ def run_benchmark(model_path: Path, config) -> float:
9075
ov.save_model(int8_ov_model, INT8_OV_MODEL_PATH, compress_to_fp16=False)
9176

9277
print("[3/7] Benchmark FP32 OpenVINO model:", end=" ")
93-
fp32_fps = run_benchmark(FP32_OV_MODEL_PATH, args)
78+
fp32_fps = execute_benchmark_on_cpu(FP32_OV_MODEL_PATH, time=30, shape=[1, 3, args.imgsz, args.imgsz])
9479
print(f"{fp32_fps} FPS")
9580

9681
print("[4/7] Benchmark INT8 OpenVINO model:", end=" ")
97-
int8_fps = run_benchmark(INT8_OV_MODEL_PATH, args)
82+
int8_fps = execute_benchmark_on_cpu(INT8_OV_MODEL_PATH, time=30, shape=[1, 3, args.imgsz, args.imgsz])
9883
print(f"{int8_fps} FPS")
9984

10085
validator, data_loader = prepare_validation(YOLO(ROOT / f"{MODEL_NAME}.pt"), args)

examples/post_training_quantization/openvino/anomaly_stfpm_quantize_with_accuracy_control/main.py

Lines changed: 3 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -10,8 +10,6 @@
1010
# limitations under the License.
1111

1212
import json
13-
import re
14-
import subprocess
1513
import sys
1614
from functools import partial
1715
from pathlib import Path
@@ -26,6 +24,7 @@
2624
from anomalib.utils.metrics import create_metric_collection
2725

2826
import nncf
27+
from examples import execute_benchmark_on_cpu
2928

3029
ROOT = Path(__file__).parent.resolve()
3130
HOME_PATH = Path.home()
@@ -82,21 +81,6 @@ def validate(
8281
return metric_value, per_sample_metric_values
8382

8483

85-
def run_benchmark(model_path: Path, shape: list[int]) -> float:
86-
command = [
87-
"benchmark_app",
88-
"-m", model_path.as_posix(),
89-
"-d", "CPU",
90-
"-api", "async",
91-
"-t", "15",
92-
"-shape", str(shape),
93-
] # fmt: skip
94-
cmd_output = subprocess.check_output(command, text=True) # nosec
95-
print(*cmd_output.splitlines()[-8:], sep="\n")
96-
match = re.search(r"Throughput\: (.+?) FPS", cmd_output)
97-
return float(match.group(1))
98-
99-
10084
def get_model_size(ir_path: Path, m_type: str = "Mb") -> float:
10185
xml_size = ir_path.stat().st_size
10286
bin_size = ir_path.with_suffix(".bin").stat().st_size
@@ -182,9 +166,9 @@ def transform_fn(data_item):
182166
int8_size = get_model_size(int8_ir_path)
183167

184168
print("[3/7] Benchmark FP32 model:")
185-
fp32_fps = run_benchmark(fp32_ir_path, shape=[1, 3, 256, 256])
169+
fp32_fps = execute_benchmark_on_cpu(fp32_ir_path, time=15, shape=[1, 3, 256, 256])
186170
print("[4/7] Benchmark INT8 model:")
187-
int8_fps = run_benchmark(int8_ir_path, shape=[1, 3, 256, 256])
171+
int8_fps = execute_benchmark_on_cpu(int8_ir_path, time=15, shape=[1, 3, 256, 256])
188172

189173
print("[5/7] Validate OpenVINO FP32 model:")
190174
compiled_model = ov.compile_model(ov_model, device_name="CPU")

examples/post_training_quantization/openvino/mobilenet_v2/main.py

Lines changed: 4 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,6 @@
99
# See the License for the specific language governing permissions and
1010
# limitations under the License.
1111

12-
import re
13-
import subprocess
1412
from pathlib import Path
1513

1614
import numpy as np
@@ -23,6 +21,7 @@
2321
from torchvision import transforms
2422

2523
import nncf
24+
from examples import execute_benchmark_on_cpu
2625

2726
ROOT = Path(__file__).parent.resolve()
2827
DATASET_PATH = Path().home() / ".cache" / "nncf" / "datasets"
@@ -54,14 +53,6 @@ def validate(model: ov.Model, val_loader: torch.utils.data.DataLoader) -> float:
5453
return accuracy_score(predictions, references)
5554

5655

57-
def run_benchmark(model_path: Path, shape: list[int]) -> float:
58-
cmd = ["benchmark_app", "-m", model_path.as_posix(), "-d", "CPU", "-api", "async", "-t", "15", "-shape", str(shape)]
59-
cmd_output = subprocess.check_output(cmd, text=True) # nosec
60-
print(*cmd_output.splitlines()[-8:], sep="\n")
61-
match = re.search(r"Throughput\: (.+?) FPS", cmd_output)
62-
return float(match.group(1))
63-
64-
6556
def get_model_size(ir_path: Path, m_type: str = "Mb") -> float:
6657
xml_size = ir_path.stat().st_size
6758
bin_size = ir_path.with_suffix(".bin").stat().st_size
@@ -141,9 +132,10 @@ def transform_fn(data_item):
141132
int8_model_size = get_model_size(int8_ir_path)
142133

143134
print("[3/7] Benchmark FP32 model:")
144-
fp32_fps = run_benchmark(fp32_ir_path, shape=[1, 3, 224, 224])
135+
fp32_fps = execute_benchmark_on_cpu(fp32_ir_path, time=15, shape=[1, 3, 224, 224])
145136
print("[4/7] Benchmark INT8 model:")
146-
int8_fps = run_benchmark(int8_ir_path, shape=[1, 3, 224, 224])
137+
int8_fps = execute_benchmark_on_cpu(int8_ir_path, time=15, shape=[1, 3, 224, 224])
138+
147139

148140
print("[5/7] Validate OpenVINO FP32 model:")
149141
fp32_top1 = validate(ov_model, val_data_loader)

examples/post_training_quantization/openvino/yolo26/main.py

Lines changed: 3 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,6 @@
88
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
99
# See the License for the specific language governing permissions and
1010
# limitations under the License.
11-
import re
12-
import subprocess
1311
from pathlib import Path
1412
from typing import Any
1513

@@ -26,6 +24,7 @@
2624
from ultralytics.utils.metrics import ConfusionMatrix
2725

2826
import nncf
27+
from examples import execute_benchmark_on_cpu
2928

3029
MODEL_NAME = "yolo26n"
3130

@@ -83,20 +82,6 @@ def prepare_validation(model: YOLO, args: Any) -> tuple[DetectionValidator, torc
8382
return validator, data_loader
8483

8584

86-
def benchmark_performance(model_path: Path, config) -> float:
87-
command = [
88-
"benchmark_app",
89-
"-m", model_path.as_posix(),
90-
"-d", "CPU",
91-
"-api", "async",
92-
"-t", "30",
93-
"-shape", str([1, 3, config.imgsz, config.imgsz]),
94-
] # fmt: skip
95-
cmd_output = subprocess.check_output(command, text=True) # nosec
96-
match = re.search(r"Throughput\: (.+?) FPS", cmd_output)
97-
return float(match.group(1))
98-
99-
10085
def prepare_openvino_model(model: YOLO, model_name: str) -> tuple[ov.Model, Path]:
10186
ir_model_path = ROOT / f"{model_name}_openvino_model" / f"{model_name}.xml"
10287
if not ir_model_path.exists():
@@ -162,11 +147,11 @@ def main():
162147
print_statistics(q_stats, total_images, total_objects)
163148

164149
# Benchmark performance of FP32 model
165-
fp_model_perf = benchmark_performance(ov_model_path, args)
150+
fp_model_perf = execute_benchmark_on_cpu(ov_model_path, time=30, shape=[1, 3, args.imgsz, args.imgsz])
166151
print(f"Floating-point model performance: {fp_model_perf} FPS")
167152

168153
# Benchmark performance of quantized model
169-
quantized_model_perf = benchmark_performance(quantized_model_path, args)
154+
quantized_model_perf = execute_benchmark_on_cpu(quantized_model_path, time=30, shape=[1, 3, args.imgsz, args.imgsz])
170155
print(f"Quantized model performance: {quantized_model_perf} FPS")
171156

172157
return fp_stats["metrics/mAP50-95(B)"], q_stats["metrics/mAP50-95(B)"], fp_model_perf, quantized_model_perf

examples/post_training_quantization/openvino/yolov8_quantize_with_accuracy_control/main.py

Lines changed: 3 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,6 @@
88
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
99
# See the License for the specific language governing permissions and
1010
# limitations under the License.
11-
import re
12-
import subprocess
1311
from functools import partial
1412
from pathlib import Path
1513
from typing import Any
@@ -28,6 +26,7 @@
2826
from ultralytics.utils.metrics import ConfusionMatrix
2927

3028
import nncf
29+
from examples import execute_benchmark_on_cpu
3130

3231
MODEL_NAME = "yolov8n-seg"
3332

@@ -109,20 +108,6 @@ def prepare_validation(model: YOLO, args: Any) -> tuple[SegmentationValidator, t
109108
return validator, data_loader
110109

111110

112-
def benchmark_performance(model_path, config) -> float:
113-
command = [
114-
"benchmark_app",
115-
"-m", model_path.as_posix(),
116-
"-d", "CPU",
117-
"-api", "async",
118-
"-t", "30",
119-
"-shape", str([1, 3, config.imgsz, config.imgsz]),
120-
] # fmt: skip
121-
cmd_output = subprocess.check_output(command, text=True) # nosec
122-
match = re.search(r"Throughput\: (.+?) FPS", cmd_output)
123-
return float(match.group(1))
124-
125-
126111
def prepare_openvino_model(model: YOLO, model_name: str) -> tuple[ov.Model, Path]:
127112
ir_model_path = ROOT / f"{model_name}_openvino_model" / f"{model_name}.xml"
128113
if not ir_model_path.exists():
@@ -235,11 +220,11 @@ def main():
235220
print_statistics(q_stats, total_images, total_objects)
236221

237222
# Benchmark performance of FP32 model
238-
fp_model_perf = benchmark_performance(ov_model_path, args)
223+
fp_model_perf = execute_benchmark_on_cpu(ov_model_path, time=30, shape=[1, 3, args.imgsz, args.imgsz])
239224
print(f"Floating-point model performance: {fp_model_perf} FPS")
240225

241226
# Benchmark performance of quantized model
242-
quantized_model_perf = benchmark_performance(quantized_model_path, args)
227+
quantized_model_perf = execute_benchmark_on_cpu(quantized_model_path, time=30, shape=[1, 3, args.imgsz, args.imgsz])
243228
print(f"Quantized model performance: {quantized_model_perf} FPS")
244229

245230
return fp_stats["metrics/mAP50-95(B)"], q_stats["metrics/mAP50-95(B)"], fp_model_perf, quantized_model_perf

examples/post_training_quantization/torch/mobilenet_v2/main.py

Lines changed: 4 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,6 @@
99
# See the License for the specific language governing permissions and
1010
# limitations under the License.
1111

12-
import re
13-
import subprocess
1412
from functools import partial
1513
from pathlib import Path
1614

@@ -25,6 +23,7 @@
2523
from torchvision import transforms
2624

2725
import nncf
26+
from examples import execute_benchmark_on_cpu
2827

2928
ROOT = Path(__file__).parent.resolve()
3029
CHECKPOINT_URL = "https://huggingface.co/alexsu52/mobilenet_v2_imagenette/resolve/main/pytorch_model.bin"
@@ -61,21 +60,6 @@ def validate(model: ov.Model, val_loader: torch.utils.data.DataLoader) -> float:
6160
return accuracy_score(predictions, references)
6261

6362

64-
def run_benchmark(model_path: Path, shape: list[int]) -> float:
65-
command = [
66-
"benchmark_app",
67-
"-m", model_path.as_posix(),
68-
"-d", "CPU",
69-
"-api", "async",
70-
"-t", "15",
71-
"-shape", str(shape),
72-
] # fmt: skip
73-
cmd_output = subprocess.check_output(command, text=True) # nosec
74-
print(*cmd_output.splitlines()[-8:], sep="\n")
75-
match = re.search(r"Throughput\: (.+?) FPS", cmd_output)
76-
return float(match.group(1))
77-
78-
7963
def get_model_size(ir_path: Path, m_type: str = "Mb") -> float:
8064
xml_size = ir_path.stat().st_size
8165
bin_size = ir_path.with_suffix(".bin").stat().st_size
@@ -165,9 +149,10 @@ def transform_fn(data_item: tuple[torch.Tensor, int], device: torch.device) -> t
165149
int8_model_size = get_model_size(int8_ir_path)
166150

167151
print("[3/7] Benchmark FP32 model:")
168-
fp32_fps = run_benchmark(fp32_ir_path, shape=[1, 3, 224, 224])
152+
fp32_fps = execute_benchmark_on_cpu(fp32_ir_path, time=15, shape=[1, 3, 224, 224])
153+
169154
print("[4/7] Benchmark INT8 model:")
170-
int8_fps = run_benchmark(int8_ir_path, shape=[1, 3, 224, 224])
155+
int8_fps = execute_benchmark_on_cpu(int8_ir_path, time=15, shape=[1, 3, 224, 224])
171156

172157
print("[5/7] Validate OpenVINO FP32 model:")
173158
fp32_top1 = validate(ov_model, val_data_loader)

0 commit comments

Comments
 (0)