Skip to content

Commit 0ea85b7

Browse files
YOLO v8: check_export_not_strict
1 parent 8831330 commit 0ea85b7

File tree

4 files changed

+27
-6
lines changed

4 files changed

+27
-6
lines changed

aa_torch_fx.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -403,12 +403,12 @@ def process_model(model_name: str):
403403
##############################################################
404404
# Process PT Quantize
405405
##############################################################
406-
# fx_2_ov_quantization(pt_model, example_input, output_dir, result, val_loader, shape_input)
406+
fx_2_ov_quantization(pt_model, example_input, output_dir, result, val_loader, shape_input)
407407

408408
##############################################################
409409
# Process NNCF FX Quantize
410410
##############################################################
411-
nncf_fx_2_ov_quantization(pt_model, example_input, output_dir, result, val_loader, shape_input)
411+
# nncf_fx_2_ov_quantization(pt_model, example_input, output_dir, result, val_loader, shape_input)
412412

413413
##############################################################
414414
# Process NNCF Quantize by PT

examples/post_training_quantization/openvino/yolov8/main.py

Lines changed: 20 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -371,5 +371,24 @@ def main():
371371
return fp_stats["metrics/mAP50-95(B)"], q_stats["metrics/mAP50-95(B)"], fp_model_perf, quantized_model_perf
372372

373373

374+
def check_export_not_strict():
375+
model = YOLO(f"{ROOT}/{MODEL_NAME}.pt")
376+
377+
# Prepare validation dataset and helper
378+
validator, data_loader = prepare_validation_new(model, "coco128.yaml")
379+
380+
batch = next(iter(data_loader))
381+
batch = validator.preprocess(batch)
382+
383+
model.model(batch["img"])
384+
ex_model = torch.export.export(model.model, args=(batch["img"],), strict=False)
385+
ex_model = capture_pre_autograd_graph(ex_model.module(), args=(batch["img"],))
386+
387+
fp_stats, total_images, total_objects = validate_fx(ex_model, tqdm(data_loader), validator)
388+
print("Floating-point ex strict=False")
389+
print_statistics(fp_stats, total_images, total_objects)
390+
391+
374392
if __name__ == "__main__":
375-
main()
393+
check_export_not_strict()
394+
# main()

nncf/experimental/torch_fx/nncf_graph_builder.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -236,6 +236,7 @@ def view_to_reshape(model: torch.fx.GraphModule):
236236
continue
237237
with model.graph.inserting_after(n):
238238
reshape = model.graph.create_node("call_function", torch.ops.aten.reshape.default, tuple(n.args), {})
239+
reshape.meta = n.meta
239240

240241
for user in list(n.users):
241242
user.replace_input_with(n, reshape)
@@ -295,7 +296,7 @@ def create_nncf_graph(model: torch.fx.GraphModule) -> NNCFGraph:
295296
GraphConverter.separate_conv_and_bias(model)
296297
GraphConverter.unfold_scaled_dot_product_attention(model)
297298
GraphConverter.view_to_reshape(model)
298-
breakpoint()
299+
# breakpoint()
299300

300301
nncf_graph = PTNNCFGraph()
301302

nncf/experimental/torch_fx/transformations.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -105,8 +105,9 @@ def insert_one_qdq(
105105
dequantize_op = torch.ops.quantized_decomposed.dequantize_per_tensor.default
106106

107107
# Quantized functions accepts only uint8 as an input
108-
if target_point.target_type != TargetType.OPERATION_WITH_WEIGHTS and qparams["_dtype_"] == torch.int8:
109-
raise RuntimeError("Wrong parameters: activations should always be uint8")
108+
# if target_point.target_type != TargetType.OPERATION_WITH_WEIGHTS and qparams["_dtype_"] == torch.int8:
109+
# breakpoint()
110+
# raise RuntimeError("Wrong parameters: activations should always be uint8")
110111

111112
# TODO: map FakeQuantizePramaeters to qparams for quantize/dequantize
112113
# 2. replace activation_post_process node with quantize and dequantize

0 commit comments

Comments (0)