
Commit e425377

ZainRizvi authored and facebook-github-bot committed
Revert D48588121: Add support for mul and mul_relu
Differential Revision: D48588121
Original commit changeset: 04f9930effa1
Original Phabricator Diff: D48588121
fbshipit-source-id: afcc4c5a81222e1e1d02845d72c4c8fc87fa43cb
1 parent 06da346 commit e425377

2 files changed: +4 −7 lines


examples/models/models.py

Lines changed: 0 additions & 1 deletion
@@ -155,5 +155,4 @@ class OptimizationOptions(object):
     "add": OptimizationOptions(True, True),
     "add_mul": OptimizationOptions(True, True),
     "mv2": OptimizationOptions(True, True),
-    "mv3": OptimizationOptions(True, False),
 }
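
For context, the entry being removed belongs to a dict that maps example-model names to a pair of capability flags. Below is a minimal sketch of that shape; the field names are illustrative assumptions, not the actual definitions in examples/models/models.py.

from dataclasses import dataclass

@dataclass
class OptimizationOptions:
    quantization: bool        # assumed meaning of the first flag
    xnnpack_delegation: bool  # assumed meaning of the second flag

# Post-revert table, matching the diff above:
MODEL_NAME_TO_OPTIONS = {
    "add": OptimizationOptions(True, True),
    "add_mul": OptimizationOptions(True, True),
    "mv2": OptimizationOptions(True, True),
}

# The example scripts can then gate which flows run for a model name:
if MODEL_NAME_TO_OPTIONS["mv2"].quantization:
    print("mv2 is exercised by the quantization example")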

examples/quantization/example.py

Lines changed: 4 additions & 6 deletions
@@ -46,7 +46,6 @@ def verify_xnnpack_quantizer_matching_fx_quant_model(model_name, model, example_
     m = prepare_pt2e(m, quantizer)
     # calibration
     after_prepare_result = m(*example_inputs)
-    print("pt2e prepare:", m)
     m = convert_pt2e(m)
     after_quant_result = m(*example_inputs)
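
This hunk sits in the PT2E half of the check: prepare the captured model with a quantizer, run a calibration pass, then convert. Below is a self-contained sketch of that sequence; the toy model and the capture call are assumptions (the capture API has moved across PyTorch releases), while the prepare_pt2e/convert_pt2e lines mirror the diff above.

import torch
from torch._export import capture_pre_autograd_graph  # capture API assumed for this era
from torch.ao.quantization.quantize_pt2e import prepare_pt2e, convert_pt2e
from torch.ao.quantization.quantizer.xnnpack_quantizer import (
    XNNPACKQuantizer,
    get_symmetric_quantization_config,
)

class Add(torch.nn.Module):
    def forward(self, x, y):
        return x + y

example_inputs = (torch.randn(1, 3), torch.randn(1, 3))
m = capture_pre_autograd_graph(Add(), example_inputs)

quantizer = XNNPACKQuantizer().set_global(get_symmetric_quantization_config())
m = prepare_pt2e(m, quantizer)             # insert observers
after_prepare_result = m(*example_inputs)  # calibration run
m = convert_pt2e(m)                        # rewrite to quantize/dequantize ops
after_quant_result = m(*example_inputs)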
@@ -58,7 +57,6 @@ def verify_xnnpack_quantizer_matching_fx_quant_model(model_name, model, example_
         m_copy, qconfig_mapping, example_inputs, backend_config=backend_config
     )
     after_prepare_result_fx = m_fx(*example_inputs)
-    print("fx prepare:", m_fx)
     m_fx = _convert_to_reference_decomposed_fx(m_fx, backend_config=backend_config)
     after_quant_result_fx = m_fx(*example_inputs)
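
The FX half of the comparison prepares a copy of the same model through the legacy FX workflow and converts it to a reference decomposed form, so the two sets of numerics can be compared directly. A hedged sketch follows; the qconfig and backend-config choices are assumptions about what the script passes in, not its actual configuration.

import copy
import torch
from torch.ao.quantization import QConfigMapping
from torch.ao.quantization.backend_config import get_executorch_backend_config
from torch.ao.quantization.qconfig import default_per_channel_symmetric_qnnpack_qconfig
from torch.ao.quantization.quantize_fx import (
    _convert_to_reference_decomposed_fx,
    prepare_fx,
)

class Add(torch.nn.Module):
    def forward(self, x, y):
        return x + y

example_inputs = (torch.randn(1, 3), torch.randn(1, 3))
m_copy = copy.deepcopy(Add()).eval()  # the script copies its original eager model

# Assumed configuration; the script builds its own equivalents.
qconfig_mapping = QConfigMapping().set_global(
    default_per_channel_symmetric_qnnpack_qconfig
)
backend_config = get_executorch_backend_config()

m_fx = prepare_fx(
    m_copy, qconfig_mapping, example_inputs, backend_config=backend_config
)
after_prepare_result_fx = m_fx(*example_inputs)  # calibration run
m_fx = _convert_to_reference_decomposed_fx(m_fx, backend_config=backend_config)
after_quant_result_fx = m_fx(*example_inputs)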
@@ -71,10 +69,10 @@ def verify_xnnpack_quantizer_matching_fx_quant_model(model_name, model, example_
     print("m_fx:", m_fx)
     print("prepare sqnr:", compute_sqnr(after_prepare_result, after_prepare_result_fx))
     assert compute_sqnr(after_prepare_result, after_prepare_result_fx) > 100
-    print("diff max:", torch.max(after_quant_result - after_quant_result_fx))
-    print("sqnr:", compute_sqnr(after_quant_result, after_quant_result_fx))
+    print("quant diff max:", torch.max(after_quant_result - after_quant_result_fx))
     assert torch.max(after_quant_result - after_quant_result_fx) < 1e-1
-    assert compute_sqnr(after_quant_result, after_quant_result_fx) > 35
+    print("quant sqnr:", compute_sqnr(after_quant_result, after_quant_result_fx))
+    assert compute_sqnr(after_quant_result, after_quant_result_fx) > 30


 if __name__ == "__main__":
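
compute_sqnr above is a signal-to-quantization-noise ratio in decibels; higher means the two tensors agree more closely. A sketch of the common definition, which the script's helper is assumed to match:

import torch

def compute_sqnr(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # SQNR in dB: signal energy relative to the energy of the difference.
    return 20 * torch.log10(torch.norm(x) / torch.norm(x - y))

Under this reading, the prepared (observer-only) models must match to better than 100 dB, while the quantized outputs only need to exceed 30 dB and stay within 0.1 elementwise.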
@@ -123,7 +121,7 @@ def verify_xnnpack_quantizer_matching_fx_quant_model(model_name, model, example_
         raise RuntimeError(
             f"Model {args.model_name} is not a valid name. or not quantizable right now, "
             "please contact executorch team if you want to learn why or how to support "
-            "quantization for the requested model "
+            "quantization for the requested model"
             f"Available models are {list(MODEL_NAME_TO_OPTIONS.keys())}."
         )
