We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 03f6bcc · commit f7e72ee — Copy full SHA for f7e72ee
examples/xnnpack/aot_compiler.py
@@ -87,14 +87,14 @@
87
88
model = model.eval()
89
# pre-autograd export. eventually this will become torch.export
90
- ep = torch.export.export_for_training(model, example_inputs, strict=True)
+ ep = torch.export.export_for_training(model, example_inputs, strict=False)
91
model = ep.module()
92
93
if args.quantize:
94
logging.info("Quantizing Model...")
95
# TODO(T165162973): This pass shall eventually be folded into quantizer
96
model = quantize(model, example_inputs, quant_type)
97
98
99
edge = to_edge_transform_and_lower(
100
ep,
0 commit comments