diff --git a/examples/xnnpack/aot_compiler.py b/examples/xnnpack/aot_compiler.py
index f67150169dc..79496c82a58 100644
--- a/examples/xnnpack/aot_compiler.py
+++ b/examples/xnnpack/aot_compiler.py
@@ -87,14 +87,14 @@
     model = model.eval()
     # pre-autograd export. eventually this will become torch.export
-    ep = torch.export.export_for_training(model, example_inputs, strict=True)
+    ep = torch.export.export_for_training(model, example_inputs, strict=False)
     model = ep.module()

     if args.quantize:
         logging.info("Quantizing Model...")
         # TODO(T165162973): This pass shall eventually be folded into quantizer
         model = quantize(model, example_inputs, quant_type)
-        ep = torch.export.export_for_training(model, example_inputs, strict=True)
+        ep = torch.export.export_for_training(model, example_inputs, strict=False)

     edge = to_edge_transform_and_lower(
         ep,
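
For context, here is a minimal standalone sketch of the non-strict export path this diff switches to. `TinyModel` is a hypothetical stand-in for the example models the script exports; the sketch assumes a PyTorch version where `torch.export.export_for_training` accepts the `strict` keyword:

```python
import torch


class TinyModel(torch.nn.Module):
    # Hypothetical stand-in for the models handled by aot_compiler.py.
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 2)

    def forward(self, x):
        return self.linear(x)


model = TinyModel().eval()
example_inputs = (torch.randn(1, 4),)

# strict=False traces via the non-strict (Python-level) path rather than
# TorchDynamo, which tolerates some Python constructs that strict mode rejects.
ep = torch.export.export_for_training(model, example_inputs, strict=False)

# The exported program's module is callable like the original model.
print(ep.module()(*example_inputs))
```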