From 54377b834ae793bb24a28cba18e6f4f051c016b9 Mon Sep 17 00:00:00 2001
From: "Yanan Cao (PyTorch)"
Date: Thu, 19 Dec 2024 10:49:49 -0800
Subject: [PATCH] executorch/backends/qualcomm/utils (#7382)

Summary:
Pull Request resolved: https://github.com/pytorch/executorch/pull/7382

Reviewed By: avikchaudhuri, ydwu4

Differential Revision: D67381455
---
 backends/qualcomm/utils/utils.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/backends/qualcomm/utils/utils.py b/backends/qualcomm/utils/utils.py
index 33be00ed51d..a73fe6944eb 100644
--- a/backends/qualcomm/utils/utils.py
+++ b/backends/qualcomm/utils/utils.py
@@ -337,7 +337,7 @@ def capture_program(
     inputs: Tuple[torch.Tensor],
     custom_pass_config: FrozenSet[str] = frozenset(),
 ) -> exir.ExirExportedProgram:
-    ep = torch.export.export(module, inputs)
+    ep = torch.export.export(module, inputs, strict=True)
     decomposed_ep = ep.run_decompositions(get_decomp_table())
     # We choose call_operator by target in ConvertBinaryOpsWithScalar
     # because it is the same source_fn_stack for MultiheadAttention
@@ -551,7 +551,7 @@ def prepare_subgm(subgm, subgm_name):
     fp_node_id_set = fp_node_id_set if fp_node_id_set is not None else set()
     fp_node_op_set = fp_node_op_set if fp_node_op_set is not None else set()
 
-    graph_module = torch.export.export(nn_module, sample_input).module()
+    graph_module = torch.export.export(nn_module, sample_input, strict=True).module()
     # define node support type
     capability_partitioner = CapabilityBasedPartitioner(
         graph_module,
@@ -664,7 +664,7 @@ def forward(self, *inputs):
             ).default(inputs)
 
     model = Model()
-    prog = torch.export.export(model, tuple(inputs.values()))
+    prog = torch.export.export(model, tuple(inputs.values()), strict=True)
     # bookkeeping for variables' life cycle
     return {
         "custom_op": custom_op,
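
For readers unfamiliar with the call being changed, below is a minimal, self-contained sketch (not part of the patch) of torch.export.export used with strict=True, the same keyword argument the three hunks above add. TinyModule and the input shape are made up for illustration only.

# Illustrative sketch, not from the patch: shows the torch.export.export call
# pattern used above. TinyModule and the tensor shape are hypothetical.
import torch


class TinyModule(torch.nn.Module):
    def forward(self, x):
        return x + 1


module = TinyModule()
inputs = (torch.randn(2, 3),)

# strict=True selects the TorchDynamo-based (strict) export path rather than
# non-strict tracing, mirroring the calls updated in the diff.
ep = torch.export.export(module, inputs, strict=True)

# ep.module() returns a GraphModule, as in the second hunk above.
graph_module = ep.module()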