
Commit 7cb56e0 (2 parents: 5cbef6f + 1fea741)

Update

[ghstack-poisoned]

31 files changed: +388 −246 lines

.github/scripts/extract_benchmark_results.py (2 additions & 1 deletion)

@@ -360,6 +360,7 @@ def transform(
                 "app_type": app_type,
                 # Just keep a copy of the benchmark config here
                 "benchmark_config": json.dumps(benchmark_config),
+                "job_conclusion": "SUCCESS",
             },
         },
         "model": {
@@ -455,7 +456,7 @@ def transform_failure_record(
         },
         "metric": {
             "name": "FAILURE_REPORT",
-            "benchmark_values": 0,
+            "benchmark_values": [0],
             "target_value": 0,
             "extra_info": {
                 "method": "",

backends/apple/coreml/test/test_coreml_quantizer.py (3 additions & 1 deletion)

@@ -32,7 +32,9 @@ def quantize_and_compare(
     ) -> None:
         assert quantization_type in {"PTQ", "QAT"}

-        pre_autograd_aten_dialect = export_for_training(model, example_inputs).module()
+        pre_autograd_aten_dialect = export_for_training(
+            model, example_inputs, strict=True
+        ).module()

         quantization_config = LinearQuantizerConfig.from_dict(
             {
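This commit pins strict=True at several export_for_training call sites (here, in the MPS test utils below, and in the Cadence compiler), presumably to keep behavior stable across an upstream default change. A minimal sketch of the call pattern, with a throwaway module standing in for the models under test:

    import torch

    class TinyModel(torch.nn.Module):  # stand-in, not from this repo
        def forward(self, x):
            return torch.relu(x)

    example_inputs = (torch.randn(2, 4),)
    # strict=True selects TorchDynamo-based graph capture, so graph-breaking
    # Python is reported at export time instead of being traced through.
    pre_autograd_aten_dialect = torch.export.export_for_training(
        TinyModel(), example_inputs, strict=True
    ).module()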

backends/apple/mps/test/test_mps_utils.py (1 addition & 1 deletion)

@@ -207,7 +207,7 @@ def lower_module_and_test_output(
         expected_output = model(*sample_inputs)

         model = torch.export.export_for_training(
-            model, sample_inputs, dynamic_shapes=dynamic_shapes
+            model, sample_inputs, dynamic_shapes=dynamic_shapes, strict=True
         ).module()

         edge_program = export_to_edge(

backends/arm/operators/op_table.py (1 addition & 1 deletion)

@@ -41,7 +41,7 @@ def define_node(

         if inputs[0].dtype not in (ts.DType.INT8, ts.DType.INT16):
             raise ValueError(
-                f"TOSA.TABLE only supports int8 or int16 inputs, got {ts.DTypeNames[inputs[0]]}"
+                f"TOSA.TABLE only supports int8 or int16 inputs, got {ts.DTypeNames[inputs[0].dtype]}"
             )

         table = self._exported_program.state_dict[node.name]  # type: ignore[union-attr]
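The old f-string indexed the name table with the argument wrapper itself rather than its dtype, so the error path would raise instead of printing the offending dtype. A generic sketch of the bug class, with hypothetical stand-ins for the TOSA serializer types:

    DTYPE_NAMES = ["UNKNOWN", "BOOL", "INT4", "INT8", "INT16", "INT32"]  # hypothetical

    class TosaArg:  # stand-in for the serializer's argument wrapper
        def __init__(self, dtype: int):
            self.dtype = dtype

    inputs = [TosaArg(dtype=5)]
    # Buggy form: DTYPE_NAMES[inputs[0]] -> TypeError (list index must be int).
    # Fixed form: index with the enum value carried on .dtype.
    print(DTYPE_NAMES[inputs[0].dtype])  # "INT32"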

backends/arm/test/misc/test_debug_feats.py (6 additions & 9 deletions)

@@ -192,16 +192,13 @@ def test_collate_tosa_BI_tests(self):
             .to_edge_transform_and_lower()
             .to_executorch()
         )
+
+        test_collate_dir = "test_collate_tosa_tests/tosa-bi/TestCollateTosaTests/test_collate_tosa_BI_tests"
         # test that the output directory is created and contains the expected files
-        assert os.path.exists(
-            "test_collate_tosa_tests/tosa-bi/TestCollateTosaTests/test_collate_tosa_BI_tests"
-        )
-        assert os.path.exists(
-            "test_collate_tosa_tests/tosa-bi/TestCollateTosaTests/test_collate_tosa_BI_tests/output_tag6_TOSA-0.80+BI.tosa"
-        )
-        assert os.path.exists(
-            "test_collate_tosa_tests/tosa-bi/TestCollateTosaTests/test_collate_tosa_BI_tests/desc_tag6_TOSA-0.80+BI.json"
-        )
+        assert os.path.exists(test_collate_dir)
+
+        for file in os.listdir(test_collate_dir):
+            assert file.endswith(("TOSA-0.80+BI.json", "TOSA-0.80+BI.tosa"))

         os.environ.pop("TOSA_TESTCASES_BASE_PATH")
         shutil.rmtree("test_collate_tosa_tests", ignore_errors=True)
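The rewritten check leans on str.endswith accepting a tuple of suffixes, which is what lets one loop replace the three hard-coded path assertions. A standalone illustration:

    files = ["output_tag6_TOSA-0.80+BI.tosa", "desc_tag6_TOSA-0.80+BI.json"]
    for f in files:
        # endswith returns True if the string ends with any suffix in the tuple.
        assert f.endswith(("TOSA-0.80+BI.json", "TOSA-0.80+BI.tosa"))

Note the new assertion checks suffixes only; it no longer pins down the exact tag6 filenames the old test required.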

backends/arm/test/misc/test_partition_decomposed_quantized_ops.py (6 additions & 1 deletion)

@@ -145,7 +145,12 @@ def test_linear_residaul_tosa_MI(test_data: input_t1):
     pipeline.run()


-@common.parametrize("test_data", test_data)
+@common.parametrize(
+    "test_data",
+    test_data,
+    {"3d_rand": "MLETORCH-855: Issue with Quantization folding."},
+    strict=False,
+)
 def test_linear_residual_tosa_BI(test_data: input_t1):
     pipeline = TosaPipelineBI[input_t1](
         LinearResidualModule(),
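common.parametrize here takes a mapping of test-case name to expected-failure reason, with strict=False so an unexpected pass is not itself reported as an error. A hedged sketch of how such a helper can be layered on plain pytest (an illustration, not this repo's implementation):

    import pytest

    def parametrize_with_xfails(arg_name, cases, xfails=None, strict=True):
        # cases: name -> test data; xfails: name -> reason for expected failure.
        xfails = xfails or {}
        params = [
            pytest.param(
                data,
                id=name,
                marks=[pytest.mark.xfail(reason=xfails[name], strict=strict)]
                if name in xfails
                else [],
            )
            for name, data in cases.items()
        ]
        return pytest.mark.parametrize(arg_name, params)

The same xfails-dict pattern appears again in test_nn_functional.py below.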

backends/arm/test/models/test_nn_functional.py (9 additions & 1 deletion)

@@ -104,7 +104,15 @@ def test_nn_functional_MI(test_data):
         raise e


-@parametrize("test_data", module_tests)
+x_fails = {
+    "normalize": "MLETORCH-852: Support aten.index_put.default",
+    "cosine_similarity": "MLETORCH-854: Support aten.linalg_vector_norm.default",
+    "unfold": "Int64 input && MLETORCH-827: Support aten.index.Tensor",
+    "fold": "Int64 input && MLETORCH-827: Support aten.index_put.default",
+}
+
+
+@parametrize("test_data", module_tests, x_fails, strict=False)
 def test_nn_functional_BI(test_data):
     module, inputs = test_data
     pipeline = TosaPipelineBI[input_t](

backends/arm/test/models/test_w2l_arm.py (1 addition & 0 deletions)

@@ -132,6 +132,7 @@ def test_w2l_u55_BI(self):
     @pytest.mark.slow
     @pytest.mark.corstone_fvp
     @conftest.expectedFailureOnFVP  # TODO: MLETORCH-761
+    @pytest.mark.skip(reason="Intermittent timeout issue: MLETORCH-856")
     def test_w2l_u85_BI(self):
         tester = self._test_w2l_ethos_BI_pipeline(
             self.w2l,
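Note the difference in mechanism: expectedFailureOnFVP presumably still executes the test and inverts its outcome, while pytest.mark.skip prevents it from running at all, which is the safer choice for an intermittent timeout. Minimal usage:

    import pytest

    @pytest.mark.skip(reason="Intermittent timeout issue: MLETORCH-856")
    def test_flaky_on_fvp():
        ...  # never executed; reported as skipped with the given reason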

backends/cadence/aot/compiler.py (1 addition & 1 deletion)

@@ -86,7 +86,7 @@ def convert_pt2(
         remove_decompositions(decomp_table, ops_to_keep)
     # Export with dynamo
     model_gm = (
-        torch.export.export_for_training(model, inputs)
+        torch.export.export_for_training(model, inputs, strict=True)
         .run_decompositions(decomp_table)
         .module()
     )

backends/cadence/aot/ops_registrations.py (13 additions & 0 deletions)

@@ -162,6 +162,10 @@
     "quantized_fully_connected.per_tensor(Tensor src, Tensor weight, Tensor bias, int src_zero_point, "
     "int weight_zero_point, int out_multiplier, int out_shift, int out_zero_point, Tensor? offset) -> (Tensor Z)"
 )
+lib.define("where_Scalar(Tensor condition, float self, float other) -> (Tensor Z)")
+lib.define(
+    "where_Scalar.out(Tensor condition, float self, float other, *, Tensor(a!) out) -> Tensor(a!)"
+)

 # ------------------------------------ #
 #  Migrated from custom_ops.yaml       #
@@ -935,3 +939,12 @@ def transposed_im2row_meta(
     output_size = torch.Size((batch_size, output_length, n_output_plane))

     return input.new_empty(output_size, dtype=input.dtype)
+
+
+@register_fake("cadence::where_Scalar")
+def where_Scalar_meta(
+    condition: torch.Tensor,
+    self: float,
+    other: float,
+) -> torch.Tensor:
+    return condition.new_empty(condition.size(), dtype=torch.float32)
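The lib.define entries declare the where_Scalar schema, and register_fake supplies the shape/dtype propagation rule used during export: the output matches condition's shape and is always float32. A self-contained analogue under a made-up "demo" namespace, checked with FakeTensorMode (hypothetical, not the repo's code):

    import torch
    from torch._subclasses.fake_tensor import FakeTensorMode

    lib = torch.library.Library("demo", "DEF")  # made-up namespace
    lib.define("where_Scalar(Tensor condition, float self, float other) -> Tensor")

    @torch.library.register_fake("demo::where_Scalar")
    def where_Scalar_meta(condition, self: float, other: float) -> torch.Tensor:
        # Propagate shape from condition; the op always yields float32.
        return condition.new_empty(condition.size(), dtype=torch.float32)

    with FakeTensorMode():
        cond = torch.empty(2, 3, dtype=torch.bool)
        out = torch.ops.demo.where_Scalar(cond, 1.0, 0.0)
        assert out.shape == (2, 3) and out.dtype == torch.float32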
