Skip to content

Commit 5863388

Browse files
authored
Merge branch 'main' into change-1018030
2 parents 007d92e + f2a08da commit 5863388

File tree

5 files changed

+23
-13
lines changed

5 files changed

+23
-13
lines changed

backends/arm/test/models/test_mobilenet_v3_arm.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ def test_mv3_tosa_BI():
4646
aten_op=[],
4747
exir_op=[],
4848
use_to_edge_transform_and_lower=True,
49-
atol=0.3,
49+
atol=0.5,
5050
qtol=1,
5151
)
5252
pipeline.run()
@@ -63,7 +63,7 @@ def test_mv3_u55_BI():
6363
exir_ops=[],
6464
run_on_fvp=True,
6565
use_to_edge_transform_and_lower=True,
66-
atol=0.3,
66+
atol=0.5,
6767
qtol=1,
6868
)
6969
pipeline.run()
@@ -80,7 +80,7 @@ def test_mv3_u85_BI():
8080
exir_ops=[],
8181
run_on_fvp=True,
8282
use_to_edge_transform_and_lower=True,
83-
atol=0.3,
83+
atol=0.5,
8484
qtol=1,
8585
)
8686
pipeline.run()

backends/arm/test/models/test_torch_functions.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -101,6 +101,7 @@ def forward(self, *args):
101101
"Requires dynamic output shape.",
102102
"topk": "NotImplementedError: No registered serialization name for <class 'torch.return_types.topk'> found",
103103
"sort": "NotImplementedError: No registered serialization name for <class 'torch.return_types.sort'> found",
104+
"norm": "An error occurred when running the 'KeepDimsFalseToSqueezePass' pass after the following passes:",
104105
},
105106
)
106107
def test_torch_fns_MI(test_data):
@@ -129,6 +130,7 @@ def test_torch_fns_MI(test_data):
129130
"topk": "NotImplementedError: No registered serialization name for <class 'torch.return_types.topk'> found",
130131
"sort": "NotImplementedError: No registered serialization name for <class 'torch.return_types.sort'> found",
131132
"t": "MLETORCH-855: Issue with Quantization folding.",
133+
"norm": "An error occurred when running the 'KeepDimsFalseToSqueezePass' pass after the following passes:",
132134
},
133135
strict=False,
134136
)

backends/arm/test/ops/test_sigmoid_16bit.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,7 @@ def forward(self, x):
8181

8282

8383
@common.parametrize("test_data", test_data_suite)
84-
@pytest.mark.flaky(reruns=5)
84+
@pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642
8585
def test_sigmoid_tosa_BI(test_data):
8686
pipeline = TosaPipelineBI(
8787
Sigmoid(), (test_data(),), Sigmoid.aten_op, Sigmoid.exir_op
@@ -97,7 +97,7 @@ def test_sigmoid_tosa_BI(test_data):
9797
"ramp": "AssertionError: Output 0 does not match reference output. MLETORCH-787"
9898
},
9999
)
100-
@pytest.mark.flaky(reruns=5)
100+
@pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642
101101
def test_sigmoid_add_sigmoid_tosa_BI(test_data):
102102
pipeline = TosaPipelineBI(
103103
SigmoidAddSigmoid(), (test_data(),), Sigmoid.aten_op, Sigmoid.exir_op
@@ -110,6 +110,7 @@ def test_sigmoid_add_sigmoid_tosa_BI(test_data):
110110
"test_data",
111111
test_data_suite,
112112
)
113+
@pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642
113114
def test_sigmoid_tosa_u55(test_data):
114115
pipeline = OpNotSupportedPipeline(
115116
Sigmoid(), (test_data(),), "TOSA-0.80+BI+u55", {Sigmoid.exir_op: 1}
@@ -122,6 +123,7 @@ def test_sigmoid_tosa_u55(test_data):
122123
"test_data",
123124
test_data_suite,
124125
)
126+
@pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642
125127
def test_sigmoid_add_sigmoid_tosa_u55(test_data):
126128
pipeline = OpNotSupportedPipeline(
127129
SigmoidAddSigmoid(),
@@ -135,7 +137,7 @@ def test_sigmoid_add_sigmoid_tosa_u55(test_data):
135137

136138

137139
@common.parametrize("test_data", test_data_suite)
138-
@pytest.mark.flaky(reruns=5)
140+
@pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642
139141
@common.XfailIfNoCorstone320
140142
def test_sigmoid_tosa_u85(test_data):
141143
pipeline = EthosU85PipelineBI(
@@ -152,7 +154,7 @@ def test_sigmoid_tosa_u85(test_data):
152154
"ramp": "AssertionError: Output 0 does not match reference output.",
153155
},
154156
)
155-
@pytest.mark.flaky(reruns=5)
157+
@pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642
156158
@common.XfailIfNoCorstone320
157159
def test_sigmoid_add_sigmoid_tosa_u85(test_data):
158160
pipeline = EthosU85PipelineBI(

backends/arm/test/ops/test_sigmoid_32bit.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ def forward(self, x):
9797

9898

9999
@common.parametrize("test_data", test_data_suite)
100-
@pytest.mark.flaky(reruns=5)
100+
@pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642
101101
def test_sigmoid_tosa_BI(test_data):
102102
pipeline = TosaPipelineBI(
103103
Sigmoid(),
@@ -110,7 +110,7 @@ def test_sigmoid_tosa_BI(test_data):
110110

111111

112112
@common.parametrize("test_data", test_data_suite)
113-
@pytest.mark.flaky(reruns=5)
113+
@pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642
114114
def test_sigmoid_add_sigmoid_tosa_BI(test_data):
115115
pipeline = TosaPipelineBI(
116116
SigmoidAddSigmoid(),
@@ -123,6 +123,7 @@ def test_sigmoid_add_sigmoid_tosa_BI(test_data):
123123

124124

125125
@common.parametrize("test_data", test_data_suite)
126+
@pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642
126127
def test_sigmoid_tosa_u55(test_data):
127128
pipeline = OpNotSupportedPipeline(
128129
Sigmoid(), (test_data(),), "TOSA-0.80+BI+u55", {Sigmoid.exir_op: 1}
@@ -132,6 +133,7 @@ def test_sigmoid_tosa_u55(test_data):
132133

133134

134135
@common.parametrize("test_data", test_data_suite)
136+
@pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642
135137
def test_sigmoid_add_sigmoid_tosa_u55(test_data):
136138
pipeline = OpNotSupportedPipeline(
137139
SigmoidAddSigmoid(),
@@ -145,7 +147,7 @@ def test_sigmoid_add_sigmoid_tosa_u55(test_data):
145147

146148

147149
@common.parametrize("test_data", test_data_suite)
148-
@pytest.mark.flaky(reruns=5)
150+
@pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642
149151
@common.XfailIfNoCorstone320
150152
def test_sigmoid_tosa_u85(test_data):
151153
pipeline = EthosU85PipelineBI(
@@ -162,7 +164,7 @@ def test_sigmoid_tosa_u85(test_data):
162164
"ramp": "AssertionError: Output 0 does not match reference output.",
163165
},
164166
)
165-
@pytest.mark.flaky(reruns=5)
167+
@pytest.mark.flaky(reruns=32) # Flaky due to Vela bug: MLBEDSW-10642
166168
@common.XfailIfNoCorstone320
167169
def test_sigmoid_add_sigmoid_tosa_u85(test_data):
168170
pipeline = EthosU85PipelineBI(

docs/source/getting-started.md

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -99,6 +99,7 @@ print("Comparing against original PyTorch module")
9999
print(torch.allclose(output[0], eager_reference_output, rtol=1e-3, atol=1e-5))
100100
```
101101

102+
For complete examples of exporting and running the model, please refer to our [examples GitHub repository](https://github.com/pytorch-labs/executorch-examples/tree/main/mv2/python).
102103

103104
<hr/>
104105

@@ -178,6 +179,7 @@ target_link_libraries(
178179
xnnpack_backend)
179180
```
180181

182+
181183
#### Runtime APIs
182184
Both high-level and low-level C++ APIs are provided. The low-level APIs are platform independent, do not dynamically allocate memory, and are most suitable for resource-constrained embedded systems. The high-level APIs are provided as a convenience wrapper around the lower-level APIs, and make use of dynamic memory allocation and standard library constructs to reduce verbosity.
183185

@@ -194,8 +196,8 @@ using namespace ::executorch::extension;
194196
Module module("/path/to/model.pte");
195197

196198
// Create an input tensor.
197-
float input[1 * 3 * 256 * 256];
198-
auto tensor = from_blob(input, {1, 3, 256, 256});
199+
float input[1 * 3 * 224 * 224];
200+
auto tensor = from_blob(input, {1, 3, 224, 224});
199201

200202
// Perform an inference.
201203
const auto result = module.forward(tensor);
@@ -208,6 +210,8 @@ if (result.ok()) {
208210
209211
For more information on the C++ APIs, see [Running an ExecuTorch Model Using the Module Extension in C++](extension-module.md) and [Managing Tensor Memory in C++](extension-tensor.md).
210212
213+
For complete examples of building and running a C++ application, please refer to our [examples GitHub repository](https://github.com/pytorch-labs/executorch-examples/tree/main/mv2/cpp).
214+
211215
<hr/>
212216
213217
## Next Steps

0 commit comments

Comments
 (0)