Skip to content

Commit 0bc272e

Browse files
committed
Update
[ghstack-poisoned]
2 parents f37a11d + 895efcb commit 0bc272e

File tree

4 files changed

+9
-23
lines changed

4 files changed

+9
-23
lines changed

backends/arm/test/models/test_conformer.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -33,7 +33,7 @@ class TestConformer(unittest.TestCase):
3333
"executorch_exir_dialects_edge__ops_aten_max_default": 1,
3434
"executorch_exir_dialects_edge__ops_aten_eq_Scalar": 2,
3535
"executorch_exir_dialects_edge__ops_aten_where_self": 4,
36-
"torch.ops.aten._assert_scalar.default": 10,
36+
"torch.ops.aten._assert_scalar.default": 2,
3737
"torch.ops.aten._local_scalar_dense.default": 1,
3838
"torch.ops.higher_order.executorch_call_delegate": 6,
3939
}

backends/cadence/hifi/operators/op_add.cpp

Lines changed: 5 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -143,14 +143,15 @@ Tensor& add_out(
143143

144144
if ((a_dim == 0) && float_types) {
145145
for (int i = 0; i < b.numel(); i++)
146-
out.mutable_data_ptr<float>()[i] =
147-
a.const_data_ptr<float>()[0] + b.const_data_ptr<float>()[i];
146+
out.mutable_data_ptr<float>()[i] = a.const_data_ptr<float>()[0] +
147+
alpha_val * b.const_data_ptr<float>()[i];
148148
return out;
149149
}
150150
if ((b_dim == 0) && float_types) {
151+
// Precompute the value of b * alpha since it's a constant.
152+
const float val_b = alpha_val * b.const_data_ptr<float>()[0];
151153
for (int i = 0; i < a.numel(); i++)
152-
out.mutable_data_ptr<float>()[i] =
153-
a.const_data_ptr<float>()[i] + b.const_data_ptr<float>()[0];
154+
out.mutable_data_ptr<float>()[i] = a.const_data_ptr<float>()[i] + val_b;
154155
return out;
155156
}
156157

backends/cadence/hifi/operators/op_div.cpp

Lines changed: 0 additions & 16 deletions
Original file line number | Diff line number | Diff line change
@@ -214,22 +214,6 @@ Tensor& div_out_mode(
214214
if ((a_type != ScalarType::Float) || (b_type != ScalarType::Float))
215215
optimized = 0;
216216

217-
bool float_types =
218-
(a_type == ScalarType::Float) && (b_type == ScalarType::Float);
219-
220-
if ((a_dim == 0) && float_types) {
221-
for (int i = 0; i < b.numel(); i++)
222-
out.mutable_data_ptr<float>()[i] =
223-
a.const_data_ptr<float>()[0] / b.const_data_ptr<float>()[i];
224-
return out;
225-
}
226-
if ((b_dim == 0) && float_types) {
227-
for (int i = 0; i < a.numel(); i++)
228-
out.mutable_data_ptr<float>()[i] =
229-
a.const_data_ptr<float>()[i] / b.const_data_ptr<float>()[0];
230-
return out;
231-
}
232-
233217
if ((broadcast == 1) && (max_dim > kNnlibMaxDim))
234218
optimized = 0;
235219
int mode_val = -1;

backends/cadence/hifi/operators/op_sub.cpp

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -143,9 +143,10 @@ Tensor& sub_out(
143143
return out;
144144
}
145145
if ((b_dim == 0) && float_types) {
146+
// Precompute the value of b * alpha since it's a constant.
147+
const float val_b = alpha_val * b.const_data_ptr<float>()[0];
146148
for (int i = 0; i < a.numel(); i++)
147-
out.mutable_data_ptr<float>()[i] =
148-
a.const_data_ptr<float>()[i] - b.const_data_ptr<float>()[0];
149+
out.mutable_data_ptr<float>()[i] = a.const_data_ptr<float>()[i] - val_b;
149150
return out;
150151
}
151152

0 commit comments

Comments (0)