Skip to content

Commit 6ad490a

Browse files
committed
Adding sigmoid optimizations
1 parent 71d78a1 commit 6ad490a

File tree

2 files changed

+73
-1
lines changed

2 files changed

+73
-1
lines changed

backends/cadence/hifi/operators/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,14 +23,14 @@ set(_aten_ops__srcs
2323
"${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_add.cpp"
2424
"${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_div.cpp"
2525
"${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mul.cpp"
26+
"${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sigmoid.cpp"
2627
"${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sub.cpp"
2728
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_bmm.cpp"
2829
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_cat.cpp"
2930
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_clone.cpp"
3031
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_embedding.cpp"
3132
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_full.cpp"
3233
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_permute_copy.cpp"
33-
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_sigmoid.cpp"
3434
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_slice_copy.cpp"
3535
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_softmax.cpp"
3636
"${EXECUTORCH_ROOT}/kernels/portable/cpu/op_split_with_sizes_copy.cpp"
Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,72 @@
1+
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <cmath>

#include <executorch/kernels/portable/cpu/util/functional_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include "kernels.h"

namespace torch {
namespace executor {
namespace native {

using Tensor = exec_aten::Tensor;

// Element-wise sigmoid: out[i] = 1 / (1 + exp(-in[i])).
//
// Fast path: when both `in` and `out` are Float, dispatch to the Cadence
// HiFi NNLib vectorized kernel (xa_nn_vec_sigmoid_f32_f32). Otherwise fall
// back to the portable per-element implementation, which computes in double
// precision before narrowing to the output type.
//
// @param ctx Kernel runtime context, used by the ET_KERNEL_CHECK macros
//            for error reporting.
// @param in  Input tensor; any real type except Bool.
// @param out Output tensor; must be floating-point. Resized to in.sizes()
//            to support dynamic shapes.
// @returns `out`, for call chaining.
Tensor& sigmoid_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
  ET_KERNEL_CHECK(
      ctx, in.scalar_type() != ScalarType::Bool, InvalidArgument, out);
  ET_KERNEL_CHECK(ctx, tensor_is_floating_type(out), InvalidArgument, out);

  // Resize for dynamic shape.
  ET_KERNEL_CHECK_MSG(
      ctx,
      resize_tensor(out, in.sizes()) == Error::Ok,
      InvalidArgument,
      out,
      "Failed to resize output tensor.");

  const ScalarType in_type = in.scalar_type();
  const ScalarType out_type = out.scalar_type();

  // The HiFi NNLib kernel only supports float32 in and float32 out.
  const bool use_optimized =
      (in_type == ScalarType::Float) && (out_type == ScalarType::Float);

  if (use_optimized) {
    // `in` is read-only: take a const pointer, not a mutable one.
    const float* data_in = in.const_data_ptr<float>();
    float* data_out = out.mutable_data_ptr<float>();
    xa_nn_vec_sigmoid_f32_f32(data_out, data_in, in.numel());
  } else {
    ET_SWITCH_REALHB_TYPES(in_type, ctx, "sigmoid.out", CTYPE_IN, [&]() {
      ET_SWITCH_FLOATH_TYPES(out_type, ctx, "sigmoid.out", CTYPE_OUT, [&]() {
        apply_unary_map_fn(
            [](const CTYPE_IN val_in) {
              // Perform math in double to preserve precision.
              double in_casted = static_cast<double>(val_in);
              double out_val = 1.0 / (1.0 + std::exp(-in_casted));
              return static_cast<CTYPE_OUT>(out_val);
            },
            in.const_data_ptr<CTYPE_IN>(),
            out.mutable_data_ptr<CTYPE_OUT>(),
            in.numel());
      });
    });
  }

  return out;
}

} // namespace native
} // namespace executor
} // namespace torch

0 commit comments

Comments
 (0)