77 changes: 35 additions & 42 deletions kernels/optimized/cpu/op_add.cpp
@@ -45,9 +45,7 @@ Tensor& opt_add_out(
ET_SWITCH_REALB_TYPES(b_type, ctx, "add.out", CTYPE_B, [&]() {
CTYPE alpha_val;
ET_KERNEL_CHECK(
ctx,
torch::executor::native::utils::extract_scalar(alpha, &alpha_val),
InvalidArgument, );
ctx, utils::extract_scalar(alpha, &alpha_val), InvalidArgument, );
CTYPE_B b_val = *b.const_data_ptr<CTYPE_B>();
CTYPE b_casted = static_cast<CTYPE>(b_val);

@@ -81,7 +79,6 @@ Tensor& opt_add_scalar_out(
(void)ctx;

ScalarType a_type = a.scalar_type();
ScalarType b_type = utils::get_scalar_dtype(b);
ScalarType common_type =
utils::promote_type_with_scalar(a_type, b, /*half_to_float*/ false);
ScalarType out_type = out.scalar_type();
@@ -99,47 +96,43 @@ Tensor& opt_add_scalar_out(
if (a_type == common_type && a_type == out_type &&
a_type != ScalarType::Half && a_type != ScalarType::BFloat16) {
ET_SWITCH_REALB_TYPES(a_type, ctx, "add.Scalar_out", CTYPE, [&]() {
ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, "add.Scalar_out", CTYPE_B, [&]() {
CTYPE_B b_val;
ET_EXTRACT_SCALAR(b, b_val);
CTYPE b_casted = static_cast<CTYPE>(b_val);
CTYPE alpha_val;
ET_EXTRACT_SCALAR(alpha, alpha_val);

using Vec = at::vec::Vectorized<CTYPE>;
at::vec::map<CTYPE>(
[alpha_val, b_casted](Vec x) {
return x + Vec(alpha_val * b_casted);
},
out.mutable_data_ptr<CTYPE>(),
a.const_data_ptr<CTYPE>(),
out.numel());
});
CTYPE b_casted = utils::scalar_to<CTYPE>(b);
CTYPE alpha_val;
ET_KERNEL_CHECK(
ctx, utils::extract_scalar(alpha, &alpha_val), InvalidArgument, );

using Vec = at::vec::Vectorized<CTYPE>;
at::vec::map<CTYPE>(
[alpha_val, b_casted](Vec x) {
return x + Vec(alpha_val * b_casted);
},
out.mutable_data_ptr<CTYPE>(),
a.const_data_ptr<CTYPE>(),
out.numel());
});
} else {
ET_SWITCH_REALHBBF16_TYPES(a_type, ctx, "add.Scalar_out", CTYPE_A, [&]() {
ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, "add.Scalar_out", CTYPE_B, [&]() {
ET_SWITCH_REALB_TYPES(
common_type, ctx, "add.Scalar_out", CTYPE_IN, [&]() {
ET_SWITCH_REALHBBF16_TYPES(
out_type, ctx, "add.Scalar_out", CTYPE_OUT, [&]() {
CTYPE_B b_val;
ET_EXTRACT_SCALAR(b, b_val);
CTYPE_IN b_casted = static_cast<CTYPE_IN>(b_val);
CTYPE_IN alpha_val;
ET_EXTRACT_SCALAR(alpha, alpha_val);

const size_t n = a.numel();
const CTYPE_A* a_data = a.const_data_ptr<CTYPE_A>();
CTYPE_OUT* out_data = out.mutable_data_ptr<CTYPE_OUT>();
for (auto i = 0; i < n; ++i) {
out_data[i] = static_cast<CTYPE_OUT>(
static_cast<CTYPE_IN>(a_data[i]) +
alpha_val * b_casted);
}
});
});
});
ET_SWITCH_REALB_TYPES(
common_type, ctx, "add.Scalar_out", CTYPE_IN, [&]() {
ET_SWITCH_REALHBBF16_TYPES(
out_type, ctx, "add.Scalar_out", CTYPE_OUT, [&]() {
CTYPE_IN b_casted = utils::scalar_to<CTYPE_IN>(b);
CTYPE_IN alpha_val;
ET_KERNEL_CHECK(
ctx,
utils::extract_scalar(alpha, &alpha_val),
InvalidArgument, );

const size_t n = a.numel();
const CTYPE_A* a_data = a.const_data_ptr<CTYPE_A>();
CTYPE_OUT* out_data = out.mutable_data_ptr<CTYPE_OUT>();
for (auto i = 0; i < n; ++i) {
out_data[i] = static_cast<CTYPE_OUT>(
static_cast<CTYPE_IN>(a_data[i]) +
alpha_val * b_casted);
}
});
});
});
}

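All three optimized kernels in this PR get the same reshaping, shown first here in op_add.cpp and repeated in op_mul.cpp and op_sub.cpp below: the nested `ET_SWITCH_SCALAR_OBJ_TYPES` dispatch on the Scalar's runtime dtype, plus the `ET_EXTRACT_SCALAR` macro, is replaced by a single direct conversion through the new `utils::scalar_to<CTYPE>(b)`, and `alpha` extraction now goes through `utils::extract_scalar` guarded by `ET_KERNEL_CHECK`. A minimal, self-contained sketch of the resulting fast-path shape, with ExecuTorch's `Scalar`, type-switch macros, and `at::vec::map` replaced by stand-ins (every name below is illustrative, not the real API):

```cpp
#include <cstdint>
#include <vector>

// Stand-in for the real Scalar: a tagged union over the three payload
// types a Scalar can carry (bool, double, int64_t).
struct Scalar {
  enum class Tag { Bool, Double, Int } tag;
  union { bool b; double d; int64_t i; };
  explicit Scalar(bool v) : tag(Tag::Bool), b(v) {}
  explicit Scalar(double v) : tag(Tag::Double), d(v) {}
  explicit Scalar(int64_t v) : tag(Tag::Int), i(v) {}
  bool isBoolean() const { return tag == Tag::Bool; }
  bool isFloatingPoint() const { return tag == Tag::Double; }
};

// The conversion this PR centralizes: branch once on the runtime tag,
// then cast to the compile-time computation type. The old pattern
// instead instantiated a separate kernel body per Scalar dtype.
template <typename T>
T scalar_to(const Scalar& s) {
  if (s.isBoolean()) return static_cast<T>(s.b);
  if (s.isFloatingPoint()) return static_cast<T>(s.d);
  return static_cast<T>(s.i);
}

// Simplified shape of the new fast path in opt_add_scalar_out: one type
// switch on the tensor dtype (CTYPE here), no nested switch on b's dtype.
// (The real kernel extracts alpha via utils::extract_scalar behind an
// ET_KERNEL_CHECK guard, and vectorizes the loop with at::vec::map.)
template <typename CTYPE>
void add_scalar(const std::vector<CTYPE>& a, const Scalar& b,
                const Scalar& alpha, std::vector<CTYPE>& out) {
  const CTYPE b_casted = scalar_to<CTYPE>(b);
  const CTYPE alpha_val = scalar_to<CTYPE>(alpha);
  for (size_t i = 0; i < a.size(); ++i) {
    out[i] = a[i] + alpha_val * b_casted;
  }
}

int main() {
  std::vector<float> a{1.f, 2.f, 3.f}, out(3);
  add_scalar<float>(a, Scalar(int64_t{2}), Scalar(1.0), out);
  // out == {3.f, 4.f, 5.f}
}
```

Dropping the Scalar-dtype switch removes one axis of template instantiation per kernel, which is where the net line deletions in these three files come from. Note that op_sub.cpp below uses the `REAL`/`REALH` switch sets rather than `REALB`/`REALHBBF16`, since its fast path excludes bool and BFloat16.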
55 changes: 23 additions & 32 deletions kernels/optimized/cpu/op_mul.cpp
@@ -218,7 +218,6 @@ Tensor& opt_mul_scalar_out(
(void)ctx;

ScalarType a_type = a.scalar_type();
ScalarType b_type = utils::get_scalar_dtype(b);
ScalarType common_type =
utils::promote_type_with_scalar(a_type, b, /*half_to_float*/ false);
ScalarType out_type = out.scalar_type();
@@ -236,40 +235,32 @@
if (a_type == common_type && a_type == out_type &&
a_type != ScalarType::Half && a_type != ScalarType::BFloat16) {
ET_SWITCH_REALB_TYPES(a_type, ctx, "mul.Scalar_out", CTYPE, [&]() {
ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, "mul.Scalar_out", CTYPE_B, [&]() {
CTYPE_B b_val;
ET_EXTRACT_SCALAR(b, b_val);
CTYPE b_casted = static_cast<CTYPE>(b_val);

using Vec = at::vec::Vectorized<CTYPE>;
at::vec::map<CTYPE>(
[b_casted](Vec x) { return x * Vec(b_casted); },
out.mutable_data_ptr<CTYPE>(),
a.const_data_ptr<CTYPE>(),
out.numel());
});
CTYPE b_casted = utils::scalar_to<CTYPE>(b);

using Vec = at::vec::Vectorized<CTYPE>;
at::vec::map<CTYPE>(
[b_casted](Vec x) { return x * Vec(b_casted); },
out.mutable_data_ptr<CTYPE>(),
a.const_data_ptr<CTYPE>(),
out.numel());
});
} else {
ET_SWITCH_REALHBBF16_TYPES(a_type, ctx, "mul.Scalar_out", CTYPE_A, [&]() {
ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, "mul.Scalar_out", CTYPE_B, [&]() {
ET_SWITCH_REALB_TYPES(
common_type, ctx, "mul.Scalar_out", CTYPE_IN, [&]() {
ET_SWITCH_REALHBBF16_TYPES(
out_type, ctx, "mul.Scalar_out", CTYPE_OUT, [&]() {
CTYPE_B b_val;
ET_EXTRACT_SCALAR(b, b_val);
CTYPE_IN b_casted = static_cast<CTYPE_IN>(b_val);

const size_t n = a.numel();
const CTYPE_A* a_data = a.const_data_ptr<CTYPE_A>();
CTYPE_OUT* out_data = out.mutable_data_ptr<CTYPE_OUT>();
for (auto i = 0; i < n; ++i) {
out_data[i] = static_cast<CTYPE_OUT>(
static_cast<CTYPE_IN>(a_data[i]) * b_casted);
}
});
});
});
ET_SWITCH_REALB_TYPES(
common_type, ctx, "mul.Scalar_out", CTYPE_IN, [&]() {
ET_SWITCH_REALHBBF16_TYPES(
out_type, ctx, "mul.Scalar_out", CTYPE_OUT, [&]() {
CTYPE_IN b_casted = utils::scalar_to<CTYPE_IN>(b);

const size_t n = a.numel();
const CTYPE_A* a_data = a.const_data_ptr<CTYPE_A>();
CTYPE_OUT* out_data = out.mutable_data_ptr<CTYPE_OUT>();
for (auto i = 0; i < n; ++i) {
out_data[i] = static_cast<CTYPE_OUT>(
static_cast<CTYPE_IN>(a_data[i]) * b_casted);
}
});
});
});
}

73 changes: 32 additions & 41 deletions kernels/optimized/cpu/op_sub.cpp
@@ -154,7 +154,6 @@ Tensor& opt_sub_scalar_out(
(void)ctx;

ScalarType a_type = a.scalar_type();
ScalarType b_type = utils::get_scalar_dtype(b);
ScalarType common_type =
utils::promote_type_with_scalar(a_type, b, /*half_to_float*/ false);
ScalarType out_type = out.scalar_type();
@@ -172,49 +171,41 @@
if (a_type == common_type && a_type == out_type &&
a_type != ScalarType::Half) {
ET_SWITCH_REAL_TYPES(a_type, ctx, "sub.Scalar_out", CTYPE, [&]() {
ET_SWITCH_SCALAR_OBJ_REAL_TYPES(
b_type, ctx, "sub.Scalar_out", CTYPE_B, [&]() {
CTYPE_B b_val;
ET_EXTRACT_SCALAR(b, b_val);
CTYPE b_casted = static_cast<CTYPE>(b_val);
CTYPE alpha_val;
ET_EXTRACT_SCALAR(alpha, alpha_val);

using Vec = at::vec::Vectorized<CTYPE>;
at::vec::map<CTYPE>(
[alpha_val, b_casted](Vec x) {
return x - Vec(alpha_val * b_casted);
},
out.mutable_data_ptr<CTYPE>(),
a.const_data_ptr<CTYPE>(),
out.numel());
});
CTYPE b_casted = utils::scalar_to<CTYPE>(b);
CTYPE alpha_val;
ET_KERNEL_CHECK(
ctx, utils::extract_scalar(alpha, &alpha_val), InvalidArgument, );

using Vec = at::vec::Vectorized<CTYPE>;
at::vec::map<CTYPE>(
[alpha_val, b_casted](Vec x) {
return x - Vec(alpha_val * b_casted);
},
out.mutable_data_ptr<CTYPE>(),
a.const_data_ptr<CTYPE>(),
out.numel());
});
} else {
ET_SWITCH_REALH_TYPES(a_type, ctx, "sub.Scalar_out", CTYPE_A, [&]() {
ET_SWITCH_SCALAR_OBJ_REAL_TYPES(
b_type, ctx, "sub.Scalar_out", CTYPE_B, [&]() {
ET_SWITCH_REAL_TYPES(
common_type, ctx, "sub.Scalar_out", CTYPE_IN, [&]() {
ET_SWITCH_REALH_TYPES(
out_type, ctx, "sub.Scalar_out", CTYPE_OUT, [&]() {
CTYPE_B b_val;
ET_EXTRACT_SCALAR(b, b_val);
CTYPE_IN b_casted = static_cast<CTYPE_IN>(b_val);
CTYPE_IN alpha_val;
ET_EXTRACT_SCALAR(alpha, alpha_val);

const size_t n = a.numel();
const CTYPE_A* a_data = a.const_data_ptr<CTYPE_A>();
CTYPE_OUT* out_data = out.mutable_data_ptr<CTYPE_OUT>();
for (auto i = 0; i < n; ++i) {
out_data[i] = static_cast<CTYPE_OUT>(
static_cast<CTYPE_IN>(a_data[i]) -
alpha_val * b_casted);
}
});
});
});
ET_SWITCH_REAL_TYPES(common_type, ctx, "sub.Scalar_out", CTYPE_IN, [&]() {
ET_SWITCH_REALH_TYPES(
out_type, ctx, "sub.Scalar_out", CTYPE_OUT, [&]() {
CTYPE_IN b_casted = utils::scalar_to<CTYPE_IN>(b);
CTYPE_IN alpha_val;
ET_KERNEL_CHECK(
ctx,
utils::extract_scalar(alpha, &alpha_val),
InvalidArgument, );

const size_t n = a.numel();
const CTYPE_A* a_data = a.const_data_ptr<CTYPE_A>();
CTYPE_OUT* out_data = out.mutable_data_ptr<CTYPE_OUT>();
for (auto i = 0; i < n; ++i) {
out_data[i] = static_cast<CTYPE_OUT>(
static_cast<CTYPE_IN>(a_data[i]) - alpha_val * b_casted);
}
});
});
});
}

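A recurring bit of syntax in these hunks is the trailing comma in `ET_KERNEL_CHECK(ctx, ..., InvalidArgument, );`. The last macro argument is the value to return on failure, and it is deliberately left empty here because the enclosing lambda returns void. A simplified, assumed expansion — the parameter list is inferred from the call sites above and the real macro also logs, so treat this as a sketch rather than the actual definition:

```cpp
// Assumed, simplified expansion of ET_KERNEL_CHECK(context, cond, error, retval):
// on failure, mark the kernel context failed and return `retval` -- which may
// be empty, turning the statement into a bare `return;`.
#define ET_KERNEL_CHECK_SKETCH(context, cond, error, retval) \
  do {                                                       \
    if (!(cond)) {                                           \
      (context).fail(torch::executor::Error::error);         \
      return retval;                                         \
    }                                                        \
  } while (false)
```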
28 changes: 27 additions & 1 deletion kernels/portable/cpu/scalar_utils.h
@@ -8,7 +8,6 @@

#pragma once

#include <algorithm>
#include <cmath>
#include <limits>

@@ -261,6 +260,33 @@ bool extract_scalar(Scalar scalar, BOOL_T* out_val) {
return false;
}

/*
* Convert Scalar to C++ type
*/

template <typename T>
T scalar_to(const Scalar& s) {
if (s.isBoolean()) {
return static_cast<T>(s.to<bool>());
} else if (s.isFloatingPoint()) {
return static_cast<T>(s.to<double>());
} else {
return static_cast<T>(s.to<int64_t>());
}
}

template <>
inline double scalar_to<double>(const Scalar& s) {
return s.isFloatingPoint() ? s.to<double>()
: static_cast<double>(s.to<int64_t>());
}

template <>
inline int64_t scalar_to<int64_t>(const Scalar& s) {
return s.isFloatingPoint() ? static_cast<int64_t>(s.to<double>())
: s.to<int64_t>();
}

} // namespace utils
} // namespace native
} // namespace executor
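The `double` and `int64_t` specializations skip the generic template's `isBoolean()` branch, presumably on the assumption that boolean payloads either never reach these instantiations or are readable through `to<int64_t>()`; otherwise the conversions follow plain `static_cast` semantics, so a floating-point Scalar read as an integer truncates toward zero. Expected behavior, assuming the usual Scalar constructors (illustrative, not a test from this PR):

```cpp
// Generic template, bool branch: true -> 1.0f.
float f = utils::scalar_to<float>(Scalar(true));
// double specialization, integer payload: 3 -> 3.0.
double d = utils::scalar_to<double>(Scalar(int64_t{3}));
// int64_t specialization, floating payload: 2.9 -> 2
// (static_cast truncates toward zero).
int64_t i = utils::scalar_to<int64_t>(Scalar(2.9));
```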
29 changes: 1 addition & 28 deletions kernels/portable/cpu/util/elementwise_util.h
@@ -9,6 +9,7 @@
#pragma once

#include <c10/util/irange.h>
#include <executorch/kernels/portable/cpu/scalar_utils.h>
#include <executorch/kernels/portable/cpu/util/broadcast_indexes_range.h>
#include <executorch/kernels/portable/cpu/util/broadcast_util.h>
#include <executorch/kernels/portable/cpu/util/dtype_util.h>
@@ -27,34 +28,6 @@ namespace torch {
namespace executor {
namespace native {
namespace utils {

/*
* Convert Scalar to C++ type
*/

template <typename T>
T scalar_to(const Scalar& s) {
if (s.isBoolean()) {
return static_cast<T>(s.to<bool>());
} else if (s.isFloatingPoint()) {
return static_cast<T>(s.to<double>());
} else {
return static_cast<T>(s.to<int64_t>());
}
}

template <>
inline double scalar_to<double>(const Scalar& s) {
return s.isFloatingPoint() ? s.to<double>()
: static_cast<double>(s.to<int64_t>());
}

template <>
inline int64_t scalar_to<int64_t>(const Scalar& s) {
return s.isFloatingPoint() ? static_cast<int64_t>(s.to<double>())
: s.to<int64_t>();
}

namespace internal {
/**
* Causes these utility functions to make sure to respect Tensor
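The `scalar_to` definitions removed here moved verbatim into scalar_utils.h, and the new `#include <executorch/kernels/portable/cpu/scalar_utils.h>` at the top of this file keeps existing call sites compiling unchanged. The targets.bzl hunk below mirrors that at the build level by moving the `scalar_utils` dependency from `deps` into `exported_deps`, so targets that depend on this utility library inherit the header. A hypothetical downstream call site (names here are illustrative):

```cpp
#include <executorch/kernels/portable/cpu/util/elementwise_util.h>

// Still compiles after this PR: scalar_to is found through the new
// include in elementwise_util.h rather than a definition in that
// header itself.
template <typename CTYPE>
CTYPE load_scale(const torch::executor::Scalar& s) {
  return torch::executor::native::utils::scalar_to<CTYPE>(s);
}
```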
2 changes: 1 addition & 1 deletion kernels/portable/cpu/util/targets.bzl
@@ -116,9 +116,9 @@ def define_common_targets():
"//executorch/runtime/core/portable_type/c10/c10:aten_headers_for_executorch",
"//executorch/runtime/kernel:kernel_runtime_context",
"//executorch/extension/threadpool:threadpool",
"//executorch/kernels/portable/cpu:scalar_utils",
],
deps = [
"//executorch/kernels/portable/cpu:scalar_utils",
"//executorch/runtime/kernel:kernel_includes",
],
visibility = ["//executorch/kernels/portable/cpu/...", "//executorch/kernels/optimized/cpu/...", "@EXECUTORCH_CLIENTS"],