Skip to content
Closed
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
245 changes: 96 additions & 149 deletions kernels/portable/cpu/op_pow.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,101 +9,60 @@
#include <cmath>

#include <executorch/kernels/portable/cpu/scalar_utils.h>
#include <executorch/kernels/portable/cpu/util/broadcast_util.h>
#include <executorch/kernels/portable/cpu/util/functional_util.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
#include <executorch/kernels/portable/cpu/util/elementwise_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>

namespace torch {
namespace executor {
namespace native {

using Tensor = exec_aten::Tensor;

namespace {
// Compile-time dispatch helper for the pow.Tensor_Tensor_out kernel
// (see the ET_SWITCH dispatch below in this diff): the leading bool
// `can_cast` selects between the real implementation (true) and a
// debug-abort stub (false). Only declared here; defined by the two
// partial specializations that follow.
template <
bool can_cast,
typename CTYPE_A,
typename CTYPE_B,
typename CTYPE_IN,
typename CTYPE_OUT>
struct PowInner;

// Valid-cast case: element-wise pow over the (broadcast-compatible)
// inputs. Each element pair is widened to the promoted compute type
// CTYPE_IN, raised via std::pow, then narrowed to the output dtype
// CTYPE_OUT by apply_binary_elementwise_fn writing into `out`.
template <
typename CTYPE_A,
typename CTYPE_B,
typename CTYPE_IN,
typename CTYPE_OUT>
struct PowInner<true, CTYPE_A, CTYPE_B, CTYPE_IN, CTYPE_OUT> {
static void run(const Tensor& a, const Tensor& b, Tensor& out) {
apply_binary_elementwise_fn<CTYPE_A, CTYPE_B, CTYPE_OUT>(
// NOLINTNEXTLINE(facebook-hte-ConstantArgumentPassByValue)
[](const CTYPE_A val_a, const CTYPE_B val_b) {
// Promote both operands to the common compute type before
// calling std::pow so mixed-dtype inputs agree.
CTYPE_IN a_casted = static_cast<CTYPE_IN>(val_a);
CTYPE_IN b_casted = static_cast<CTYPE_IN>(val_b);
CTYPE_IN value = std::pow(a_casted, b_casted);
return static_cast<CTYPE_OUT>(value);
},
a,
b,
out);
}
};

// Shared stub for the "should be unreachable" case: the kernel's
// runtime canCast() check is expected to reject these dtype
// combinations before dispatch, so reaching run() here is a bug and
// aborts in debug builds (ET_DCHECK_MSG).
struct ReportCanCastBug {
static void run(const Tensor&, const Tensor&, Tensor&) {
ET_DCHECK_MSG(false, "BUG: canCast should have been checked above");
}
};

// Invalid-cast case: inherits the debug-abort stub above instead of
// providing a real implementation.
template <
typename CTYPE_A,
typename CTYPE_B,
typename CTYPE_IN,
typename CTYPE_OUT>
struct PowInner<false, CTYPE_A, CTYPE_B, CTYPE_IN, CTYPE_OUT>
: public ReportCanCastBug {};

} // namespace

Tensor& pow_Tensor_Tensor_out(
KernelRuntimeContext& ctx,
const Tensor& a,
const Tensor& b,
Tensor& out) {
// Determine output size and resize for dynamic shapes
// Common Dtype
ScalarType common_type = promoteTypes(a.scalar_type(), b.scalar_type());

// Check Common Dtype
ET_KERNEL_CHECK(
ctx,
resize_to_broadcast_target_size(a, b, out) == Error::Ok,
(canCast(common_type, out.scalar_type()) &&
common_type != ScalarType::Bool),
InvalidArgument,
out);

ScalarType a_type = a.scalar_type();
ScalarType b_type = b.scalar_type();
ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true);
ScalarType out_type = out.scalar_type();
// Check Dim Order
ET_KERNEL_CHECK(
ctx, tensors_have_same_dim_order(a, b, out), InvalidArgument, out);

// Resize
ET_KERNEL_CHECK(
ctx, common_type != exec_aten::ScalarType::Bool, InvalidArgument, out);
ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out);

ET_SWITCH_REALHB_TYPES(a_type, ctx, "pow.Tensor_Tensor_out", CTYPE_A, [&]() {
ET_SWITCH_REALHB_TYPES(
b_type, ctx, "pow.Tensor_Tensor_out", CTYPE_B, [&]() {
using CTYPE_IN = typename torch::executor::
promote_types<CTYPE_A, CTYPE_B, /*half_to_float*/ true>::type;
ET_DCHECK(CppTypeToScalarType<CTYPE_IN>::value == common_type);
ET_SWITCH_REALH_TYPES(
out_type, ctx, "pow.Tensor_Tensor_out", CTYPE_OUT, [&]() {
PowInner<
!std::is_same<CTYPE_IN, bool>::value &&
can_cast<CTYPE_IN, CTYPE_OUT>::value,
CTYPE_A,
CTYPE_B,
CTYPE_IN,
CTYPE_OUT>::run(a, b, out);
});
});
ctx,
resize_to_broadcast_target_size(a, b, out) == Error::Ok,
InvalidArgument,
out);

// Compute Dtype
ScalarType compute_type = utils::get_compute_type(common_type);
if (compute_type != ScalarType::Float) {
compute_type = ScalarType::Double;
}

static constexpr const char op_name[] = "pow.Tensor_Tensor_out";

ET_SWITCH_FLOAT_TYPES(compute_type, ctx, op_name, CTYPE_COMPUTE, [&]() {
utils::apply_bitensor_elementwise_fn<CTYPE_COMPUTE, op_name>(
[](const CTYPE_COMPUTE val_a, const CTYPE_COMPUTE val_b) {
return std::pow(val_a, val_b);
},
ctx,
a,
utils::SupportedTensorDtypes::REALHBBF16,
b,
utils::SupportedTensorDtypes::REALHBBF16,
out,
utils::SupportedTensorDtypes::REALHBF16);
});

return out;
Expand All @@ -114,51 +73,44 @@ Tensor& pow_Tensor_Scalar_out(
const Tensor& a,
const Scalar& b,
Tensor& out) {
(void)ctx;
// Common Dtype
ScalarType common_type = utils::promote_type_with_scalar(a.scalar_type(), b);

// Resize for dynamic shape
ET_KERNEL_CHECK_MSG(
// Check Common Dtype
ET_KERNEL_CHECK(
ctx,
resize_tensor(out, a.sizes()) == Error::Ok,
(canCast(common_type, out.scalar_type()) &&
common_type != ScalarType::Bool),
InvalidArgument,
out,
"Failed to resize output tensor.");
out);

ScalarType a_type = a.scalar_type();
ScalarType b_type = utils::get_scalar_dtype(b);
ScalarType common_type =
utils::promote_type_with_scalar(a_type, b, /*half_to_float*/ false);
ScalarType out_type = out.scalar_type();
// Check Dim Order
ET_KERNEL_CHECK(
ctx, tensors_have_same_dim_order(a, out), InvalidArgument, out);

ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out);
// Resize
ET_KERNEL_CHECK(
ctx, resize_tensor(out, a.sizes()) == Error::Ok, InvalidArgument, out);

if (common_type == ScalarType::Half) {
common_type = ScalarType::Float;
// Compute Dtype
ScalarType compute_type = utils::get_compute_type(common_type);
if (compute_type != ScalarType::Float) {
compute_type = ScalarType::Double;
}

ET_SWITCH_REALHB_TYPES(a_type, ctx, "pow.Tensor_Scalar_out", CTYPE_A, [&]() {
ET_SWITCH_SCALAR_OBJ_TYPES(
b_type, ctx, "pow.Tensor_Scalar_out", CTYPE_B, [&]() {
ET_SWITCH_REAL_TYPES(
common_type, ctx, "pow.Tensor_Scalar_out", CTYPE_IN, [&]() {
ET_SWITCH_REALH_TYPES(
out_type, ctx, "pow.Tensor_Scalar_out", CTYPE_OUT, [&]() {
CTYPE_B val_b = 0;
utils::extract_scalar(b, &val_b);
apply_unary_map_fn(
[val_b](const CTYPE_A val_a) {
CTYPE_IN a_casted = static_cast<CTYPE_IN>(val_a);
CTYPE_IN b_casted = static_cast<CTYPE_IN>(val_b);
CTYPE_IN value = std::pow(a_casted, b_casted);

return static_cast<CTYPE_OUT>(value);
},
a.const_data_ptr<CTYPE_A>(),
out.mutable_data_ptr<CTYPE_OUT>(),
out.numel());
});
});
});
static constexpr const char op_name[] = "pow.Tensor_Scalar_out";

ET_SWITCH_FLOAT_TYPES(compute_type, ctx, op_name, CTYPE_COMPUTE, [&]() {
const CTYPE_COMPUTE val_b = utils::scalar_to<CTYPE_COMPUTE>(b);
utils::apply_unitensor_elementwise_fn<CTYPE_COMPUTE, op_name>(
[val_b](const CTYPE_COMPUTE val_a) {
return std::pow(val_a, val_b);
},
ctx,
a,
utils::SupportedTensorDtypes::REALHBBF16,
out,
utils::SupportedTensorDtypes::REALHBF16);
});

return out;
Expand All @@ -169,49 +121,44 @@ Tensor& pow_Scalar_out(
const Scalar& a,
const Tensor& b,
Tensor& out) {
(void)ctx;
// Common Dtype
ScalarType common_type = utils::promote_type_with_scalar(b.scalar_type(), a);

// Resize for dynamic shape
ET_KERNEL_CHECK_MSG(
// Check Common Dtype
ET_KERNEL_CHECK(
ctx,
resize_tensor(out, b.sizes()) == Error::Ok,
(canCast(common_type, out.scalar_type()) &&
common_type != ScalarType::Bool),
InvalidArgument,
out,
"Failed to resize output tensor.");
out);

ScalarType a_type = utils::get_scalar_dtype(a);
ScalarType b_type = b.scalar_type();
ScalarType common_type =
utils::promote_type_with_scalar(b_type, a, /*half_to_float*/ false);
ScalarType out_type = out.scalar_type();
// Check Dim Order
ET_KERNEL_CHECK(
ctx, tensors_have_same_dim_order(b, out), InvalidArgument, out);

ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out);
// Resize
ET_KERNEL_CHECK(
ctx, resize_tensor(out, b.sizes()) == Error::Ok, InvalidArgument, out);

if (common_type == ScalarType::Half) {
common_type = ScalarType::Float;
// Compute Dtype
ScalarType compute_type = utils::get_compute_type(common_type);
if (compute_type != ScalarType::Float) {
compute_type = ScalarType::Double;
}

ET_SWITCH_SCALAR_OBJ_TYPES(a_type, ctx, "pow.Scalar_out", CTYPE_A, [&]() {
ET_SWITCH_REALHB_TYPES(b_type, ctx, "pow.Scalar_out", CTYPE_B, [&]() {
ET_SWITCH_REAL_TYPES(common_type, ctx, "pow.Scalar_out", CTYPE_IN, [&]() {
ET_SWITCH_REALH_TYPES(
out_type, ctx, "pow.Scalar_out", CTYPE_OUT, [&]() {
CTYPE_A val_a = 0;
utils::extract_scalar(a, &val_a);

apply_unary_map_fn(
[val_a](const CTYPE_B val_b) {
CTYPE_IN a_casted = static_cast<CTYPE_IN>(val_a);
CTYPE_IN b_casted = static_cast<CTYPE_IN>(val_b);
CTYPE_IN value = std::pow(a_casted, b_casted);
return static_cast<CTYPE_OUT>(value);
},
b.const_data_ptr<CTYPE_B>(),
out.mutable_data_ptr<CTYPE_OUT>(),
out.numel());
});
});
});
static constexpr const char op_name[] = "pow.Scalar_out";

ET_SWITCH_FLOAT_TYPES(compute_type, ctx, op_name, CTYPE_COMPUTE, [&]() {
const CTYPE_COMPUTE val_a = utils::scalar_to<CTYPE_COMPUTE>(a);
utils::apply_unitensor_elementwise_fn<CTYPE_COMPUTE, op_name>(
[val_a](const CTYPE_COMPUTE val_b) {
return std::pow(val_a, val_b);
},
ctx,
b,
utils::SupportedTensorDtypes::REALHBBF16,
out,
utils::SupportedTensorDtypes::REALHBF16);
});

return out;
Expand Down
73 changes: 31 additions & 42 deletions kernels/portable/cpu/op_rsub.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,7 @@
*/

#include <executorch/kernels/portable/cpu/scalar_utils.h>
#include <executorch/kernels/portable/cpu/util/functional_util.h>
#include <executorch/kernels/portable/cpu/util/kernel_ops_util.h>
#include <executorch/kernels/portable/cpu/util/elementwise_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>

namespace torch {
Expand All @@ -21,57 +20,47 @@ Tensor& rsub_scalar_out(
const Scalar& b,
const Scalar& alpha,
Tensor& out) {
(void)ctx;
ScalarType alpha_type = utils::get_scalar_dtype(alpha);

// Check alpha type
ET_KERNEL_CHECK(ctx, alpha_type != ScalarType::Bool, InvalidArgument, out);

// Resize for dynamic shape
ET_KERNEL_CHECK_MSG(
// Common Dtype
ScalarType common_type = utils::promote_type_with_scalar(a.scalar_type(), b);

// Check Common Dtype
ET_KERNEL_CHECK(
ctx,
resize_tensor(out, a.sizes()) == Error::Ok,
(common_type == out.scalar_type() && canCast(alpha_type, common_type)),
InvalidArgument,
out,
"Failed to resize output tensor.");
out);

// Check Dim Order
ET_KERNEL_CHECK(
ctx, tensors_have_same_dim_order(a, out), InvalidArgument, out);

ET_KERNEL_CHECK(ctx, tensor_is_realhb_type(out), InvalidArgument, out);
// Resize
ET_KERNEL_CHECK(
ctx, resize_tensor(out, a.sizes()) == Error::Ok, InvalidArgument, out);

ScalarType a_type = a.scalar_type();
ScalarType b_type = utils::get_scalar_dtype(b);
ScalarType alpha_type = utils::get_scalar_dtype(alpha);
ScalarType common_type = utils::promote_type_with_scalar(a_type, b);
ScalarType out_type = out.scalar_type();
// Compute Dtype
ScalarType compute_type = utils::get_compute_type(common_type);

ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out);
ET_KERNEL_CHECK(
ctx, check_alpha_type(alpha_type, common_type), InvalidArgument, out);
ET_KERNEL_CHECK(ctx, tensor_is_real_type(out), InvalidArgument, out);
static constexpr const char op_name[] = "rsub.Scalar_out";

ET_SWITCH_REAL_TYPES(a_type, ctx, "rsub.Scalar_out", CTYPE_A, [&]() {
ET_SWITCH_SCALAR_OBJ_REAL_TYPES(
b_type, ctx, "rsub.Scalar_out", CTYPE_B, [&]() {
ET_SWITCH_REAL_TYPES(
common_type, ctx, "rsub.Scalar_out", CTYPE_IN, [&]() {
ET_SWITCH_REAL_TYPES(
out_type, ctx, "rsub.Scalar_out", CTYPE_OUT, [&]() {
CTYPE_B b_val;
utils::extract_scalar(b, &b_val);
CTYPE_IN b_casted = static_cast<CTYPE_IN>(b_val);
CTYPE_IN alpha_val;
utils::extract_scalar(alpha, &alpha_val);
ET_SWITCH_REAL_TYPES(compute_type, ctx, op_name, CTYPE_COMPUTE, [&]() {
const CTYPE_COMPUTE val_b = utils::scalar_to<CTYPE_COMPUTE>(b);
const CTYPE_COMPUTE val_alpha = utils::scalar_to<CTYPE_COMPUTE>(alpha);
utils::apply_unitensor_elementwise_fn<CTYPE_COMPUTE, op_name>(
[val_b, val_alpha](const CTYPE_COMPUTE val_a) {

apply_unary_map_fn(
[b_casted, alpha_val](const CTYPE_A val_a) {
CTYPE_IN a_casted = static_cast<CTYPE_IN>(val_a);
CTYPE_IN value = b_casted - alpha_val * a_casted;
return static_cast<CTYPE_OUT>(value);
},
a.const_data_ptr<CTYPE_A>(),
out.mutable_data_ptr<CTYPE_OUT>(),
out.numel());
});
});
});
return val_b - val_alpha * val_a;
},
ctx,
a,
utils::SupportedTensorDtypes::REALHBF16,
out,
utils::SupportedTensorDtypes::SAME_AS_COMMON);
});

return out;
Expand Down
Loading
Loading