kernels/portable/cpu/op_abs.cpp (1 addition, 1 deletion)
@@ -31,7 +31,7 @@ Tensor& abs_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
   ET_KERNEL_CHECK(
       ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
 
-  ET_SWITCH_REAL_TYPES(in.scalar_type(), ctx, "abs.out", CTYPE, [&] {
+  ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, "abs.out", CTYPE, [&] {
    apply_unary_map_fn(
        [](const CTYPE val_in) {
          if (val_in < 0) {
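Note: the ET_SWITCH_* suffixes encode the dtype coverage: REAL is the integral plus floating real types, H appends Half, B appends Bool, and BF16 appends BFloat16 (so REALHBF16 dispatches over the real types plus Half plus BFloat16, and REALHBBF16 adds Bool as well). A minimal, self-contained sketch of the dispatch pattern behind such a macro follows; the enum and helper here are illustrative stand-ins, not ExecuTorch's real ScalarType machinery, and the real macro pastes the trailing lambda into each case with CTYPE aliased per dtype.

#include <cstdint>
#include <iostream>

// Illustrative stand-in for the runtime dtype tag (not the real ScalarType).
enum class DType { Byte, Int, Float, Half, BFloat16 };

// Sketch of the pattern: a runtime tag selects a compile-time C++ type, and
// one generic lambda is instantiated per type. Half/BFloat16 would dispatch
// exec_aten::Half / exec_aten::BFloat16; omitted to keep this dependency-free.
template <typename Fn>
void switch_realhbf16(DType t, Fn&& fn) {
  switch (t) {
    case DType::Byte:  fn(uint8_t{0}); break;
    case DType::Int:   fn(int32_t{0}); break;
    case DType::Float: fn(float{0});   break;
    default: std::cerr << "unsupported dtype\n"; break;
  }
}

int main() {
  switch_realhbf16(DType::Float, [](auto zero) {
    using CTYPE = decltype(zero);  // plays the role of the macro's CTYPE
    std::cout << "dispatched, sizeof(CTYPE) = " << sizeof(CTYPE) << "\n";
  });
  return 0;
}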
kernels/portable/cpu/op_full.cpp (1 addition, 1 deletion)
@@ -40,7 +40,7 @@ Tensor& full_out(
   CTYPE_VAL val;
   utils::extract_scalar(fill_value, &val);
 
-  ET_SWITCH_REALHB_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
+  ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
    CTYPE_OUT val_casted = static_cast<CTYPE_OUT>(val);
    auto data_out = out.mutable_data_ptr<CTYPE_OUT>();
    for (size_t i = 0; i < out.numel(); ++i) {
kernels/portable/cpu/op_gelu.cpp (1 addition, 1 deletion)
@@ -37,7 +37,7 @@ Tensor& gelu_out(
   ET_KERNEL_CHECK(
       ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
 
-  ET_SWITCH_FLOAT_TYPES(in.scalar_type(), ctx, "gelu.out", CTYPE, [&]() {
+  ET_SWITCH_FLOATH_TYPES(in.scalar_type(), ctx, "gelu.out", CTYPE, [&]() {
    if (approximate == "tanh") {
      apply_unary_map_fn(
          [](const CTYPE x) {
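Note: FLOATH extends the float/double dispatch with Half only; BFloat16 is not added for gelu in this change. For reference, the "tanh" branch computes the standard tanh GELU approximation. A self-contained sketch of that formula follows; the constants are the well-known ones from the approximation, not copied from the elided lines of this diff, and a real kernel would likely compute in float for Half inputs.

#include <cmath>
#include <cstdio>

// Standard "tanh" GELU approximation:
//   gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
double gelu_tanh(double x) {
  const double kBeta = std::sqrt(2.0 / std::acos(-1.0));  // sqrt(2/pi)
  const double kKappa = 0.044715;
  return 0.5 * x * (1.0 + std::tanh(kBeta * (x + kKappa * x * x * x)));
}

int main() {
  std::printf("gelu_tanh(1.0) = %f\n", gelu_tanh(1.0));  // ~0.841192
  return 0;
}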
kernels/portable/cpu/op_hardtanh.cpp (1 addition, 1 deletion)
@@ -46,7 +46,7 @@ Tensor& hardtanh_out(
 
   ET_KERNEL_CHECK(ctx, in_type == out_type, InvalidArgument, out);
 
-  ET_SWITCH_REAL_TYPES(in_type, ctx, "hardtanh.out", CTYPE, [&]() {
+  ET_SWITCH_REALHBF16_TYPES(in_type, ctx, "hardtanh.out", CTYPE, [&]() {
    CTYPE min_casted;
    ET_SWITCH_SCALAR_OBJ_TYPES(min_type, ctx, "hardtanh.out", CTYPE_MIN, [&]() {
      CTYPE_MIN min_val;
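Note: this kernel is why math_util.h changes below; the clamp presumably routes through utils::min_override / utils::max_override, which previously had a reduced-precision overload for Half only. A self-contained sketch of the element-wise operation hardtanh performs (std::min/std::max stand in for the utils overloads; nothing here is copied from the elided lines of this diff):

#include <algorithm>
#include <cstdio>

// hardtanh clamps each element into [min_val, max_val]. For Half/BFloat16
// the portable kernel compares through float via the utils overloads.
template <typename CTYPE>
CTYPE hardtanh_one(CTYPE x, CTYPE min_val, CTYPE max_val) {
  return std::min(std::max(x, min_val), max_val);
}

int main() {
  const float in[] = {0, 1, 2, 3, 4, 5};
  for (float x : in) {
    // Matches the expected {1, 1, 2, 3, 4, 4} in the new test below.
    std::printf("%g -> %g\n", x, hardtanh_one(x, 1.0f, 4.0f));
  }
  return 0;
}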
kernels/portable/cpu/op_logit.cpp (2 additions, 2 deletions)
@@ -35,8 +35,8 @@ Tensor& logit_out(
 
   ScalarType in_type = in.scalar_type();
   ScalarType out_type = out.scalar_type();
-  ET_SWITCH_REAL_TYPES_AND(Bool, in_type, ctx, "logit.out", CTYPE_IN, [&] {
-    ET_SWITCH_FLOAT_TYPES(out_type, ctx, "logit.out", CTYPE_OUT, [&] {
+  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, "logit.out", CTYPE_IN, [&] {
+    ET_SWITCH_FLOATHBF16_TYPES(out_type, ctx, "logit.out", CTYPE_OUT, [&] {
      apply_unary_map_fn(
          [eps](const CTYPE_IN val_in) {
            CTYPE_OUT xi = static_cast<CTYPE_OUT>(val_in);
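Note: the widened pair of switches lets any real/Half/Bool/BFloat16 input produce any float/double/Half/BFloat16 output; each input value is first cast to CTYPE_OUT (the xi above). A self-contained sketch of the logit math the truncated lambda presumably completes follows; the structure is assumed from the usual logit-with-eps semantics (clamp into [eps, 1 - eps], then log(x / (1 - x))), not copied from the elided lines, and the negative-eps convention is also an assumption.

#include <cmath>
#include <cstdio>

// Assumed logit: optionally clamp, then log-odds.
double logit_with_eps(double x, double eps) {
  if (eps >= 0) {  // assumed convention: negative eps means "no clamping"
    x = std::fmin(std::fmax(x, eps), 1.0 - eps);
  }
  return std::log(x / (1.0 - x));
}

int main() {
  std::printf("logit(0.5)  = %f\n", logit_with_eps(0.5, -1));   // 0
  std::printf("logit(0.75) = %f\n", logit_with_eps(0.75, -1));  // ~1.0986
  return 0;
}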
kernels/portable/cpu/op_neg.cpp (1 addition, 1 deletion)
@@ -33,7 +33,7 @@ Tensor& neg_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
   ET_KERNEL_CHECK(
       ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
 
-  ET_SWITCH_REAL_TYPES(in.scalar_type(), ctx, "neg.out", CTYPE, [&] {
+  ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, "neg.out", CTYPE, [&] {
    apply_unary_map_fn(
        [](const CTYPE val_in) { return static_cast<CTYPE>(-val_in); },
        in.const_data_ptr<CTYPE>(),
kernels/portable/cpu/op_sign.cpp (1 addition, 1 deletion)
@@ -39,7 +39,7 @@ Tensor& sign_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
   if (in.scalar_type() == exec_aten::ScalarType::Bool) {
     memcpy(out.mutable_data_ptr(), in.const_data_ptr(), in.nbytes());
   } else {
-    ET_SWITCH_REAL_TYPES(in.scalar_type(), ctx, "sign.out", CTYPE, [&] {
+    ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, "sign.out", CTYPE, [&] {
      apply_unary_map_fn(
          [](const CTYPE val_in) {
            if (std::isnan(val_in)) {
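Note: the visible std::isnan check suggests the portable kernel propagates NaN rather than returning 0 (the sign test below skips under ATen for exactly this divergence). A self-contained sketch of a NaN-preserving sign follows; the else-branch expression is an assumption, not copied from the elided lines of this diff.

#include <cmath>
#include <cstdio>

// Assumed shape of the lambda: NaN propagates; otherwise return -1, 0, or 1.
template <typename CTYPE>
CTYPE sign_one(CTYPE val_in) {
  if (std::isnan(val_in)) {
    return val_in;  // propagate NaN (ATen's sign returns 0 here instead)
  }
  return static_cast<CTYPE>((val_in > 0) - (val_in < 0));
}

int main() {
  std::printf("%g %g %g\n", sign_one(-2.5), sign_one(0.0), sign_one(7.0));
  return 0;
}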
kernels/portable/cpu/util/math_util.h (8 additions, 4 deletions)
@@ -96,8 +96,10 @@ INT_T max_override(INT_T a, INT_T b) {
 
 template <
     typename T,
-    typename std::enable_if<std::is_same<T, exec_aten::Half>::value, bool>::
-        type = true>
+    typename std::enable_if<
+        std::is_same<T, exec_aten::Half>::value ||
+            std::is_same<T, exec_aten::BFloat16>::value,
+        bool>::type = true>
 T min_override(T a, T b) {
   const auto float_a = static_cast<float>(a);
   if (std::isnan(float_a)) {
@@ -116,8 +118,10 @@ T min_override(T a, T b) {
 
 template <
     typename T,
-    typename std::enable_if<std::is_same<T, exec_aten::Half>::value, bool>::
-        type = true>
+    typename std::enable_if<
+        std::is_same<T, exec_aten::Half>::value ||
+            std::is_same<T, exec_aten::BFloat16>::value,
+        bool>::type = true>
 T max_override(T a, T b) {
   const auto float_a = static_cast<float>(a);
   if (std::isnan(float_a)) {
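Note: the only change here is widening the SFINAE constraint so the float-mediated overloads of min_override/max_override accept exec_aten::BFloat16 as well as exec_aten::Half. A minimal self-contained sketch of the same enable_if dispatch pattern follows; FakeHalf/FakeBFloat16 are hypothetical stand-ins for the exec_aten types.

#include <cmath>
#include <cstdio>
#include <type_traits>

// Hypothetical stand-ins: storage-only types that convert through float.
struct FakeHalf     { float v; explicit operator float() const { return v; } };
struct FakeBFloat16 { float v; explicit operator float() const { return v; } };

// Overload selected only for the reduced-precision types; comparisons go
// through float so std::isnan and operator< behave, mirroring the header.
template <
    typename T,
    typename std::enable_if<
        std::is_same<T, FakeHalf>::value ||
            std::is_same<T, FakeBFloat16>::value,
        bool>::type = true>
T min_override(T a, T b) {
  const float fa = static_cast<float>(a);
  const float fb = static_cast<float>(b);
  if (std::isnan(fa)) {
    return a;  // NaN propagates, matching the kernel's intent
  }
  if (std::isnan(fb)) {
    return b;
  }
  return fa < fb ? a : b;
}

int main() {
  FakeBFloat16 a{1.5f}, b{2.5f};
  std::printf("min = %g\n", static_cast<float>(min_override(a, b)));  // 1.5
  return 0;
}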
kernels/test/op_abs_test.cpp (36 additions)
@@ -24,8 +24,44 @@ class OpAbsTest : public OperatorTest {
   Tensor& op_abs_out(const Tensor& self, Tensor& out) {
     return torch::executor::aten::abs_outf(context_, self, out);
   }
+
+  template <ScalarType DTYPE>
+  void test_dtype() {
+    TensorFactory<DTYPE> tf;
+
+    Tensor in = tf.make({2, 3}, {-3, -2, -1, 0, 1, 2});
+    Tensor out = tf.zeros({2, 3});
+    Tensor expected = tf.make({2, 3}, {3, 2, 1, 0, 1, 2});
+
+    Tensor ret = op_abs_out(in, out);
+
+    EXPECT_TENSOR_EQ(out, ret);
+    EXPECT_TENSOR_EQ(out, expected);
+  }
+
+  template <>
+  void test_dtype<ScalarType::Byte>() {
+    TensorFactory<ScalarType::Byte> tf;
+
+    Tensor in = tf.make({2, 3}, {253, 254, 255, 0, 1, 2});
+    Tensor out = tf.zeros({2, 3});
+    Tensor expected = tf.make({2, 3}, {253, 254, 255, 0, 1, 2});
+
+    Tensor ret = op_abs_out(in, out);
+
+    EXPECT_TENSOR_EQ(out, ret);
+    EXPECT_TENSOR_EQ(out, expected);
+  }
 };
+
+TEST_F(OpAbsTest, AllRealHBF16Input) {
+#define TEST_KERNEL(INPUT_CTYPE, INPUT_DTYPE) \
+  test_dtype<ScalarType::INPUT_DTYPE>();
+
+  ET_FORALL_REALHBF16_TYPES(TEST_KERNEL);
+#undef TEST_KERNEL
+}
 
 TEST_F(OpAbsTest, SanityCheck) {
   TensorFactory<ScalarType::Float> tf;
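Note: the new AllRealHBF16Input test uses the X-macro pattern: ET_FORALL_REALHBF16_TYPES(TEST_KERNEL) invokes TEST_KERNEL once per (ctype, dtype) pair, so one templated helper exercises every covered dtype, and the Byte case picks up the specialization above (abs of an unsigned value is the identity). A hedged sketch of the approximate expansion inside the test body follows; the exact pair list is an assumption from the macro's name.

// Conceptual expansion of ET_FORALL_REALHBF16_TYPES(TEST_KERNEL), i.e. one
// test_dtype<...>() call per dtype; assumed, not copied from the macro.
test_dtype<ScalarType::Byte>();      // TEST_KERNEL(uint8_t, Byte)
test_dtype<ScalarType::Char>();      // TEST_KERNEL(int8_t, Char)
test_dtype<ScalarType::Short>();     // TEST_KERNEL(int16_t, Short)
test_dtype<ScalarType::Int>();       // TEST_KERNEL(int32_t, Int)
test_dtype<ScalarType::Long>();      // TEST_KERNEL(int64_t, Long)
test_dtype<ScalarType::Float>();     // TEST_KERNEL(float, Float)
test_dtype<ScalarType::Double>();    // TEST_KERNEL(double, Double)
test_dtype<ScalarType::Half>();      // TEST_KERNEL(exec_aten::Half, Half)
test_dtype<ScalarType::BFloat16>();  // TEST_KERNEL(exec_aten::BFloat16, BFloat16)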
kernels/test/op_full_test.cpp (23 additions)
@@ -122,3 +122,26 @@ TEST_F(OpFullOutTest, ZeroDim) {
   op_full_out(sizes, true, out);
   EXPECT_TENSOR_EQ(out, tf.ones(sizes_in32_t_vec));
 }
+
+TEST_F(OpFullOutTest, BFloat16Support) {
+  TensorFactory<ScalarType::BFloat16> tf;
+
+  std::vector<int64_t> sizes_int64_t_vec = {2, 3};
+  std::vector<int32_t> sizes_in32_t_vec = {2, 3};
+  auto sizes = IntArrayRef(sizes_int64_t_vec.data(), sizes_int64_t_vec.size());
+
+  // Boolean Scalar
+  Tensor out = tf.zeros(sizes_in32_t_vec);
+  op_full_out(sizes, true, out);
+  EXPECT_TENSOR_EQ(out, tf.ones(sizes_in32_t_vec));
+
+  // Integral Scalar
+  out = tf.zeros(sizes_in32_t_vec);
+  op_full_out(sizes, 1, out);
+  EXPECT_TENSOR_EQ(out, tf.ones(sizes_in32_t_vec));
+
+  // Floating Point Scalar
+  out = tf.zeros(sizes_in32_t_vec);
+  op_full_out(sizes, 3.1415926535, out);
+  EXPECT_TENSOR_EQ(out, tf.full(sizes_in32_t_vec, 3.1415926535));
+}
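Note: the floating-point case passes EXPECT_TENSOR_EQ only because both sides round 3.1415926535 through BFloat16 identically. BFloat16 keeps the upper 16 bits of an IEEE-754 float32, so the stored value is about 3.140625, not pi. A self-contained sketch of the rounding (truncation is used for brevity; real conversion typically rounds to nearest-even, which gives the same result for this value):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Simulate float -> bfloat16 -> float by zeroing the low 16 mantissa bits.
float to_bfloat16_and_back(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits &= 0xFFFF0000u;  // bfloat16 keeps only the upper 16 bits
  std::memcpy(&x, &bits, sizeof(bits));
  return x;
}

int main() {
  // Both op_full_out and tf.full store this same rounded value.
  std::printf("%.7f\n", to_bfloat16_and_back(3.1415926535f));  // 3.1406250
  return 0;
}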
kernels/test/op_gelu_test.cpp (4 additions)
@@ -66,6 +66,10 @@ class OpGeluTest : public OperatorTest {
   }
 };
 
+TEST_F(OpGeluTest, HalfTensors) {
+  test_gelu_execution<ScalarType::Half>();
+}
+
 TEST_F(OpGeluTest, FloatTensors) {
   test_gelu_execution<ScalarType::Float>();
 }
kernels/test/op_hardtanh_test.cpp (24 additions)
@@ -30,8 +30,32 @@ class OpHardTanhTest : public OperatorTest {
     return torch::executor::aten::hardtanh_outf(
         context_, self, min_val, max_val, out);
   }
+
+  template <ScalarType DTYPE>
+  void test_dtype() {
+    TensorFactory<DTYPE> tf;
+
+    Tensor in = tf.make({2, 3}, {0, 1, 2, 3, 4, 5});
+    Scalar min_val = 1;
+    Scalar max_val = 4;
+    Tensor out = tf.zeros({2, 3});
+    Tensor expected = tf.make({2, 3}, {1, 1, 2, 3, 4, 4});
+
+    Tensor ret = op_hardtanh_out(in, min_val, max_val, out);
+
+    EXPECT_TENSOR_EQ(out, ret);
+    EXPECT_TENSOR_EQ(out, expected);
+  }
 };
+
+TEST_F(OpHardTanhTest, AllRealHBF16Input) {
+#define TEST_KERNEL(INPUT_CTYPE, INPUT_DTYPE) \
+  test_dtype<ScalarType::INPUT_DTYPE>();
+
+  ET_FORALL_REALHBF16_TYPES(TEST_KERNEL);
+#undef TEST_KERNEL
+}
 
 TEST_F(OpHardTanhTest, SanityCheck) {
   TensorFactory<ScalarType::Float> tf;
   Tensor in = tf.ones({2, 2});
kernels/test/op_logit_test.cpp (1 addition, 1 deletion)
@@ -100,7 +100,7 @@ void OpLogitOutTest::
 TEST_F(OpLogitOutTest, AllRealInputFloatOutputSupport) {
 #define TEST_ENTRY(ctype, dtype) \
   test_integer_logit_out<ScalarType::dtype, ScalarType::Float>();
-  ET_FORALL_REAL_TYPES(TEST_ENTRY);
+  ET_FORALL_REALHBF16_TYPES(TEST_ENTRY);
 #undef TEST_ENTRY
 }
kernels/test/op_neg_test.cpp (36 additions)
@@ -24,8 +24,44 @@ class OpNegTest : public OperatorTest {
   Tensor& op_neg_out(const Tensor& self, Tensor& out) {
     return torch::executor::aten::neg_outf(context_, self, out);
   }
+
+  template <ScalarType DTYPE>
+  void test_dtype() {
+    TensorFactory<DTYPE> tf;
+
+    Tensor in = tf.make({2, 3}, {-3, -2, -1, 0, 1, 2});
+    Tensor out = tf.zeros({2, 3});
+    Tensor expected = tf.make({2, 3}, {3, 2, 1, 0, -1, -2});
+
+    Tensor ret = op_neg_out(in, out);
+
+    EXPECT_TENSOR_EQ(out, ret);
+    EXPECT_TENSOR_EQ(out, expected);
+  }
+
+  template <>
+  void test_dtype<ScalarType::Byte>() {
+    TensorFactory<ScalarType::Byte> tf;
+
+    Tensor in = tf.make({2, 3}, {253, 254, 255, 0, 1, 2});
+    Tensor out = tf.zeros({2, 3});
+    Tensor expected = tf.make({2, 3}, {3, 2, 1, 0, 255, 254});
+
+    Tensor ret = op_neg_out(in, out);
+
+    EXPECT_TENSOR_EQ(out, ret);
+    EXPECT_TENSOR_EQ(out, expected);
+  }
 };
+
+TEST_F(OpNegTest, AllRealHBF16Input) {
+#define TEST_KERNEL(INPUT_CTYPE, INPUT_DTYPE) \
+  test_dtype<ScalarType::INPUT_DTYPE>();
+
+  ET_FORALL_REALHBF16_TYPES(TEST_KERNEL);
+#undef TEST_KERNEL
+}
 
 TEST_F(OpNegTest, SanityCheck) {
   TensorFactory<ScalarType::Float> tf;
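Note: the Byte specialization encodes unsigned wraparound: for uint8_t, negation is computed modulo 256, so -x equals (256 - x) % 256 and {253, 254, 255, 0, 1, 2} negates to {3, 2, 1, 0, 255, 254}, exactly the expected tensor above. A self-contained check:

#include <cstdint>
#include <cstdio>

// Unsigned negation wraps modulo 2^8 for uint8_t.
int main() {
  const uint8_t in[] = {253, 254, 255, 0, 1, 2};
  for (uint8_t x : in) {
    const uint8_t neg = static_cast<uint8_t>(-x);  // wraps modulo 256
    // Prints 253->3, 254->2, 255->1, 0->0, 1->255, 2->254.
    std::printf("%3u -> %3u\n", static_cast<unsigned>(x),
                static_cast<unsigned>(neg));
  }
  return 0;
}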
kernels/test/op_sign_test.cpp (36 additions)
@@ -25,8 +25,44 @@ class OpSignTest : public OperatorTest {
   Tensor& op_sign_out(const Tensor& self, Tensor& out) {
     return torch::executor::aten::sign_outf(context_, self, out);
   }
+
+  template <ScalarType DTYPE>
+  void test_dtype() {
+    TensorFactory<DTYPE> tf;
+
+    Tensor in = tf.make({2, 3}, {-3, -2, -1, 0, 1, 2});
+    Tensor out = tf.zeros({2, 3});
+    Tensor expected = tf.make({2, 3}, {-1, -1, -1, 0, 1, 1});
+
+    Tensor ret = op_sign_out(in, out);
+
+    EXPECT_TENSOR_EQ(out, ret);
+    EXPECT_TENSOR_EQ(out, expected);
+  }
+
+  template <>
+  void test_dtype<ScalarType::Byte>() {
+    TensorFactory<ScalarType::Byte> tf;
+
+    Tensor in = tf.make({2, 3}, {253, 254, 255, 0, 1, 2});
+    Tensor out = tf.zeros({2, 3});
+    Tensor expected = tf.make({2, 3}, {1, 1, 1, 0, 1, 1});
+
+    Tensor ret = op_sign_out(in, out);
+
+    EXPECT_TENSOR_EQ(out, ret);
+    EXPECT_TENSOR_EQ(out, expected);
+  }
 };
+
+TEST_F(OpSignTest, AllRealHBF16Input) {
+#define TEST_KERNEL(INPUT_CTYPE, INPUT_DTYPE) \
+  test_dtype<ScalarType::INPUT_DTYPE>();
+
+  ET_FORALL_REALHBF16_TYPES(TEST_KERNEL);
+#undef TEST_KERNEL
+}
 
 TEST_F(OpSignTest, ETSanityCheckFloat) {
   if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
     GTEST_SKIP() << "ATen returns 0 on NAN input";