2 changes: 1 addition & 1 deletion kernels/portable/cpu/op_ceil.cpp
@@ -17,7 +17,7 @@ namespace native {
 using executorch::aten::Tensor;
 
 Tensor& ceil_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
-  return internal::unary_ufunc_realh(std::ceil, ctx, in, out);
+  return internal::unary_ufunc_realhbf16(std::ceil, ctx, in, out);
 }
 
 } // namespace native
2 changes: 1 addition & 1 deletion kernels/portable/cpu/op_floor.cpp
@@ -17,7 +17,7 @@ namespace native {
 using executorch::aten::Tensor;
 
 Tensor& floor_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
-  return internal::unary_ufunc_realh(std::floor, ctx, in, out);
+  return internal::unary_ufunc_realhbf16(std::floor, ctx, in, out);
 }
 
 } // namespace native
2 changes: 1 addition & 1 deletion kernels/portable/cpu/op_trunc.cpp
@@ -15,7 +15,7 @@ namespace executor {
 namespace native {
 
 Tensor& trunc_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
-  return internal::unary_ufunc_realh(std::trunc, ctx, in, out);
+  return internal::unary_ufunc_realhbf16(std::trunc, ctx, in, out);
 }
 
 } // namespace native
2 changes: 1 addition & 1 deletion kernels/portable/cpu/pattern/pattern.h
@@ -59,7 +59,7 @@ namespace internal {
  * and dtype. The function fn specifies the math operation which is applied to
  * the input tensor element-wise.
  */
-Tensor& unary_ufunc_realh(
+Tensor& unary_ufunc_realhbf16(
    double (*fn)(double),
    KernelRuntimeContext& ctx,
    const Tensor& in,
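
For reference, callers opt into the wider dtype coverage via the rename alone: each portable op passes its <cmath> function to this helper, exactly as in the op_ceil.cpp hunk above. A minimal sketch of a hypothetical additional caller (round_out is illustrative only and is not part of this change):

// Hypothetical caller mirroring op_ceil.cpp above; round_out is
// illustrative and does not appear in this diff.
Tensor& round_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
  // std::round matches the double (*)(double) signature the helper takes.
  return internal::unary_ufunc_realhbf16(std::round, ctx, in, out);
}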
2 changes: 1 addition & 1 deletion kernels/portable/cpu/pattern/targets.bzl
@@ -52,7 +52,7 @@ def define_common_targets():
         srcs = [
             "unary_ufunc_realhb_to_bool.cpp",
             "unary_ufunc_realhbbf16_to_floathbf16.cpp",
-            "unary_ufunc_realh.cpp",
+            "unary_ufunc_realhbf16.cpp",
         ],
         exported_headers = [
             "pattern.h",
kernels/portable/cpu/pattern/unary_ufunc_realh.cpp → kernels/portable/cpu/pattern/unary_ufunc_realhbf16.cpp (file renamed)
@@ -15,7 +15,7 @@ namespace executor {
 namespace native {
 namespace internal {
 
-Tensor& unary_ufunc_realh(
+Tensor& unary_ufunc_realhbf16(
    double (*fn)(double),
    KernelRuntimeContext& ctx,
    const Tensor& in,
@@ -36,7 +36,7 @@ Tensor& unary_ufunc_realh(
   ET_KERNEL_CHECK(
       ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
 
-  ET_SWITCH_REALH_TYPES(in.scalar_type(), ctx, __func__, CTYPE, [&] {
+  ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, __func__, CTYPE, [&] {
     apply_unary_map_fn(
         [fn](const CTYPE val_in) { return static_cast<CTYPE>(fn(val_in)); },
         in.const_data_ptr<CTYPE>(),
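
The substantive change here is the dispatch macro: ET_SWITCH_REALH_TYPES covered the real dtypes plus Half, and the REALHBF16 variant extends that set with BFloat16, so the conversion lambda is now also instantiated for that dtype. A simplified sketch of the dispatch, assuming the usual ExecuTorch scalar types (run<T> stands in for the lambda; the real macro is generated and also covers the integral types):

// Illustrative sketch only, not the actual macro expansion.
switch (in.scalar_type()) {
  case ScalarType::Float:    run<float>(); break;
  case ScalarType::Double:   run<double>(); break;
  case ScalarType::Half:     run<executorch::aten::Half>(); break;
  case ScalarType::BFloat16: run<executorch::aten::BFloat16>(); break; // newly reachable
  // ...integral real types (Byte, Char, Short, Int, Long) elided...
  default:
    break; // unsupported dtype; the kernel reports an error via ctx
}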
39 changes: 17 additions & 22 deletions kernels/test/op_ceil_test.cpp
@@ -25,33 +25,28 @@ class OpCeilTest : public OperatorTest {
   Tensor& op_ceil_out(const Tensor& self, Tensor& out) {
     return torch::executor::aten::ceil_outf(context_, self, out);
   }
-};
-
-TEST_F(OpCeilTest, SanityCheck) {
-  TensorFactory<ScalarType::Float> tf;
+
+  template <ScalarType DTYPE>
+  void test_ceil_float_dtype() {
+    TensorFactory<DTYPE> tf;
 
-  Tensor in = tf.make({1, 7}, {-3.0, -2.99, -1.01, 0.0, 1.01, 2.99, 3.0});
-  Tensor out = tf.zeros({1, 7});
-  Tensor expected = tf.make({1, 7}, {-3.0, -2.0, -1.0, 0.0, 2.0, 3.0, 3.0});
+    Tensor in = tf.make({1, 7}, {-3.0, -2.99, -1.01, 0.0, 1.01, 2.99, 3.0});
+    Tensor out = tf.zeros({1, 7});
+    Tensor expected = tf.make({1, 7}, {-3.0, -2.0, -1.0, 0.0, 2.0, 3.0, 3.0});
 
-  Tensor ret = op_ceil_out(in, out);
+    Tensor ret = op_ceil_out(in, out);
 
-  EXPECT_TENSOR_EQ(out, ret);
-  EXPECT_TENSOR_EQ(out, expected);
-}
+    EXPECT_TENSOR_EQ(out, ret);
+    EXPECT_TENSOR_EQ(out, expected);
+  }
+};
 
-TEST_F(OpCeilTest, HalfSupport) {
+TEST_F(OpCeilTest, AllFloatDtypeSupport) {
+#define TEST_ENTRY(ctype, dtype) test_ceil_float_dtype<ScalarType::dtype>();
   if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
-    GTEST_SKIP() << "Test Half support only for ExecuTorch mode";
+    ET_FORALL_FLOAT_TYPES(TEST_ENTRY);
+  } else {
+    ET_FORALL_FLOATHBF16_TYPES(TEST_ENTRY);
   }
-  TensorFactory<ScalarType::Half> tf;
-
-  Tensor in = tf.make({1, 7}, {-3.0, -2.99, -1.01, 0.0, 1.01, 2.99, 3.0});
-  Tensor out = tf.zeros({1, 7});
-  Tensor expected = tf.make({1, 7}, {-3.0, -2.0, -1.0, 0.0, 2.0, 3.0, 3.0});
-
-  Tensor ret = op_ceil_out(in, out);
-
-  EXPECT_TENSOR_EQ(out, ret);
-  EXPECT_TENSOR_EQ(out, expected);
+#undef TEST_ENTRY
 }
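
The ET_FORALL_* macros invoke TEST_ENTRY once per supported (ctype, dtype) pair, so this single test now exercises every floating dtype the kernel handles. Assuming the usual ExecuTorch floating-type list, the non-ATen branch expands roughly to:

// Approximate expansion of ET_FORALL_FLOATHBF16_TYPES(TEST_ENTRY);
// dtype list assumed for illustration.
test_ceil_float_dtype<ScalarType::Float>();
test_ceil_float_dtype<ScalarType::Double>();
test_ceil_float_dtype<ScalarType::Half>();
test_ceil_float_dtype<ScalarType::BFloat16>();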
39 changes: 17 additions & 22 deletions kernels/test/op_floor_test.cpp
@@ -25,33 +25,28 @@ class OpFloorTest : public OperatorTest {
   Tensor& op_floor_out(const Tensor& self, Tensor& out) {
     return torch::executor::aten::floor_outf(context_, self, out);
   }
-};
-
-TEST_F(OpFloorTest, SanityCheck) {
-  TensorFactory<ScalarType::Float> tf;
+
+  template <ScalarType DTYPE>
+  void test_floor_float_dtype() {
+    TensorFactory<DTYPE> tf;
 
-  Tensor in = tf.make({1, 7}, {-3.0, -2.99, -1.01, 0.0, 1.01, 2.99, 3.0});
-  Tensor out = tf.zeros({1, 7});
-  Tensor expected = tf.make({1, 7}, {-3.0, -3.0, -2.0, 0.0, 1.0, 2.0, 3.0});
+    Tensor in = tf.make({1, 7}, {-3.0, -2.99, -1.01, 0.0, 1.01, 2.99, 3.0});
+    Tensor out = tf.zeros({1, 7});
+    Tensor expected = tf.make({1, 7}, {-3.0, -3.0, -2.0, 0.0, 1.0, 2.0, 3.0});
 
-  Tensor ret = op_floor_out(in, out);
+    Tensor ret = op_floor_out(in, out);
 
-  EXPECT_TENSOR_EQ(out, ret);
-  EXPECT_TENSOR_EQ(out, expected);
-}
+    EXPECT_TENSOR_EQ(out, ret);
+    EXPECT_TENSOR_EQ(out, expected);
+  }
+};
 
-TEST_F(OpFloorTest, HalfSupport) {
+TEST_F(OpFloorTest, AllFloatDtypeSupport) {
+#define TEST_ENTRY(ctype, dtype) test_floor_float_dtype<ScalarType::dtype>();
   if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
-    GTEST_SKIP() << "Test Half support only for ExecuTorch mode";
+    ET_FORALL_FLOAT_TYPES(TEST_ENTRY);
+  } else {
+    ET_FORALL_FLOATHBF16_TYPES(TEST_ENTRY);
   }
-  TensorFactory<ScalarType::Half> tf;
-
-  Tensor in = tf.make({1, 7}, {-3.0, -2.99, -1.01, 0.0, 1.01, 2.99, 3.0});
-  Tensor out = tf.zeros({1, 7});
-  Tensor expected = tf.make({1, 7}, {-3.0, -3.0, -2.0, 0.0, 1.0, 2.0, 3.0});
-
-  Tensor ret = op_floor_out(in, out);
-
-  EXPECT_TENSOR_EQ(out, ret);
-  EXPECT_TENSOR_EQ(out, expected);
+#undef TEST_ENTRY
 }
40 changes: 27 additions & 13 deletions kernels/test/op_trunc_test.cpp
@@ -22,19 +22,33 @@ using executorch::aten::Tensor;
 using torch::executor::testing::SupportedFeatures;
 using torch::executor::testing::TensorFactory;
 
-Tensor& op_trunc_out(const Tensor& a, Tensor& out) {
-  executorch::runtime::KernelRuntimeContext context{};
-  return torch::executor::aten::trunc_outf(context, a, out);
-}
+class OpTruncTest : public OperatorTest {
+ protected:
+  Tensor& op_trunc_out(const Tensor& self, Tensor& out) {
+    return torch::executor::aten::trunc_outf(context_, self, out);
+  }
+
+  template <ScalarType DTYPE>
+  void test_trunc_float_dtype() {
+    TensorFactory<DTYPE> tf;
+
+    Tensor in = tf.make({1, 6}, {60.5, 16.25, -95.0, -36.125, 19.0, -47.75});
+    Tensor out = tf.zeros({1, 6});
+    Tensor expected = tf.make({1, 6}, {60.0, 16.0, -95.0, -36.0, 19.0, -47.0});
+
+    Tensor ret = op_trunc_out(in, out);
 
-TEST(OpTruncOutTest, SmokeTest) {
-  TensorFactory<ScalarType::Double> tfDouble;
+    EXPECT_TENSOR_EQ(out, ret);
+    EXPECT_TENSOR_EQ(out, expected);
+  }
+};
 
-  Tensor self =
-      tfDouble.make({1, 6}, {60.5, 16.25, -95.0, -36.125, 19.0, -47.75});
-  Tensor out = tfDouble.zeros({1, 6});
-  Tensor out_expected =
-      tfDouble.make({1, 6}, {60.0, 16.0, -95.0, -36.0, 19.0, -47.0});
-  op_trunc_out(self, out);
-  EXPECT_TENSOR_CLOSE(out, out_expected);
+TEST_F(OpTruncTest, AllFloatDtypeSupport) {
+#define TEST_ENTRY(ctype, dtype) test_trunc_float_dtype<ScalarType::dtype>();
+  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
+    ET_FORALL_FLOAT_TYPES(TEST_ENTRY);
+  } else {
+    ET_FORALL_FLOATHBF16_TYPES(TEST_ENTRY);
+  }
+#undef TEST_ENTRY
 }