diff --git a/backends/cadence/reference/operators/quantized_layer_norm.cpp b/backends/cadence/reference/operators/quantized_layer_norm.cpp
index ab1af935eec..574bcef1b22 100644
--- a/backends/cadence/reference/operators/quantized_layer_norm.cpp
+++ b/backends/cadence/reference/operators/quantized_layer_norm.cpp
@@ -11,7 +11,8 @@
 
 #include 
 
-using Tensor = exec_aten::Tensor;
+using executorch::aten::Tensor;
+using executorch::runtime::getLeadingDims;
 using executorch::runtime::KernelRuntimeContext;
 
 namespace impl {
diff --git a/backends/cadence/reference/operators/quantized_linear_out.cpp b/backends/cadence/reference/operators/quantized_linear_out.cpp
index 300158d8e5e..c85e3a59603 100644
--- a/backends/cadence/reference/operators/quantized_linear_out.cpp
+++ b/backends/cadence/reference/operators/quantized_linear_out.cpp
@@ -13,7 +13,8 @@
 namespace impl {
 namespace reference {
 namespace native {
 
-using Tensor = exec_aten::Tensor;
+using executorch::aten::Tensor;
+using executorch::runtime::getLeadingDims;
 using executorch::runtime::KernelRuntimeContext;
 
 void quantized_linear_out(
diff --git a/backends/cadence/reference/operators/quantized_matmul_out.cpp b/backends/cadence/reference/operators/quantized_matmul_out.cpp
index b381a8ee394..b0a9393cd01 100644
--- a/backends/cadence/reference/operators/quantized_matmul_out.cpp
+++ b/backends/cadence/reference/operators/quantized_matmul_out.cpp
@@ -13,7 +13,8 @@
 namespace impl {
 namespace reference {
 namespace native {
 
-using Tensor = exec_aten::Tensor;
+using executorch::aten::Tensor;
+using executorch::runtime::getLeadingDims;
 using executorch::runtime::KernelRuntimeContext;
 
 // The quantized matmul. The quantized matmul accumulates in a wider register,
diff --git a/kernels/optimized/blas/BlasKernel.h b/kernels/optimized/blas/BlasKernel.h
index 2c03ed0b638..c2b03cfebdd 100644
--- a/kernels/optimized/blas/BlasKernel.h
+++ b/kernels/optimized/blas/BlasKernel.h
@@ -16,12 +16,6 @@
 
 #include 
 
-namespace torch {
-namespace executor {
-struct BFloat16;
-} // namespace executor
-} // namespace torch
-
 namespace executorch {
 namespace cpublas {
diff --git a/kernels/portable/cpu/util/test/broadcast_test.cpp b/kernels/portable/cpu/util/test/broadcast_test.cpp
index 9ad6555d94e..d87e8ecec85 100644
--- a/kernels/portable/cpu/util/test/broadcast_test.cpp
+++ b/kernels/portable/cpu/util/test/broadcast_test.cpp
@@ -22,6 +22,12 @@ using exec_aten::ScalarType;
 using exec_aten::Tensor;
 using executorch::runtime::ArrayRef;
 using executorch::runtime::testing::TensorFactory;
+using torch::executor::broadcast_tensor;
+using torch::executor::delinearize_index;
+using torch::executor::get_broadcast_target_size;
+using torch::executor::linearize_access_indexes;
+using torch::executor::tensor_is_broadcastable_to;
+using torch::executor::tensors_are_broadcastable_between;
 
 TEST(BroadcastUtilTest, BroadcastTensor) {
   TensorFactory tf;
diff --git a/kernels/portable/cpu/util/test/reduce_test.cpp b/kernels/portable/cpu/util/test/reduce_test.cpp
index 9ee37aab657..e7bb03c30c8 100644
--- a/kernels/portable/cpu/util/test/reduce_test.cpp
+++ b/kernels/portable/cpu/util/test/reduce_test.cpp
@@ -19,7 +19,10 @@ using exec_aten::ArrayRef;
 using exec_aten::optional;
 using exec_aten::ScalarType;
 using exec_aten::Tensor;
-using torch::executor::testing::TensorFactory;
+using executorch::runtime::testing::TensorFactory;
+using torch::executor::apply_over_dim;
+using torch::executor::apply_over_dim_list;
+using torch::executor::get_out_numel;
 
 void _apply_over_dim(const Tensor& in, const optional& dim) {
   int64_t* in_data = in.mutable_data_ptr();
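Reviewer note: the Cadence operator files above now name `executorch::runtime::getLeadingDims` explicitly instead of relying on an unqualified lookup. A minimal sketch of what that helper is typically used for, with an illustrative shape that is not taken from this patch:

```cpp
// getLeadingDims(t, d) returns the product of t's sizes before dimension d,
// i.e. how many independent leading "batch" slices a kernel must iterate.
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>

using executorch::aten::Tensor;
using executorch::runtime::getLeadingDims;

void process_rows(const Tensor& t) {
  // For a [4, 3, 5] tensor: leading_dims == 4 * 3 == 12 rows of length 5.
  const size_t leading_dims = getLeadingDims(t, t.dim() - 1);
  const size_t row_len = t.size(t.dim() - 1);
  const float* data = t.const_data_ptr<float>();
  for (size_t i = 0; i < leading_dims; ++i) {
    const float* row = data + i * row_len;
    (void)row; // ... operate on row[0 .. row_len) ...
  }
}
```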
diff --git a/kernels/test/custom_kernel_example/my_functions.yaml b/kernels/test/custom_kernel_example/my_functions.yaml
index 72f1d2cf865..de5ce952ab4 100644
--- a/kernels/test/custom_kernel_example/my_functions.yaml
+++ b/kernels/test/custom_kernel_example/my_functions.yaml
@@ -5,4 +5,4 @@
 - op: relu.out
   kernels:
     - arg_meta: null
-      kernel_name: torch::my_custom_kernel::my_relu_out
+      kernel_name: my_custom_kernels::my_relu_out
diff --git a/kernels/test/custom_kernel_example/op_relu.cpp b/kernels/test/custom_kernel_example/op_relu.cpp
index e59fbf4bd72..39be620d86b 100644
--- a/kernels/test/custom_kernel_example/op_relu.cpp
+++ b/kernels/test/custom_kernel_example/op_relu.cpp
@@ -12,14 +12,15 @@
 #include 
 #include 
 
-namespace torch {
-namespace my_custom_kernel {
+namespace my_custom_kernels {
 namespace native {
 
-using Tensor = exec_aten::Tensor;
-using ScalarType = exec_aten::ScalarType;
-using executor::Error;
+using exec_aten::ScalarType;
+using exec_aten::Tensor;
+using executorch::runtime::Error;
 using executorch::runtime::KernelRuntimeContext;
+using executorch::runtime::resize_tensor;
+using executorch::runtime::tensors_have_same_shape_and_dtype;
 
 namespace {
 
@@ -67,7 +68,7 @@ my_relu_out(KernelRuntimeContext& context, const Tensor& input, Tensor& out) {
   resize(out, input.sizes());
   ET_KERNEL_CHECK(
       context,
-      executor::tensors_have_same_shape_and_dtype(input, out),
+      tensors_have_same_shape_and_dtype(input, out),
       InvalidArgument,
       out);
 
@@ -94,5 +95,4 @@ my_relu_out(KernelRuntimeContext& context, const Tensor& input, Tensor& out) {
 }
 
 } // namespace native
-} // namespace my_custom_kernel
-} // namespace torch
+} // namespace my_custom_kernels
diff --git a/kernels/test/op_add_test.cpp b/kernels/test/op_add_test.cpp
index e35a4100c9a..0e4e2fc6359 100644
--- a/kernels/test/op_add_test.cpp
+++ b/kernels/test/op_add_test.cpp
@@ -18,11 +18,12 @@
 #include 
 
 using namespace ::testing;
-using exec_aten::Scalar;
-using exec_aten::ScalarType;
-using exec_aten::Tensor;
+using executorch::aten::Scalar;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
+using executorch::runtime::testing::TensorFactory;
 using torch::executor::testing::SupportedFeatures;
-using torch::executor::testing::TensorFactory;
+namespace etrt = executorch::runtime;
 
 class OpAddOutKernelTest : public OperatorTest {
  protected:
@@ -63,7 +64,8 @@ class OpAddOutKernelTest : public OperatorTest {
     test_add();
     test_add();
     // Integral out type is only allowed if both inputs are integral types
-    if (isIntegralType(DTYPE_A, false) && isIntegralType(DTYPE_B, false)) {
+    if (etrt::isIntegralType(DTYPE_A, false) &&
+        etrt::isIntegralType(DTYPE_B, false)) {
       test_add();
       test_add();
     }
diff --git a/kernels/test/op_mul_test.cpp b/kernels/test/op_mul_test.cpp
index f8205ea601e..f3c9e54c862 100644
--- a/kernels/test/op_mul_test.cpp
+++ b/kernels/test/op_mul_test.cpp
@@ -17,11 +17,12 @@
 #include 
 
 using namespace ::testing;
-using exec_aten::Scalar;
-using exec_aten::ScalarType;
-using exec_aten::Tensor;
+using executorch::aten::Scalar;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
+using executorch::runtime::testing::TensorFactory;
 using torch::executor::testing::SupportedFeatures;
-using torch::executor::testing::TensorFactory;
+namespace etrt = executorch::runtime;
 
 class OpMulOutTest : public OperatorTest {
  protected:
@@ -61,7 +62,8 @@ class OpMulOutTest : public OperatorTest {
     test_mul();
     test_mul();
    // Integral out type is only allowed if both inputs are integral types
-    if (isIntegralType(DTYPE_A, false) && isIntegralType(DTYPE_B, false)) {
+    if (etrt::isIntegralType(DTYPE_A, false) &&
+        etrt::isIntegralType(DTYPE_B, false)) {
      test_mul();
      test_mul();
    }
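Reviewer note: the my_functions.yaml change has to mirror the C++ rename in op_relu.cpp, since the generated registration code resolves `kernel_name` as a fully qualified symbol. Separately, the op tests above stop calling `isIntegralType` unqualified and route it through the `etrt` alias. A small sketch of that pattern, assuming the usual `scalar_type_util.h` header location; the helper function here is hypothetical:

```cpp
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>

namespace etrt = executorch::runtime;
using executorch::aten::ScalarType;

// Mirrors the guard in the tests: an integral out dtype is only valid
// when both input dtypes are integral.
bool both_integral(ScalarType a, ScalarType b) {
  // The second argument controls whether Bool counts as integral.
  return etrt::isIntegralType(a, /*includeBool=*/false) &&
      etrt::isIntegralType(b, /*includeBool=*/false);
}
// both_integral(ScalarType::Int, ScalarType::Long)  -> true
// both_integral(ScalarType::Int, ScalarType::Float) -> false
```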
diff --git a/kernels/test/op_sub_test.cpp b/kernels/test/op_sub_test.cpp
index 9f795516723..886adaf2e9d 100644
--- a/kernels/test/op_sub_test.cpp
+++ b/kernels/test/op_sub_test.cpp
@@ -16,11 +16,12 @@
 #include 
 
 using namespace ::testing;
-using exec_aten::Scalar;
-using exec_aten::ScalarType;
-using exec_aten::Tensor;
+using executorch::aten::Scalar;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
+using executorch::runtime::testing::TensorFactory;
 using torch::executor::testing::SupportedFeatures;
-using torch::executor::testing::TensorFactory;
+namespace etrt = executorch::runtime;
 
 class OpSubOutTest : public OperatorTest {
  protected:
@@ -60,7 +61,8 @@ class OpSubOutTest : public OperatorTest {
     test_sub();
     test_sub();
     // Integral out type is only allowed if both inputs are integral types
-    if (isIntegralType(DTYPE_A, false) && isIntegralType(DTYPE_B, false)) {
+    if (etrt::isIntegralType(DTYPE_A, false) &&
+        etrt::isIntegralType(DTYPE_B, false)) {
       test_sub();
       test_sub();
     }
diff --git a/runtime/core/exec_aten/testing_util/tensor_util.cpp b/runtime/core/exec_aten/testing_util/tensor_util.cpp
index 0301cc9a519..d2ced08d0fa 100644
--- a/runtime/core/exec_aten/testing_util/tensor_util.cpp
+++ b/runtime/core/exec_aten/testing_util/tensor_util.cpp
@@ -208,8 +208,9 @@ bool tensor_lists_are_close(
  * These functions must be declared in the original namespaces of their
  * associated types so that C++ can find them.
  */
-namespace torch {
-namespace executor {
+namespace executorch {
+namespace runtime {
+namespace etensor {
 
 /**
  * Prints the ScalarType to the stream as a human-readable string.
@@ -298,7 +299,8 @@ std::ostream& operator<<(std::ostream& os, const Tensor& t) {
   return os;
 }
 
-} // namespace executor
-} // namespace torch
+} // namespace etensor
+} // namespace runtime
+} // namespace executorch
 
 #endif // !USE_ATEN_LIB
diff --git a/runtime/core/exec_aten/testing_util/tensor_util.h b/runtime/core/exec_aten/testing_util/tensor_util.h
index 00f3c782c2f..8ea89615115 100644
--- a/runtime/core/exec_aten/testing_util/tensor_util.h
+++ b/runtime/core/exec_aten/testing_util/tensor_util.h
@@ -325,23 +325,25 @@ MATCHER_P(IsListEqualTo, other, "") {
  * These functions must be declared in the original namespaces of their
  * associated types so that C++ can find them.
  */
-namespace torch {
-namespace executor {
+namespace executorch {
+namespace runtime {
+namespace etensor {
 
 /**
  * Prints the ScalarType to the stream as a human-readable string.
 *
 * See also executorch::runtime::toString(ScalarType t) in ScalarTypeUtil.h.
 */
-std::ostream& operator<<(std::ostream& os, const exec_aten::ScalarType& t);
+std::ostream& operator<<(std::ostream& os, const ScalarType& t);
 
 /**
  * Prints the Tensor to the stream as a human-readable string.
 */
-std::ostream& operator<<(std::ostream& os, const exec_aten::Tensor& t);
+std::ostream& operator<<(std::ostream& os, const Tensor& t);
 
-} // namespace executor
-} // namespace torch
+} // namespace etensor
+} // namespace runtime
+} // namespace executorch
 
 #endif // !USE_ATEN_LIB
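Reviewer note: the comment kept in tensor_util.h ("must be declared in the original namespaces of their associated types so that C++ can find them") is about argument-dependent lookup, which is why the `operator<<` declarations travel with the types into `executorch::runtime::etensor`. A self-contained illustration, with a hypothetical `Widget` type that is not part of this PR:

```cpp
// ADL only searches the namespaces that the argument's type lives in,
// so a stream operator must be declared next to its type.
#include <iostream>

namespace executorch { namespace runtime { namespace etensor {
struct Widget {};
// Found by ADL because Widget lives in this namespace.
inline std::ostream& operator<<(std::ostream& os, const Widget&) {
  return os << "Widget";
}
}}} // namespace executorch::runtime::etensor

int main() {
  executorch::runtime::etensor::Widget w;
  std::cout << w << '\n'; // resolves via ADL; no using-declaration needed
}
```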
diff --git a/runtime/core/portable_type/bfloat16.h b/runtime/core/portable_type/bfloat16.h
index e665e6152e3..c1ff250885a 100644
--- a/runtime/core/portable_type/bfloat16.h
+++ b/runtime/core/portable_type/bfloat16.h
@@ -14,8 +14,9 @@
 #include 
 #include 
 
-namespace torch {
-namespace executor {
+namespace executorch {
+namespace runtime {
+namespace etensor {
 namespace internal {
 
 inline float f32_from_bits(uint16_t src) {
@@ -26,12 +27,6 @@ inline float f32_from_bits(uint16_t src) {
   return res;
 }
 
-inline uint16_t bits_from_f32(float src) {
-  uint32_t res = 0;
-  std::memcpy(&res, &src, sizeof(res));
-  return res >> 16;
-}
-
 inline uint16_t round_to_nearest_even(float src) {
   if (std::isnan(src)) {
     return UINT16_C(0x7FC0);
@@ -264,13 +259,22 @@ inline bool operator<(BFloat16& lhs, BFloat16& rhs) {
   return float(lhs) < float(rhs);
 }
 
+} // namespace etensor
+} // namespace runtime
+} // namespace executorch
+
+namespace torch {
+namespace executor {
+// TODO(T197294990): Remove these deprecated aliases once all users have moved
+// to the new `::executorch` namespaces.
+using ::executorch::runtime::etensor::BFloat16;
 } // namespace executor
 } // namespace torch
 
 namespace std {
 
 template <>
-class numeric_limits<torch::executor::BFloat16> {
+class numeric_limits<executorch::runtime::etensor::BFloat16> {
  public:
   static constexpr bool is_signed = true;
   static constexpr bool is_specialized = true;
diff --git a/runtime/core/portable_type/bits_types.h b/runtime/core/portable_type/bits_types.h
index 076ee642d75..cddffc485ec 100644
--- a/runtime/core/portable_type/bits_types.h
+++ b/runtime/core/portable_type/bits_types.h
@@ -9,8 +9,9 @@
 #pragma once
 #include 
 
-namespace torch {
-namespace executor {
+namespace executorch {
+namespace runtime {
+namespace etensor {
 
 /**
  * bits1x8 is an uninterpreted dtype of a tensor with 1 bit (packed to byte
@@ -65,5 +66,18 @@ struct alignas(2) bits16 {
   explicit bits16(uint16_t val) : val_(val) {}
 };
 
+} // namespace etensor
+} // namespace runtime
+} // namespace executorch
+
+namespace torch {
+namespace executor {
+// TODO(T197294990): Remove these deprecated aliases once all users have moved
+// to the new `::executorch` namespaces.
+using ::executorch::runtime::etensor::bits16;
+using ::executorch::runtime::etensor::bits1x8;
+using ::executorch::runtime::etensor::bits2x4;
+using ::executorch::runtime::etensor::bits4x2;
+using ::executorch::runtime::etensor::bits8;
 } // namespace executor
 } // namespace torch
diff --git a/runtime/core/portable_type/complex.h b/runtime/core/portable_type/complex.h
index 0d4684a992e..e89a19e54d7 100644
--- a/runtime/core/portable_type/complex.h
+++ b/runtime/core/portable_type/complex.h
@@ -10,8 +10,9 @@
 
 #include 
 
-namespace torch {
-namespace executor {
+namespace executorch {
+namespace runtime {
+namespace etensor {
 
 /**
  * An implementation of complex numbers, compatible with c10/util/complex.h from
@@ -32,5 +33,14 @@ struct alignas(4) complex {
   Half imag_;
 };
 
+} // namespace etensor
+} // namespace runtime
+} // namespace executorch
+
+namespace torch {
+namespace executor {
+// TODO(T197294990): Remove these deprecated aliases once all users have moved
+// to the new `::executorch` namespaces.
+using ::executorch::runtime::etensor::complex;
 } // namespace executor
 } // namespace torch
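Reviewer note: every portable type in this PR follows the same recipe, move the definition into `executorch::runtime::etensor` and leave a using-declaration behind in `torch::executor`. Because the alias names the same type, both spellings stay interchangeable during the migration window; a quick check one could drop into a test (this static_assert is illustrative, not part of the patch):

```cpp
#include <executorch/runtime/core/portable_type/bfloat16.h>
#include <type_traits>

// The deprecated alias and the new name must refer to one type, so code
// written against either namespace keeps compiling and interoperating.
static_assert(
    std::is_same<
        torch::executor::BFloat16,
        executorch::runtime::etensor::BFloat16>::value,
    "alias and canonical name are the same type");
```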
diff --git a/runtime/core/portable_type/device.h b/runtime/core/portable_type/device.h
index 7c09cfd29c3..d789df8a84d 100644
--- a/runtime/core/portable_type/device.h
+++ b/runtime/core/portable_type/device.h
@@ -10,8 +10,9 @@
 
 #include 
 
-namespace torch {
-namespace executor {
+namespace executorch {
+namespace runtime {
+namespace etensor {
 
 /// Denotes the specific genre of compute device.
 /// Subset of https://github.com/pytorch/pytorch/blob/main/c10/core/Device.h
@@ -59,5 +60,15 @@ struct Device final {
   DeviceIndex index_ = -1;
 };
 
+} // namespace etensor
+} // namespace runtime
+} // namespace executorch
+
+namespace torch {
+namespace executor {
+// TODO(T197294990): Remove these deprecated aliases once all users have moved
+// to the new `::executorch` namespaces.
+using ::executorch::runtime::etensor::Device;
+using ::executorch::runtime::etensor::DeviceType;
 } // namespace executor
 } // namespace torch
diff --git a/runtime/core/portable_type/half.h b/runtime/core/portable_type/half.h
index 8987d82804b..fa40a80782f 100644
--- a/runtime/core/portable_type/half.h
+++ b/runtime/core/portable_type/half.h
@@ -32,8 +32,9 @@
 #endif // __x86_64__ || _M_X64 || __i386 || _M_IX86
 #endif // __GNUC__ || __clang__
 
-namespace torch {
-namespace executor {
+namespace executorch {
+namespace runtime {
+namespace etensor {
 
 /**
  * A half-precision floating point type, compatible with c10/util/Half.h from
@@ -676,18 +677,26 @@ inline Half operator/(int64_t a, Half b) {
 
 static inline std::ostream& operator<<(
     std::ostream& out,
-    const torch::executor::Half& value) {
+    const executorch::runtime::etensor::Half& value) {
   out << (float)value;
   return out;
 }
 
+} // namespace etensor
+} // namespace runtime
+} // namespace executorch
+namespace torch {
+namespace executor {
+// TODO(T197294990): Remove these deprecated aliases once all users have moved
+// to the new `::executorch` namespaces.
+using ::executorch::runtime::etensor::Half;
 } // namespace executor
 } // namespace torch
 
 namespace std {
 
 template <>
-class numeric_limits<torch::executor::Half> {
+class numeric_limits<executorch::runtime::etensor::Half> {
  public:
   static constexpr bool is_specialized = true;
   static constexpr bool is_signed = true;
@@ -714,32 +723,41 @@ class numeric_limits<torch::executor::Half> {
   static constexpr auto traps = numeric_limits<float>::traps;
   static constexpr auto tinyness_before =
       numeric_limits<float>::tinyness_before;
 
-  static constexpr torch::executor::Half min() {
-    return torch::executor::Half(0x0400, torch::executor::Half::from_bits());
+  static constexpr executorch::runtime::etensor::Half min() {
+    return executorch::runtime::etensor::Half(
+        0x0400, executorch::runtime::etensor::Half::from_bits());
   }
-  static constexpr torch::executor::Half lowest() {
-    return torch::executor::Half(0xFBFF, torch::executor::Half::from_bits());
+  static constexpr executorch::runtime::etensor::Half lowest() {
+    return executorch::runtime::etensor::Half(
+        0xFBFF, executorch::runtime::etensor::Half::from_bits());
   }
-  static constexpr torch::executor::Half max() {
-    return torch::executor::Half(0x7BFF, torch::executor::Half::from_bits());
+  static constexpr executorch::runtime::etensor::Half max() {
+    return executorch::runtime::etensor::Half(
+        0x7BFF, executorch::runtime::etensor::Half::from_bits());
   }
-  static constexpr torch::executor::Half epsilon() {
-    return torch::executor::Half(0x1400, torch::executor::Half::from_bits());
+  static constexpr executorch::runtime::etensor::Half epsilon() {
+    return executorch::runtime::etensor::Half(
+        0x1400, executorch::runtime::etensor::Half::from_bits());
  }
-  static constexpr torch::executor::Half round_error() {
-    return torch::executor::Half(0x3800, torch::executor::Half::from_bits());
+  static constexpr executorch::runtime::etensor::Half round_error() {
+    return executorch::runtime::etensor::Half(
+        0x3800, executorch::runtime::etensor::Half::from_bits());
   }
-  static constexpr torch::executor::Half infinity() {
-    return torch::executor::Half(0x7C00, torch::executor::Half::from_bits());
+  static constexpr executorch::runtime::etensor::Half infinity() {
+    return executorch::runtime::etensor::Half(
+        0x7C00, executorch::runtime::etensor::Half::from_bits());
   }
-  static constexpr torch::executor::Half quiet_NaN() {
-    return torch::executor::Half(0x7E00, torch::executor::Half::from_bits());
+  static constexpr executorch::runtime::etensor::Half quiet_NaN() {
+    return executorch::runtime::etensor::Half(
+        0x7E00, executorch::runtime::etensor::Half::from_bits());
   }
-  static constexpr torch::executor::Half signaling_NaN() {
-    return torch::executor::Half(0x7D00, torch::executor::Half::from_bits());
+  static constexpr executorch::runtime::etensor::Half signaling_NaN() {
+    return executorch::runtime::etensor::Half(
+        0x7D00, executorch::runtime::etensor::Half::from_bits());
   }
-  static constexpr torch::executor::Half denorm_min() {
-    return torch::executor::Half(0x0001, torch::executor::Half::from_bits());
+  static constexpr executorch::runtime::etensor::Half denorm_min() {
+    return executorch::runtime::etensor::Half(
+        0x0001, executorch::runtime::etensor::Half::from_bits());
   }
 };
diff --git a/runtime/core/portable_type/optional.h b/runtime/core/portable_type/optional.h
index 3d8cb41eac8..21fe0d39267 100644
--- a/runtime/core/portable_type/optional.h
+++ b/runtime/core/portable_type/optional.h
@@ -12,8 +12,9 @@
 #include 
 #include  // std::forward and other template magic checks
 
-namespace torch {
-namespace executor {
+namespace executorch {
+namespace runtime {
+namespace etensor {
 
 /// Used to indicate an optional type with uninitialized state.
 struct nullopt_t final {
@@ -177,5 +178,16 @@ class optional final {
   bool init_;
 };
 
+} // namespace etensor
+} // namespace runtime
+} // namespace executorch
+
+namespace torch {
+namespace executor {
+// TODO(T197294990): Remove these deprecated aliases once all users have moved
+// to the new `::executorch` namespaces.
+using ::executorch::runtime::etensor::nullopt;
+using ::executorch::runtime::etensor::nullopt_t;
+using ::executorch::runtime::etensor::optional;
 } // namespace executor
 } // namespace torch
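Reviewer note: for callers, the optional migration above is a pure rename. A minimal usage sketch under the new names, assuming the header path shown in this diff:

```cpp
#include <executorch/runtime/core/portable_type/optional.h>

using executorch::runtime::etensor::nullopt;
using executorch::runtime::etensor::optional;

// Returns a present or an empty optional, mirroring std::optional usage.
optional<int> maybe_dim(bool has_dim) {
  if (has_dim) {
    return 2; // wraps into optional<int>
  }
  return nullopt; // empty optional
}
// maybe_dim(true).has_value() == true; maybe_dim(false).has_value() == false
```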
diff --git a/runtime/core/portable_type/qint_types.h b/runtime/core/portable_type/qint_types.h
index f7c78e3a180..183675e1829 100644
--- a/runtime/core/portable_type/qint_types.h
+++ b/runtime/core/portable_type/qint_types.h
@@ -10,8 +10,9 @@
 
 #include 
 
-namespace torch {
-namespace executor {
+namespace executorch {
+namespace runtime {
+namespace etensor {
 
 /**
  * qint8 is for signed 8 bit quantized Tensors
@@ -65,5 +66,18 @@ struct alignas(1) quint2x4 {
   explicit quint2x4(uint8_t val) : val_(val) {}
 };
 
+} // namespace etensor
+} // namespace runtime
+} // namespace executorch
+
+namespace torch {
+namespace executor {
+// TODO(T197294990): Remove these deprecated aliases once all users have moved
+// to the new `::executorch` namespaces.
+using ::executorch::runtime::etensor::qint32;
+using ::executorch::runtime::etensor::qint8;
+using ::executorch::runtime::etensor::quint2x4;
+using ::executorch::runtime::etensor::quint4x2;
+using ::executorch::runtime::etensor::quint8;
 } // namespace executor
 } // namespace torch
diff --git a/runtime/core/portable_type/scalar.h b/runtime/core/portable_type/scalar.h
index 1147fee7cc9..0922cec6b95 100644
--- a/runtime/core/portable_type/scalar.h
+++ b/runtime/core/portable_type/scalar.h
@@ -16,8 +16,9 @@
 #include 
 #include 
 
-namespace torch {
-namespace executor {
+namespace executorch {
+namespace runtime {
+namespace etensor {
 
 /**
  * Represents a scalar value.
@@ -109,5 +110,14 @@ ET_DEFINE_SCALAR_TO_METHOD(int64_t, Int)
 ET_DEFINE_SCALAR_TO_METHOD(bool, Bool)
 #undef ET_DEFINE_SCALAR_TO_METHOD
 
+} // namespace etensor
+} // namespace runtime
+} // namespace executorch
+
+namespace torch {
+namespace executor {
+// TODO(T197294990): Remove these deprecated aliases once all users have moved
+// to the new `::executorch` namespaces.
+using ::executorch::runtime::etensor::Scalar;
 } // namespace executor
 } // namespace torch
diff --git a/runtime/core/portable_type/scalar_type.h b/runtime/core/portable_type/scalar_type.h
index 5b06cd6ec62..286aee3387c 100644
--- a/runtime/core/portable_type/scalar_type.h
+++ b/runtime/core/portable_type/scalar_type.h
@@ -43,8 +43,9 @@
 #include 
 #include 
 
-namespace torch {
-namespace executor {
+namespace executorch {
+namespace runtime {
+namespace etensor {
 
 /**
  * Calls the provided macro on every ScalarType, providing the C type and the
@@ -98,5 +99,14 @@ enum class ScalarType : int8_t {
   NumOptions,
 };
 
+} // namespace etensor
+} // namespace runtime
+} // namespace executorch
+
+namespace torch {
+namespace executor {
+// TODO(T197294990): Remove these deprecated aliases once all users have moved
+// to the new `::executorch` namespaces.
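Reviewer note: scalar.h's `ET_DEFINE_SCALAR_TO_METHOD` macro, visible in the hunk above, is what generates the typed `to<T>()` extractors on `Scalar`. A hedged usage sketch under the new namespace; `add_half` is a hypothetical helper, not code from this PR:

```cpp
#include <executorch/runtime/core/portable_type/scalar.h>

using executorch::runtime::etensor::Scalar;

// Scalar is a small tagged union over bool/int64_t/double; extraction
// goes through the to<T>() methods the macro stamps out.
double add_half(const Scalar& s) {
  if (s.isFloatingPoint()) {
    return s.to<double>() + 0.5;
  }
  return static_cast<double>(s.to<int64_t>()) + 0.5;
}
// add_half(Scalar(3.0)) == 3.5; add_half(Scalar(int64_t{3})) == 3.5
```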
+using ::executorch::runtime::etensor::ScalarType;
 } // namespace executor
 } // namespace torch
diff --git a/runtime/core/portable_type/string_view.h b/runtime/core/portable_type/string_view.h
index 47a9f335eb5..977a0f542d0 100644
--- a/runtime/core/portable_type/string_view.h
+++ b/runtime/core/portable_type/string_view.h
@@ -14,14 +14,11 @@
 #include 
 
 // TODO(T154113473): Document this file
-namespace torch {
-namespace executor {
+namespace executorch {
+namespace runtime {
+namespace etensor {
 
-namespace {
-constexpr std::size_t min(const std::size_t a, const std::size_t b) {
-  return (b < a) ? b : a;
-}
-} // namespace
+namespace internal {
 
 /**
  * Reimplementation of std::string_view for C++11.
@@ -128,7 +125,7 @@ class basic_string_view final {
   size_type copy(pointer dest, size_type count, size_type pos = 0) const {
     ET_CHECK_MSG(pos > size_, "basic_string_view::copy: out of range.");
-    size_type copy_length = min_(count, size_ - pos);
+    size_type copy_length = min_(count, size_ - pos);
     for (auto iter = begin() + pos, end = iter + copy_length; iter != end;) {
       *(dest++) = *(iter++);
     }
@@ -145,7 +142,7 @@ class basic_string_view final {
   constexpr int compare(basic_string_view rhs) const noexcept {
 #if __cpp_constexpr >= 201304
     // if we are in C++14, write it iteratively. This is faster.
-    for (size_t i = 0, end = min(size(), rhs.size()); i < end; ++i) {
+    for (size_t i = 0, end = min_(size(), rhs.size()); i < end; ++i) {
       if (at_(i) < rhs.at_(i)) {
         return -1;
       } else if (at_(i) > rhs.at_(i)) {
@@ -315,7 +312,7 @@ class basic_string_view final {
     }
 
     if (v.size() <= size()) {
-      pos = min(size() - v.size(), pos);
+      pos = min_(size() - v.size(), pos);
       do {
         if (v.at_(0) == at_(pos) &&
             v.substr_(1).equals_(substr_(pos + 1, v.size() - 1))) {
@@ -432,6 +429,10 @@ class basic_string_view final {
   }
 
  private:
+  static constexpr std::size_t min_(const std::size_t a, const std::size_t b) {
+    return (b < a) ? b : a;
+  }
+
   static constexpr size_type strlen_(const_pointer str) noexcept {
 #if __cpp_constexpr >= 201304
     // if we are in C++14, write it iteratively. This is faster.
@@ -453,7 +454,7 @@ class basic_string_view final {
   constexpr basic_string_view substr_(size_type pos = 0, size_type count = npos)
       const {
-    return basic_string_view{begin_ + pos, min(count, size() - pos)};
+    return basic_string_view{begin_ + pos, min_(count, size() - pos)};
   }
 
   template 
@@ -485,7 +486,7 @@ class basic_string_view final {
 #if __cpp_constexpr >= 201304
     // if we are in C++14, write it iteratively. This is faster.
     if (size() > 0) {
-      pos = min(size() - 1, pos);
+      pos = min_(size() - 1, pos);
       do {
         if (condition(at_(pos))) {
           return pos;
@@ -570,7 +571,18 @@ inline void swap(
   lhs.swap(rhs);
 }
 
-using string_view = basic_string_view<char>;
+} // namespace internal
+using string_view = internal::basic_string_view<char>;
+
+} // namespace etensor
+} // namespace runtime
+} // namespace executorch
+
+namespace torch {
+namespace executor {
+// TODO(T197294990): Remove these deprecated aliases once all users have moved
+// to the new `::executorch` namespaces.
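Reviewer note: the string_view.h hunk replaces a header-scope anonymous-namespace `min` with a private static member `min_`. An anonymous namespace in a header gives every including translation unit its own internal copy of the symbol (and can trigger unused-function warnings); a private static member is visible only inside the class. A stripped-down illustration of the before/after shape, using a hypothetical `example_view` class:

```cpp
#include <cstddef>

// Before: at header scope, every includer got its own internal `min`:
//   namespace { constexpr std::size_t min(std::size_t a, std::size_t b); }
//
// After: the helper lives inside the class, emitted only where used.
class example_view final {
 public:
  std::size_t clamp(std::size_t count, std::size_t pos) const {
    return min_(count, size_ - pos);
  }

 private:
  static constexpr std::size_t min_(std::size_t a, std::size_t b) {
    return (b < a) ? b : a;
  }
  std::size_t size_ = 0;
};
```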
+using ::executorch::runtime::etensor::string_view;
 } // namespace executor
 } // namespace torch
diff --git a/runtime/core/portable_type/tensor.h b/runtime/core/portable_type/tensor.h
index b7cf245a82f..775bccc1b52 100644
--- a/runtime/core/portable_type/tensor.h
+++ b/runtime/core/portable_type/tensor.h
@@ -12,8 +12,9 @@
 
 #include 
 
-namespace torch {
-namespace executor {
+namespace executorch {
+namespace runtime {
+namespace etensor {
 
 /**
  * A minimal Tensor type whose API is a source compatible subset of at::Tensor.
@@ -161,5 +162,14 @@ class Tensor {
   TensorImpl* impl_ = nullptr;
 };
 
+} // namespace etensor
+} // namespace runtime
+} // namespace executorch
+
+namespace torch {
+namespace executor {
+// TODO(T197294990): Remove these deprecated aliases once all users have moved
+// to the new `::executorch` namespaces.
+using ::executorch::runtime::etensor::Tensor;
 } // namespace executor
 } // namespace torch
diff --git a/runtime/core/portable_type/tensor_impl.cpp b/runtime/core/portable_type/tensor_impl.cpp
index ad0fa5868c1..2082b8a4c70 100644
--- a/runtime/core/portable_type/tensor_impl.cpp
+++ b/runtime/core/portable_type/tensor_impl.cpp
@@ -17,8 +17,9 @@
 #include 
 #include 
 
-namespace torch {
-namespace executor {
+namespace executorch {
+namespace runtime {
+namespace etensor {
 
 /**
  * Compute the number of elements based on the sizes of a tensor.
@@ -119,5 +120,6 @@ Error TensorImpl::internal_resize_contiguous(ArrayRef new_sizes) {
   return Error::Ok;
 }
 
-} // namespace executor
-} // namespace torch
+} // namespace etensor
+} // namespace runtime
+} // namespace executorch
diff --git a/runtime/core/portable_type/tensor_impl.h b/runtime/core/portable_type/tensor_impl.h
index c48149cd187..fd2fd124c28 100644
--- a/runtime/core/portable_type/tensor_impl.h
+++ b/runtime/core/portable_type/tensor_impl.h
@@ -24,8 +24,9 @@ class TensorResizerFriend;
 } // namespace runtime
 } // namespace executorch
 
-namespace torch {
-namespace executor {
+namespace executorch {
+namespace runtime {
+namespace etensor {
 
 /**
  * Manages the storage behind an ETensor (torch::executor::Tensor).
@@ -266,16 +267,18 @@ class TensorImpl {
  * Compute the number of elements based on the sizes of a tensor.
 */
 ssize_t compute_numel(
-    const ::torch::executor::TensorImpl::SizesType* sizes,
+    const ::executorch::runtime::etensor::TensorImpl::SizesType* sizes,
     ssize_t dim);
 
-} // namespace executor
-} // namespace torch
+} // namespace etensor
+} // namespace runtime
+} // namespace executorch
 
-namespace executorch {
-namespace runtime {
+namespace torch {
+namespace executor {
 // TODO(T197294990): Remove these deprecated aliases once all users have moved
 // to the new `::executorch` namespaces.
-using torch::executor::compute_numel;
-} // namespace runtime
-} // namespace executorch
+using ::executorch::runtime::etensor::compute_numel;
+using ::executorch::runtime::etensor::TensorImpl;
+} // namespace executor
+} // namespace torch
diff --git a/runtime/core/portable_type/tensor_options.h b/runtime/core/portable_type/tensor_options.h
index a6e604cf837..8b8f9848648 100644
--- a/runtime/core/portable_type/tensor_options.h
+++ b/runtime/core/portable_type/tensor_options.h
@@ -10,8 +10,9 @@
 
 #include 
 
-namespace torch {
-namespace executor {
+namespace executorch {
+namespace runtime {
+namespace etensor {
 
 /**
  * Tensor data memory formats supported by ExecuTorch. This concept only exists
@@ -45,5 +46,15 @@ enum class Layout : int8_t {
  */
  Strided = 0,
 };
 
+} // namespace etensor
+} // namespace runtime
+} // namespace executorch
+
+namespace torch {
+namespace executor {
+// TODO(T197294990): Remove these deprecated aliases once all users have moved
+// to the new `::executorch` namespaces.
+using ::executorch::runtime::etensor::Layout;
+using ::executorch::runtime::etensor::MemoryFormat;
 } // namespace executor
 } // namespace torch
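Reviewer note: `compute_numel`, re-declared in the tensor_impl.h hunk above, is simply the product over the sizes array. A minimal sketch of calling it under the new namespace, assuming the header path stays as in this diff:

```cpp
#include <executorch/runtime/core/portable_type/tensor_impl.h>

using executorch::runtime::etensor::TensorImpl;

void numel_example() {
  TensorImpl::SizesType sizes[] = {2, 3, 4};
  // compute_numel multiplies the sizes: 2 * 3 * 4 == 24 elements.
  ssize_t n = executorch::runtime::etensor::compute_numel(sizes, /*dim=*/3);
  (void)n;
}
```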
diff --git a/runtime/core/portable_type/test/bfloat16_test.cpp b/runtime/core/portable_type/test/bfloat16_test.cpp
index 9ea53e6cba2..6b42a6e4a5e 100644
--- a/runtime/core/portable_type/test/bfloat16_test.cpp
+++ b/runtime/core/portable_type/test/bfloat16_test.cpp
@@ -1,8 +1,18 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
 #include 
 
 #include 
 
-using torch::executor::BFloat16;
+using executorch::runtime::etensor::BFloat16;
+using executorch::runtime::etensor::internal::f32_from_bits;
+using executorch::runtime::etensor::internal::round_to_nearest_even;
 
 namespace {
 
 float float_from_bytes(uint32_t sign, uint32_t exponent, uint32_t fraction) {
@@ -21,6 +31,13 @@ float float_from_bytes(uint32_t sign, uint32_t exponent, uint32_t fraction) {
   return res;
 }
 
+// Opposite of f32_from_bits.
+uint16_t bits_from_f32(float src) {
+  uint32_t res = 0;
+  std::memcpy(&res, &src, sizeof(res));
+  return res >> 16;
+}
+
 TEST(BFloat16Conversion, FloatToBFloat16AndBack) {
   // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-magic-numbers,modernize-avoid-c-arrays)
   float in[100];
@@ -35,8 +52,8 @@ TEST(BFloat16Conversion, FloatToBFloat16AndBack) {
   float out[100];
 
   for (int i = 0; i < 100; ++i) {
-    bfloats[i].x = torch::executor::internal::bits_from_f32(in[i]);
-    out[i] = torch::executor::internal::f32_from_bits(bfloats[i].x);
+    bfloats[i].x = bits_from_f32(in[i]);
+    out[i] = f32_from_bits(bfloats[i].x);
 
     // The relative error should be less than 1/(2^7) since BFloat16
     // has 7 bits mantissa.
@@ -58,8 +75,8 @@ TEST(BFloat16Conversion, FloatToBFloat16RNEAndBack) {
   float out[100];
 
   for (int i = 0; i < 100; ++i) {
-    bfloats[i].x = torch::executor::internal::round_to_nearest_even(in[i]);
-    out[i] = torch::executor::internal::f32_from_bits(bfloats[i].x);
+    bfloats[i].x = round_to_nearest_even(in[i]);
+    out[i] = f32_from_bits(bfloats[i].x);
 
     // The relative error should be less than 1/(2^7) since BFloat16
     // has 7 bits mantissa.
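Reviewer note: with `bits_from_f32` now private to this test, it is worth recording the difference between the two conversions the tests exercise. Truncation drops the low 16 bits outright; `round_to_nearest_even` rounds, breaking ties toward an even result. The values below are illustrative, chosen here rather than taken from the tests:

```cpp
#include <cstdint>
#include <cstring>

// Same operation as the test-local bits_from_f32 above.
uint16_t truncate_f32(float src) {
  uint32_t bits = 0;
  std::memcpy(&bits, &src, sizeof(bits));
  return static_cast<uint16_t>(bits >> 16);
}

// float 1.0f + 2^-8 has bit pattern 0x3F808000 (low half exactly 0x8000):
//  - truncation keeps 0x3F80 (exactly 1.0 in bfloat16)
//  - round-to-nearest-even sees a tie and keeps the even 0x3F80 as well
// but 0x3F808001, just above the tie, rounds up to 0x3F81 under RNE
// while truncation still yields 0x3F80.
```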
@@ -72,7 +89,7 @@ TEST(BFloat16Conversion, NaN) {
   EXPECT_TRUE(std::isnan(inNaN));
 
   BFloat16 a = BFloat16(inNaN);
-  float out = torch::executor::internal::f32_from_bits(a.x);
+  float out = f32_from_bits(a.x);
 
   EXPECT_TRUE(std::isnan(out));
 }
@@ -82,7 +99,7 @@ TEST(BFloat16Conversion, Inf) {
   EXPECT_TRUE(std::isinf(inInf));
 
   BFloat16 a = BFloat16(inInf);
-  float out = torch::executor::internal::f32_from_bits(a.x);
+  float out = f32_from_bits(a.x);
 
   EXPECT_TRUE(std::isinf(out));
 }
@@ -91,7 +108,7 @@ TEST(BFloat16Conversion, SmallestDenormal) {
   float in = std::numeric_limits<float>::denorm_min(); // The smallest non-zero
                                                        // subnormal number
   BFloat16 a = BFloat16(in);
-  float out = torch::executor::internal::f32_from_bits(a.x);
+  float out = f32_from_bits(a.x);
 
   EXPECT_FLOAT_EQ(in, out);
 }
@@ -112,10 +129,10 @@ TEST(BFloat16Math, Addition) {
   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
   BFloat16 b;
-  b.x = torch::executor::internal::bits_from_f32(input);
+  b.x = bits_from_f32(input);
   b = b + b;
 
-  float res = torch::executor::internal::f32_from_bits(b.x);
+  float res = f32_from_bits(b.x);
   EXPECT_EQ(res, expected);
 }
@@ -135,10 +152,10 @@ TEST(BFloat16Math, Subtraction) {
   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
   BFloat16 b;
-  b.x = torch::executor::internal::bits_from_f32(input);
+  b.x = bits_from_f32(input);
   b = b - 5;
 
-  float res = torch::executor::internal::f32_from_bits(b.x);
+  float res = f32_from_bits(b.x);
   EXPECT_EQ(res, expected);
 }
@@ -174,7 +191,7 @@ class BFloat16Test : public ::testing::Test,
 TEST_P(BFloat16Test, BFloat16RNETest) {
   float value = BinaryToFloat(GetParam().input);
 
-  uint16_t rounded = torch::executor::internal::round_to_nearest_even(value);
+  uint16_t rounded = round_to_nearest_even(value);
   EXPECT_EQ(GetParam().rne, rounded);
 }
diff --git a/runtime/core/portable_type/test/half_test.cpp b/runtime/core/portable_type/test/half_test.cpp
index 18ab6cb4b22..0d5dca0e958 100644
--- a/runtime/core/portable_type/test/half_test.cpp
+++ b/runtime/core/portable_type/test/half_test.cpp
@@ -11,8 +11,8 @@
 #include 
 #include 
 
-namespace torch {
-namespace executor {
+using executorch::runtime::etensor::Half;
+
 namespace {
 
 /**
@@ -211,6 +211,3 @@ TEST(HalfTest, ArithmeticInt64Div) {
   EXPECT_TRUE(closeEnoughFloat16(ah / b, af / b));
   EXPECT_TRUE(closeEnoughFloat16(b / ah, b / af));
 }
-
-} // namespace executor
-} // namespace torch
diff --git a/runtime/core/portable_type/test/optional_test.cpp b/runtime/core/portable_type/test/optional_test.cpp
index 11241aedbb1..fe27186bbf2 100644
--- a/runtime/core/portable_type/test/optional_test.cpp
+++ b/runtime/core/portable_type/test/optional_test.cpp
@@ -15,9 +15,8 @@
 #include 
 
 using namespace ::testing;
-
-namespace torch {
-namespace executor {
+using executorch::runtime::etensor::nullopt;
+using executorch::runtime::etensor::optional;
 
 // Test that optional::value_type matches the template parameter type.
 static_assert(
@@ -141,6 +140,3 @@ TEST(TestOptional, ImplicitReturnOfNullopt) {
   auto o = function_returning_nullopt();
   EXPECT_FALSE(o.has_value());
 }
-
-} // namespace executor
-} // namespace torch
diff --git a/runtime/core/portable_type/test/scalar_test.cpp b/runtime/core/portable_type/test/scalar_test.cpp
index 4736d3c2a9e..fd211f916c3 100644
--- a/runtime/core/portable_type/test/scalar_test.cpp
+++ b/runtime/core/portable_type/test/scalar_test.cpp
@@ -10,8 +10,7 @@
 #include 
 #include 
 
-namespace torch {
-namespace executor {
+using executorch::runtime::etensor::Scalar;
 
 TEST(ScalarTest, ToScalarType) {
   Scalar s_d((double)3.141);
@@ -46,5 +45,3 @@ TEST(ScalarTest, IntConstructor) {
   EXPECT_EQ(s_int.to(), s_int32.to());
   EXPECT_EQ(s_int32.to(), s_int64.to());
 }
-} // namespace executor
-} // namespace torch
diff --git a/runtime/core/portable_type/test/tensor_impl_test.cpp b/runtime/core/portable_type/test/tensor_impl_test.cpp
index 77dd01ea23f..bd5f82c5d1f 100644
--- a/runtime/core/portable_type/test/tensor_impl_test.cpp
+++ b/runtime/core/portable_type/test/tensor_impl_test.cpp
@@ -17,20 +17,22 @@
 
 using namespace ::testing;
 
-namespace torch {
-namespace executor {
-
+using executorch::runtime::ArrayRef;
+using executorch::runtime::Error;
+using executorch::runtime::TensorShapeDynamism;
+using executorch::runtime::etensor::ScalarType;
+using executorch::runtime::etensor::TensorImpl;
 using SizesType = TensorImpl::SizesType;
 using DimOrderType = TensorImpl::DimOrderType;
 using StridesType = TensorImpl::StridesType;
-using torch::executor::internal::resize_tensor_impl;
+using executorch::runtime::internal::resize_tensor_impl;
 
 class TensorImplTest : public ::testing::Test {
  protected:
  void SetUp() override {
     // Since these tests cause ET_LOG to be called, the PAL must be initialized
     // first.
-    runtime_init();
+    executorch::runtime::runtime_init();
  }
 };
 
@@ -446,6 +448,3 @@ TEST_F(TensorImplTest, TestResizingTensorToZeroAndBack) {
   EXPECT_GT(t.numel(), 0);
   EXPECT_EQ(t.data(), data);
 }
-
-} // namespace executor
-} // namespace torch
diff --git a/runtime/core/portable_type/test/tensor_test.cpp b/runtime/core/portable_type/test/tensor_test.cpp
index 7a772cd0769..714cdc25661 100644
--- a/runtime/core/portable_type/test/tensor_test.cpp
+++ b/runtime/core/portable_type/test/tensor_test.cpp
@@ -13,15 +13,16 @@
 #include 
 #include 
 
-namespace torch {
-namespace executor {
+using executorch::runtime::etensor::ScalarType;
+using executorch::runtime::etensor::Tensor;
+using executorch::runtime::etensor::TensorImpl;
 
 class TensorTest : public ::testing::Test {
  protected:
  void SetUp() override {
     // Since these tests cause ET_LOG to be called, the PAL must be initialized
     // first.
-    runtime_init();
+    executorch::runtime::runtime_init();
  }
 };
 
@@ -77,6 +78,3 @@ TEST_F(TensorTest, ModifyDataOfConstTensor) {
   EXPECT_EQ(a.scalar_type(), ScalarType::Int);
   EXPECT_EQ(a.const_data_ptr()[0], 0);
 }
-
-} // namespace executor
-} // namespace torch
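Reviewer note: since the portable-type tests now reference everything through the `executorch::runtime` namespaces, here is a rough, self-contained shape of their setup for anyone porting downstream tests. This is a sketch only; it assumes the portable `TensorImpl` constructor order (type, dim, sizes, data, dim_order, strides) and the header paths are unchanged by this PR:

```cpp
#include <executorch/runtime/core/portable_type/tensor.h>
#include <executorch/runtime/platform/runtime.h>

using executorch::runtime::etensor::ScalarType;
using executorch::runtime::etensor::Tensor;
using executorch::runtime::etensor::TensorImpl;

void smoke_test() {
  executorch::runtime::runtime_init(); // PAL init, as in SetUp() above

  TensorImpl::SizesType sizes[2] = {2, 2};
  TensorImpl::DimOrderType dim_order[2] = {0, 1};
  TensorImpl::StridesType strides[2] = {2, 1};
  float data[4] = {1.f, 2.f, 3.f, 4.f};

  TensorImpl impl(ScalarType::Float, 2, sizes, data, dim_order, strides);
  Tensor t(&impl);
  // Expected: t.dim() == 2, t.numel() == 4,
  // t.scalar_type() == ScalarType::Float
}
```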