diff --git a/backends/xnnpack/runtime/XNNCompiler.cpp b/backends/xnnpack/runtime/XNNCompiler.cpp
index a4bf48cd46a..1080da0beae 100644
--- a/backends/xnnpack/runtime/XNNCompiler.cpp
+++ b/backends/xnnpack/runtime/XNNCompiler.cpp
@@ -16,11 +16,15 @@
 #pragma clang diagnostic ignored "-Wmissing-prototypes"
 #pragma clang diagnostic ignored "-Wglobal-constructors"

-namespace torch {
-namespace executor {
+namespace executorch {
+namespace backends {
 namespace xnnpack {
 namespace delegate {

+using executorch::runtime::Error;
+using executorch::runtime::MemoryAllocator;
+using executorch::runtime::Result;
+
 /*
  * Provide compile-time allocation.
  */
@@ -1811,5 +1815,5 @@ ET_NODISCARD Error XNNCompiler::compileModel(

 } // namespace delegate
 } // namespace xnnpack
-} // namespace executor
-} // namespace torch
+} // namespace backends
+} // namespace executorch
diff --git a/backends/xnnpack/runtime/XNNCompiler.h b/backends/xnnpack/runtime/XNNCompiler.h
index 94deda52635..e66cb791ecb 100644
--- a/backends/xnnpack/runtime/XNNCompiler.h
+++ b/backends/xnnpack/runtime/XNNCompiler.h
@@ -15,8 +15,8 @@
 #include
 #include

-namespace torch {
-namespace executor {
+namespace executorch {
+namespace backends {
 namespace xnnpack {
 namespace delegate {

@@ -25,15 +25,15 @@ class XNNCompiler {
   // Takes Flatbuffer Serialized XNNPACK Model and rebuilds the xnn-subgraph
   // returns an executor object that holds the xnn runtime object which we
   // can then use to set inputs and run inference using the xnn graph.
-  ET_NODISCARD static Error compileModel(
+  ET_NODISCARD static executorch::runtime::Error compileModel(
       const void* buffer_pointer,
       size_t num_bytes,
       XNNExecutor* executor,
-      MemoryAllocator* runtime_allocator,
+      executorch::runtime::MemoryAllocator* runtime_allocator,
       xnn_workspace_t workspace);
 };

 } // namespace delegate
 } // namespace xnnpack
-} // namespace executor
-} // namespace torch
+} // namespace backends
+} // namespace executorch
diff --git a/backends/xnnpack/runtime/XNNExecutor.cpp b/backends/xnnpack/runtime/XNNExecutor.cpp
index 2ca1c7d4b2f..1ba549bb8d7 100644
--- a/backends/xnnpack/runtime/XNNExecutor.cpp
+++ b/backends/xnnpack/runtime/XNNExecutor.cpp
@@ -8,14 +8,19 @@

 #include

-namespace torch {
-namespace executor {
+namespace executorch {
+namespace backends {
 namespace xnnpack {
 namespace delegate {

-using Tensor = exec_aten::Tensor;
-using ScalarType = exec_aten::ScalarType;
-using SizesType = exec_aten::SizesType;
+using executorch::aten::ScalarType;
+using executorch::aten::SizesType;
+using executorch::aten::Tensor;
+using executorch::runtime::BackendExecutionContext;
+using executorch::runtime::Error;
+using executorch::runtime::EValue;
+using executorch::runtime::is_contiguous_dim_order;
+using executorch::runtime::kTensorDimensionLimit;

 /**
  * Initializes the XNNExecutor with the runtime and given number of
@@ -204,7 +209,7 @@ ET_NODISCARD Error XNNExecutor::resize_outputs(EValue** args) const {
       expected_output_size[d] = static_cast(dims[d]);
     }

-    exec_aten::ArrayRef output_size{
+    executorch::aten::ArrayRef output_size{
         expected_output_size, static_cast(num_dim)};

     ET_LOG(Debug, "Resizing output tensor to a new shape");
@@ -231,5 +236,5 @@ ET_NODISCARD Error XNNExecutor::resize_outputs(EValue** args) const {

 } // namespace delegate
 } // namespace xnnpack
-} // namespace executor
-} // namespace torch
+} // namespace backends
+} // namespace executorch
diff --git a/backends/xnnpack/runtime/XNNExecutor.h b/backends/xnnpack/runtime/XNNExecutor.h
index c35307cb912..68ee18609e3 100644
--- a/backends/xnnpack/runtime/XNNExecutor.h
+++ b/backends/xnnpack/runtime/XNNExecutor.h
@@ -19,8 +19,8 @@
 #include
 #include

-namespace torch {
-namespace executor {
+namespace executorch {
+namespace backends {
 namespace xnnpack {
 namespace delegate {

@@ -51,7 +51,7 @@ class XNNExecutor {
    * The input/output ids are expected to be sorted in order of their
    * flatbuffer id_outs
    */
-  ET_NODISCARD Error initialize(
+  ET_NODISCARD executorch::runtime::Error initialize(
       xnn_runtime_t runtime,
       std::vector&& input_ids,
       std::vector&& output_ids);

@@ -62,24 +62,27 @@ class XNNExecutor {
    * input shapes will be propagated through the runtime, and perform
    * any additional memory planning as needed
    */
-  ET_NODISCARD Error prepare_args(EValue** args);
+  ET_NODISCARD executorch::runtime::Error prepare_args(
+      executorch::runtime::EValue** args);

   /**
    * Executes the graph using the args prepared at prepare_args().
    */
-  ET_NODISCARD Error forward(BackendExecutionContext& context);
+  ET_NODISCARD executorch::runtime::Error forward(
+      executorch::runtime::BackendExecutionContext& context);

   /**
    * Prepares the outputs to be returned by the delegate
    *
    * Performs any post processing of outputs like tensor resizing
    */
-  ET_NODISCARD Error resize_outputs(EValue** args) const;
+  ET_NODISCARD executorch::runtime::Error resize_outputs(
+      executorch::runtime::EValue** args) const;

   friend class XNNCompiler;
 };

 } // namespace delegate
 } // namespace xnnpack
-} // namespace executor
-} // namespace torch
+} // namespace backends
+} // namespace executorch
diff --git a/backends/xnnpack/runtime/XNNHeader.cpp b/backends/xnnpack/runtime/XNNHeader.cpp
index 5904792ee43..9397948c55d 100644
--- a/backends/xnnpack/runtime/XNNHeader.cpp
+++ b/backends/xnnpack/runtime/XNNHeader.cpp
@@ -15,11 +15,14 @@

 #pragma clang diagnostic ignored "-Wdeprecated"

-namespace torch {
-namespace executor {
+namespace executorch {
+namespace backends {
 namespace xnnpack {
 namespace delegate {

+using executorch::runtime::Error;
+using executorch::runtime::Result;
+
 namespace {
 /// Interprets the 8 bytes at `data` as a little-endian uint64_t.
 uint64_t GetUInt64LE(const uint8_t* data) {
@@ -73,5 +76,5 @@ constexpr char XNNHeader::kMagic[kMagicSize];

 } // namespace delegate
 } // namespace xnnpack
-} // namespace executor
-} // namespace torch
+} // namespace backends
+} // namespace executorch
diff --git a/backends/xnnpack/runtime/XNNHeader.h b/backends/xnnpack/runtime/XNNHeader.h
index 66922a95f8a..f835a6f6428 100644
--- a/backends/xnnpack/runtime/XNNHeader.h
+++ b/backends/xnnpack/runtime/XNNHeader.h
@@ -10,8 +10,8 @@

 #include

-namespace torch {
-namespace executor {
+namespace executorch {
+namespace backends {
 namespace xnnpack {
 namespace delegate {

@@ -98,7 +98,9 @@ struct XNNHeader {
    * error if size was too short, if the header was not found, or if the
    * header appeared to be corrupt.
    */
-  static Result Parse(const void* data, size_t size);
+  static executorch::runtime::Result Parse(
+      const void* data,
+      size_t size);

   /**
    * The offset in bytes to the beginning of the flatbuffer data.
@@ -121,5 +123,5 @@ struct XNNHeader {

 } // namespace delegate
 } // namespace xnnpack
-} // namespace executor
-} // namespace torch
+} // namespace backends
+} // namespace executorch
diff --git a/backends/xnnpack/runtime/XNNPACKBackend.cpp b/backends/xnnpack/runtime/XNNPACKBackend.cpp
index c817c010e29..b4367e40c4c 100644
--- a/backends/xnnpack/runtime/XNNPACKBackend.cpp
+++ b/backends/xnnpack/runtime/XNNPACKBackend.cpp
@@ -17,8 +17,19 @@

 #pragma clang diagnostic ignored "-Wglobal-constructors"

-namespace torch {
-namespace executor {
+namespace executorch {
+namespace backends {
+
+using executorch::runtime::ArrayRef;
+using executorch::runtime::Backend;
+using executorch::runtime::BackendExecutionContext;
+using executorch::runtime::BackendInitContext;
+using executorch::runtime::CompileSpec;
+using executorch::runtime::DelegateHandle;
+using executorch::runtime::Error;
+using executorch::runtime::EValue;
+using executorch::runtime::FreeableBuffer;
+using executorch::runtime::Result;

 class XnnpackBackend final : public ::executorch::runtime::BackendInterface {
  public:
@@ -145,5 +156,5 @@ Backend backend{"XnnpackBackend", &cls};
 static auto success_with_compiler = register_backend(backend);

 } // namespace
-} // namespace executor
-} // namespace torch
+} // namespace backends
+} // namespace executorch
diff --git a/backends/xnnpack/runtime/XNNStatus.h b/backends/xnnpack/runtime/XNNStatus.h
index 7feaa2f89af..d6d9f2274e3 100644
--- a/backends/xnnpack/runtime/XNNStatus.h
+++ b/backends/xnnpack/runtime/XNNStatus.h
@@ -11,8 +11,8 @@
 #include
 #include

-namespace torch {
-namespace executor {
+namespace executorch {
+namespace backends {
 namespace xnnpack {
 namespace delegate {

@@ -34,5 +34,5 @@ inline const char* xnn_status_to_string(enum xnn_status type) {

 } // namespace delegate
 } // namespace xnnpack
-} // namespace executor
-} // namespace torch
+} // namespace backends
+} // namespace executorch
diff --git a/backends/xnnpack/runtime/profiling/XNNProfiler.cpp b/backends/xnnpack/runtime/profiling/XNNProfiler.cpp
index 1bde8a37e50..72614083c74 100644
--- a/backends/xnnpack/runtime/profiling/XNNProfiler.cpp
+++ b/backends/xnnpack/runtime/profiling/XNNProfiler.cpp
@@ -22,9 +22,13 @@
 #include
 // NOLINTEND

-namespace torch::executor::xnnpack::delegate::profiling {
+namespace executorch::backends::xnnpack::delegate::profiling {
+
+using executorch::runtime::Error;
+using executorch::runtime::EventTracer;

 #if defined(ET_EVENT_TRACER_ENABLED) || defined(ENABLE_XNNPACK_PROFILING)
+
 XNNProfiler::XNNProfiler()
     : state_(XNNProfilerState::Uninitialized), run_count_(0) {}

@@ -210,10 +214,10 @@ void XNNProfiler::submit_trace() {

     auto end_time = time + interval_ticks;

-    torch::executor::event_tracer_log_profiling_delegate(
+    executorch::runtime::event_tracer_log_profiling_delegate(
         event_tracer_,
         name_formatted.c_str(),
-        /*delegate_debug_id=*/static_cast(-1),
+        /*delegate_debug_id=*/static_cast(-1),
         time,
         end_time);

@@ -246,4 +250,4 @@ Error XNNProfiler::end() {

 #endif

-} // namespace torch::executor::xnnpack::delegate::profiling
+} // namespace executorch::backends::xnnpack::delegate::profiling
diff --git a/backends/xnnpack/runtime/profiling/XNNProfiler.h b/backends/xnnpack/runtime/profiling/XNNProfiler.h
index 29e3633197c..2eaec0ad115 100644
--- a/backends/xnnpack/runtime/profiling/XNNProfiler.h
+++ b/backends/xnnpack/runtime/profiling/XNNProfiler.h
@@ -14,8 +14,8 @@
 #include
 #include

-namespace torch {
-namespace executor {
+namespace executorch {
+namespace backends {
 namespace xnnpack {
 namespace delegate {
 namespace profiling {

@@ -30,24 +30,25 @@ class XNNProfiler {
    * Initialize the profiler. This must be called after model is
    * compiled and before calling begin_execution.
    */
-  Error initialize(xnn_runtime_t runtime);
+  executorch::runtime::Error initialize(xnn_runtime_t runtime);

   /**
    * Start a new profiling session. This is typically invoked
    * immediately before invoking the XNNPACK runtime as part
    * of a forward pass.
    */
-  Error start(EventTracer* event_tracer);
+  executorch::runtime::Error start(
+      executorch::runtime::EventTracer* event_tracer);

   /**
    * End a profiling session. This is typically invoked immediately
    * after the XNNPACK runtime invocation completes.
    */
-  Error end();
+  executorch::runtime::Error end();

  private:
 #if defined(ET_EVENT_TRACER_ENABLED) || defined(ENABLE_XNNPACK_PROFILING)
-  EventTracer* event_tracer_;
+  executorch::runtime::EventTracer* event_tracer_;
   xnn_runtime_t runtime_;

   XNNProfilerState state_;
@@ -64,9 +65,9 @@ class XNNProfiler {
   std::vector op_timings_sum_;
 #endif

-  Error get_runtime_operator_names();
-  Error get_runtime_num_operators();
-  Error get_runtime_operator_timings();
+  executorch::runtime::Error get_runtime_operator_names();
+  executorch::runtime::Error get_runtime_num_operators();
+  executorch::runtime::Error get_runtime_operator_timings();

   void log_operator_timings();

@@ -80,5 +81,5 @@
 } // namespace profiling
 } // namespace delegate
 } // namespace xnnpack
-} // namespace executor
-} // namespace torch
+} // namespace backends
+} // namespace executorch
diff --git a/backends/xnnpack/runtime/utils/utils.cpp b/backends/xnnpack/runtime/utils/utils.cpp
index 1b88601c3f7..bbcb8bc071c 100644
--- a/backends/xnnpack/runtime/utils/utils.cpp
+++ b/backends/xnnpack/runtime/utils/utils.cpp
@@ -10,12 +10,14 @@
 #include
 #include

-namespace torch {
-namespace executor {
-namespace qnnpack_utils {
+namespace executorch {
+namespace backends {
+namespace xnnpack {
+namespace utils {

-using Tensor = exec_aten::Tensor;
-using ScalarType = exec_aten::ScalarType;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
+using executorch::runtime::Error;

 constexpr float SMALL_SCALE_THRESHOLD = 6.1e-5f;

@@ -222,6 +224,8 @@ void quantize_tensor_arm64_q8_wrapper(
   quantize_tensor_arm64_q8(in, out, N, scale, zero_point);
 }
 #endif
-} // namespace qnnpack_utils
-} // namespace executor
-} // namespace torch
+
+} // namespace utils
+} // namespace xnnpack
+} // namespace backends
+} // namespace executorch
diff --git a/backends/xnnpack/runtime/utils/utils.h b/backends/xnnpack/runtime/utils/utils.h
index c341b4c2d7d..2eb079f0b0c 100644
--- a/backends/xnnpack/runtime/utils/utils.h
+++ b/backends/xnnpack/runtime/utils/utils.h
@@ -19,16 +19,17 @@
 #include
 #endif

-namespace torch {
-namespace executor {
-namespace qnnpack_utils {
+namespace executorch {
+namespace backends {
+namespace xnnpack {
+namespace utils {

 struct QuantizationParams {
   double scale;
   int32_t zero_point;
 };

-Error ChooseQuantizationParams(
+executorch::runtime::Error ChooseQuantizationParams(
     float min,
     float max,
     int32_t qmin,
@@ -125,9 +126,9 @@ void quantize_tensor_arm64_q8_wrapper(
 #endif /* __aarch64__ */

 template
-Error QuantizePerTensor(
-    const exec_aten::Tensor& rtensor,
-    exec_aten::Tensor& qtensor,
+executorch::runtime::Error QuantizePerTensor(
+    const executorch::aten::Tensor& rtensor,
+    executorch::aten::Tensor& qtensor,
     double scale,
     int zero_point) {
   const float* rdata = rtensor.const_data_ptr();
@@ -151,17 +152,18 @@
     qdata[i] = quantize_val(scale, zero_point, rdata[i]);
   }
 #endif /* __aarch64__ */
-  return Error::Ok;
+  return executorch::runtime::Error::Ok;
 }

-Error GenerateRequantizationScale(
-    const exec_aten::Tensor& weight_scales,
+executorch::runtime::Error GenerateRequantizationScale(
+    const executorch::aten::Tensor& weight_scales,
     float input_scale,
     float output_scale,
     std::vector& requant_scales);

-std::pair GetMinMax(const exec_aten::Tensor& ft);
+std::pair GetMinMax(const executorch::aten::Tensor& ft);

-} // namespace qnnpack_utils
-} // namespace executor
-} // namespace torch
+} // namespace utils
+} // namespace xnnpack
+} // namespace backends
+} // namespace executorch
diff --git a/backends/xnnpack/test/runtime/test_runtime_utils.cpp b/backends/xnnpack/test/runtime/test_runtime_utils.cpp
index c057b32ecea..7116a6f2fe5 100644
--- a/backends/xnnpack/test/runtime/test_runtime_utils.cpp
+++ b/backends/xnnpack/test/runtime/test_runtime_utils.cpp
@@ -8,26 +8,27 @@

 #include

+#include
 #include
 #include
 #include
 #include
 #include
-#include "executorch/backends/xnnpack/runtime/utils/utils.h"

 using namespace ::testing;
-using exec_aten::ScalarType;
-using exec_aten::Tensor;
-using torch::executor::Error;
-using torch::executor::testing::TensorFactory;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
+using executorch::runtime::Error;
+using executorch::runtime::testing::TensorFactory;
+
+namespace utils = executorch::backends::xnnpack::utils;

 TEST(TestUtils, choose_quantization_params) {
   Error e;
-  torch::executor::qnnpack_utils::QuantizationParams qparams;
+  utils::QuantizationParams qparams;
   float min = -128.0 * 10.0;
   float max = +127.0 * 10.0;
-  e = torch::executor::qnnpack_utils::ChooseQuantizationParams(
+  e = utils::ChooseQuantizationParams(
       min, max, 0, 255, qparams, false, false, false);
   ASSERT_EQ(e, Error::Ok);
   ASSERT_EQ(qparams.zero_point, 128);
@@ -35,12 +36,12 @@
 }

 TEST(TestUtils, choose_quantization_params_fails) {
-  torch::executor::runtime_init();
+  executorch::runtime::runtime_init();
   Error e;
-  torch::executor::qnnpack_utils::QuantizationParams qparams;
+  utils::QuantizationParams qparams;
   float min = -128.0 * 10.0;
   float max = +127.0 * 10.0;
-  e = torch::executor::qnnpack_utils::ChooseQuantizationParams(
+  e = utils::ChooseQuantizationParams(
       max, min, 0, 255, qparams, false, false, false);
   ASSERT_EQ(e, Error::Internal);
 }
@@ -58,9 +59,8 @@ TEST(TestUtils, quantize_per_tensor) {
       at_tensor, scale, zero_point, at::ScalarType::QUInt8);
   Tensor expected = tfo.zeros_like(output);
   at_expected = at_expected.contiguous();
-  torch::util::alias_etensor_to_attensor(at_expected, expected);
-  Error e = torch::executor::qnnpack_utils::QuantizePerTensor(
-      input, output, scale, zero_point);
+  executorch::extension::alias_etensor_to_attensor(at_expected, expected);
+  Error e = utils::QuantizePerTensor(input, output, scale, zero_point);
   ASSERT_EQ(e, Error::Ok);
   EXPECT_TENSOR_EQ(output, expected);
 }
@@ -71,7 +71,7 @@ TEST(TestUtils, generate_requantizeation_scale) {
   float input_scale = 2.0;
   float output_scale = 3.0;
   std::vector req_scales(15, 0);
-  Error e = torch::executor::qnnpack_utils::GenerateRequantizationScale(
+  Error e = utils::GenerateRequantizationScale(
       weight_scales, input_scale, output_scale, req_scales);
   ASSERT_EQ(e, Error::Ok);
   for (auto m : req_scales) {
@@ -85,14 +85,14 @@ TEST(TestUtils, get_min_max) {
   float val = 4.12345;
   const Tensor ft = tf.full({3, 5}, val);
-  std::tie(min, max) = torch::executor::qnnpack_utils::GetMinMax(ft);
+  std::tie(min, max) = utils::GetMinMax(ft);
   EXPECT_FLOAT_EQ(min, val);
   EXPECT_FLOAT_EQ(max, val);

   const Tensor ft_min = tf.make(
       {2, 1},
       {std::numeric_limits::min(),
        std::numeric_limits::max()});
-  std::tie(min, max) = torch::executor::qnnpack_utils::GetMinMax(ft_min);
+  std::tie(min, max) = utils::GetMinMax(ft_min);
   EXPECT_FLOAT_EQ(min, std::numeric_limits::min());
   EXPECT_FLOAT_EQ(max, std::numeric_limits::max());

@@ -100,12 +100,12 @@
       {2, 1},
       {std::numeric_limits::lowest(),
        std::numeric_limits::max()});
-  std::tie(min, max) = torch::executor::qnnpack_utils::GetMinMax(ft_lowest);
+  std::tie(min, max) = utils::GetMinMax(ft_lowest);
   EXPECT_FLOAT_EQ(min, std::numeric_limits::lowest());
   EXPECT_FLOAT_EQ(max, std::numeric_limits::max());

   const Tensor ft_random = tf.make({5, 1}, {-2.2, -1.1, 0, 1.1, 2.2});
-  std::tie(min, max) = torch::executor::qnnpack_utils::GetMinMax(ft_random);
+  std::tie(min, max) = utils::GetMinMax(ft_random);
   EXPECT_FLOAT_EQ(min, -2.2);
   EXPECT_FLOAT_EQ(max, 2.2);
 }
diff --git a/backends/xnnpack/test/runtime/test_xnnexecutor.cpp b/backends/xnnpack/test/runtime/test_xnnexecutor.cpp
index 6bdd3c953dd..a5a26004b49 100644
--- a/backends/xnnpack/test/runtime/test_xnnexecutor.cpp
+++ b/backends/xnnpack/test/runtime/test_xnnexecutor.cpp
@@ -11,10 +11,10 @@
 #include
 #include

-using torch::executor::Error;
-using torch::executor::EValue;
-using torch::executor::testing::TensorFactory;
-using torch::executor::xnnpack::delegate::XNNExecutor;
+using executorch::backends::xnnpack::delegate::XNNExecutor;
+using executorch::runtime::Error;
+using executorch::runtime::EValue;
+using executorch::runtime::testing::TensorFactory;

 TEST(XNNExecutorTest, ArgumentWithTooManyDimensions) {
   XNNExecutor executor;
@@ -76,7 +76,7 @@ TEST(XNNExecutorTest, ArgumentWithTooManyDimensions) {
           1,
       }),
       Error::Ok);
-  TensorFactory tf;
+  TensorFactory tf;
   auto input_tensor = tf.make({1, 1, 1, 1, 1, 1, 1, 1, 1}, {42});
   ASSERT_EQ(input_tensor.dim(), 9);
   auto output_tensor = tf.make(
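Reviewer note on the pattern used throughout this diff: the delegate moves from torch::executor::xnnpack::delegate to executorch::backends::xnnpack::delegate, headers spell runtime and ATen types out in full (executorch::runtime::Error, executorch::aten::Tensor), and the .cpp files keep their bodies terse by pulling those names in with using-declarations. Out-of-tree code that still refers to the old names will stop compiling against these headers. If a transition period is needed, a shim along the following lines could re-export the new names under the old ones. This is a hypothetical sketch, not part of this diff; check whether the tree already ships such compatibility aliases before adding one.

// Hypothetical compatibility shim -- NOT introduced by this diff.
// Re-exports the relocated XNNPACK delegate symbols under their
// pre-migration names so out-of-tree call sites keep compiling while
// they are updated. Assumes the relevant executorch headers are
// already included.
namespace torch {
namespace executor {
// Namespace alias: torch::executor::xnnpack::delegate::XNNExecutor
// resolves to the relocated type again.
namespace xnnpack = ::executorch::backends::xnnpack;
// Runtime types that previously lived directly in torch::executor.
using ::executorch::runtime::Error;
using ::executorch::runtime::EValue;
using ::executorch::runtime::MemoryAllocator;
using ::executorch::runtime::Result;
} // namespace executor
} // namespace torch

With such a shim in place, an old call site like torch::executor::xnnpack::delegate::XNNExecutor keeps compiling unchanged; migrations of this kind typically land the aliases first and delete them once every call site has moved to the new spellings.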