diff --git a/backends/cadence/aot/functions_hifi.yaml b/backends/cadence/aot/functions_hifi.yaml index 3bdbb33d59b..4677c69206f 100644 --- a/backends/cadence/aot/functions_hifi.yaml +++ b/backends/cadence/aot/functions_hifi.yaml @@ -469,6 +469,16 @@ - arg_meta: null kernel_name: impl::HiFi::quantized_linear_per_tensor_out +- func: cadence::im2row.out(Tensor input, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, Tensor in_zero_point, bool channel_last=False, *, Tensor(a!) out) -> Tensor(a!) + kernels: + - arg_meta: null + kernel_name: impl::HiFi::native::im2row_out + +- func: cadence::im2row.per_tensor_out(Tensor input, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, int in_zero_point, bool channel_last=False, *, Tensor(a!) out) -> Tensor(a!) + kernels: + - arg_meta: null + kernel_name: impl::HiFi::native::im2row_per_tensor_out + - func: cadence::quantized_linear_asym8sxasym8s_asym8s.per_tensor_out(Tensor src, Tensor weight, Tensor bias, int src_zero_point, int weight_zero_point, int out_multiplier, int out_shift, int out_zero_point, Tensor? offset, *, Tensor(a!) out) -> Tensor(a!) kernels: - arg_meta: null diff --git a/backends/cadence/hifi/kernels/CMakeLists.txt b/backends/cadence/hifi/kernels/CMakeLists.txt index 936e28e2241..c366cecbe0c 100644 --- a/backends/cadence/hifi/kernels/CMakeLists.txt +++ b/backends/cadence/hifi/kernels/CMakeLists.txt @@ -18,6 +18,7 @@ add_library( ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_mode_f32_broadcast.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_fmod_broadcast_f32.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_greater_lesser_equal_f32.c + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_im2row.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_logicalxor_bool_bool.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_minimum_maximum_f32.c ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_mul_f32_broadcast.c diff --git a/backends/cadence/hifi/kernels/kernels.h b/backends/cadence/hifi/kernels/kernels.h index 08343e2528b..6a3dcd1d245 100644 --- a/backends/cadence/hifi/kernels/kernels.h +++ b/backends/cadence/hifi/kernels/kernels.h @@ -196,6 +196,28 @@ extern "C" WORD32 xa_nn_elm_where_broadcast_4D_f32xf32_f32( const unsigned char* __restrict__ p_condition, const WORD32* const p_condition_shape); +extern "C" WORD32 xa_nn_im2row_quantized( + const WORD8* __restrict__ data_im, + const WORD32 in_zero_point, + /* input parameters*/ + const WORD32 channels, + const WORD32 height, + const WORD32 width, + /* output parameters */ + const WORD32 out_height, + const WORD32 out_width, + /* convolution parameters */ + const WORD32 kernel_h, + const WORD32 kernel_w, + const WORD32 pad_h, + const WORD32 pad_w, + const WORD32 stride_h, + const WORD32 stride_w, + const WORD32 dilation_h, + const WORD32 dilation_w, + WORD8* __restrict__ data_col, + WORD32 channels_last); + extern "C" WORD32 xa_nn_reduce_mean_4D_f32_f32( FLOAT32* __restrict__ p_out, const WORD32* const p_out_shape, diff --git a/backends/cadence/hifi/operators/CMakeLists.txt b/backends/cadence/hifi/operators/CMakeLists.txt index 26555da9760..256d9666614 100644 --- a/backends/cadence/hifi/operators/CMakeLists.txt +++ b/backends/cadence/hifi/operators/CMakeLists.txt @@ -16,6 +16,7 @@ include(${EXECUTORCH_ROOT}/tools/cmake/Codegen.cmake) # ATen compliant ops that are needed to run this model. 
 set(_aten_ops__srcs
+    "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_im2row_out.cpp"
     "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_add.cpp"
     "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_atan2.cpp"
     "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_bitwise_and.cpp"
diff --git a/backends/cadence/hifi/operators/op_im2row_out.cpp b/backends/cadence/hifi/operators/op_im2row_out.cpp
new file mode 100644
index 00000000000..d42699b8a38
--- /dev/null
+++ b/backends/cadence/hifi/operators/op_im2row_out.cpp
@@ -0,0 +1,429 @@
+// (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#define ALIGN_PTR(x, bytes) ((((unsigned)(x)) + (bytes - 1)) & (~(bytes - 1)))
+
+using ::executorch::aten::IntArrayRef;
+using ::executorch::aten::ScalarType;
+using ::executorch::aten::Tensor;
+using ::executorch::runtime::KernelRuntimeContext;
+
+namespace impl {
+namespace HiFi {
+namespace native {
+
+template <typename T>
+__attribute__((always_inline)) void im2row_(
+    const T* __restrict__ data_im,
+    const int32_t in_zero_point,
+    /* input parameters*/
+    const int32_t channels,
+    const int32_t height,
+    const int32_t width,
+    /* output parameters */
+    const int32_t out_height,
+    const int32_t out_width,
+    /* convolution parameters */
+    const int32_t kernel_h,
+    const int32_t kernel_w,
+    const int32_t pad_h,
+    const int32_t pad_w,
+    const int32_t stride_h,
+    const int32_t stride_w,
+    const int32_t dilation_h,
+    const int32_t dilation_w,
+    T* __restrict__ data_col,
+    bool channels_last) {
+  // Consider convolving the input image of dimensions channels * height * width
+  // (or height * width * channels for NHWC layout) with a filter of dimensions
+  // channels * kernel_h * kernel_w. Assume that this convolution will produce
+  // an output of dimensions out_height x out_width. For each point in the
+  // output, im2row takes the data from the input that is used in the
+  // computation of that output point, and flattens it into a vector of size
+  // channels_col = channels * kernel_h * kernel_w. The output of im2row is
+  // therefore a 2D array of size (out_height * out_width) x channels_col.
+  const int32_t channels_col = channels * kernel_h * kernel_w;
+
+  // If the layout is NHWC, we can copy 'channels' worth of contiguous data
+  // points when performing im2row.
+  if (channels_last) {
+    // Iterate over the output domain
+    for (int _h = 0; _h < out_height; ++_h) {
+      for (int _w = 0; _w < out_width; ++_w) {
+        int32_t i_col = _h * out_width + _w;
+        // Each point in the output domain is the result of applying a filter
+        // of size kernel_h x kernel_w x channels on the input. But since
+        // channels is contiguous, we will not explicitly have a loop for it.
+        for (int _kh = 0; _kh < kernel_h; ++_kh) {
+          int32_t h_im = _h * stride_h - pad_h + _kh * dilation_h;
+          for (int _kw = 0; _kw < kernel_w; ++_kw) {
+            int32_t w_im = _w * stride_w - pad_w + _kw * dilation_w;
+
+            // h_im and w_im are the actual height and width coordinates of the
+            // input tensor from where we need to copy 'channels' points.
+            const T* __restrict__ slice_im =
+                data_im + (h_im * width + w_im) * channels;
+            T* __restrict__ slice_col = data_col + i_col * channels_col +
+                (_kh * kernel_w + _kw) * channels;
+            // If the coordinates were within the input domain, we copy
+            // 'channels' contiguous values. Otherwise we fill the output with
+            // the input zero point.
+            if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
+              std::memcpy(slice_col, slice_im, channels * sizeof(T));
+            } else {
+              std::fill_n(slice_col, channels, T(in_zero_point));
+            }
+          }
+        }
+      }
+    }
+  } else {
+    // Iterate over the output domain
+    for (int _h = 0; _h < out_height; ++_h) {
+      for (int _w = 0; _w < out_width; ++_w) {
+        int32_t i_col = _h * out_width + _w;
+
+        // Each point in the output domain is the result of applying a filter
+        // of size channels * kernel_h * kernel_w on the input
+        for (int _c = 0; _c < channels; ++_c) {
+          for (int _kh = 0; _kh < kernel_h; ++_kh) {
+            for (int _kw = 0; _kw < kernel_w; ++_kw) {
+              // c_col is the linearized access in the channels_col vector.
+              int32_t c_col = (_c * kernel_h + _kh) * kernel_w + _kw;
+              // h_im and w_im are the actual height and width coordinates of
+              // the input tensor that we need to copy to the output.
+              int32_t h_im = _h * stride_h - pad_h + _kh * dilation_h;
+              int32_t w_im = _w * stride_w - pad_w + _kw * dilation_w;
+              // If the current data access is within the input tensor, copy
+              // the value
+              data_col[i_col * channels_col + c_col] =
+                  (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width)
+                  ? data_im[(_c * height + h_im) * width + w_im]
+                  : static_cast<T>(in_zero_point);
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+void im2row_out(
+    __ET_UNUSED KernelRuntimeContext& ctx,
+    const Tensor& input,
+    IntArrayRef kernel_size,
+    IntArrayRef dilation,
+    IntArrayRef padding,
+    IntArrayRef stride,
+    const Tensor& in_zero_point,
+    bool channel_last,
+    Tensor& out) {
+  // Compute the input tensor's dims
+  bool unit_height = input.dim() == 3;
+  const int32_t batch_size = input.size(0);
+  const int32_t in_c =
+      channel_last ? input.size(3 - unit_height) : input.size(1);
+  const int32_t in_h =
+      unit_height ? 1 : (channel_last ? input.size(1) : input.size(2));
+  const int32_t in_w =
+      channel_last ? input.size(2 - unit_height) : input.size(3 - unit_height);
+
+  // Get the kernel parameters
+  int32_t kernel_h = kernel_size[0];
+  int32_t kernel_w = kernel_size[1];
+  int32_t dilation_h = dilation[0];
+  int32_t dilation_w = dilation[1];
+  int32_t pad_h = padding[0];
+  int32_t pad_w = padding[1];
+  int32_t stride_h = stride[0];
+  int32_t stride_w = stride[1];
+
+  // If we were to apply a convolution on the input tensor, compute the output
+  // height and width.
+  int32_t out_h =
+      (in_h + 2 * pad_h - dilation_h * (kernel_h - 1) - 1) / stride_h + 1;
+  int32_t out_w =
+      (in_w + 2 * pad_w - dilation_w * (kernel_w - 1) - 1) / stride_w + 1;
+
+  ET_DCHECK_MSG(
+      (out_h * out_w) == out.size(1), "dimension mismatch for output");
+  ET_DCHECK_MSG(
+      (kernel_h * kernel_w * in_c) == out.size(2),
+      "dimension mismatch for output");
+  // Check if the input is per-tensor quantized or per-channel quantized. The
+  // zero point for each batch could differ for per-channel quantized input.
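+  // For example (illustrative values, not exercised by this change): with a
+  // batch of four per-channel quantized inputs, in_zero_point would carry four
+  // values, one per batch entry, while a per-tensor quantized input supplies a
+  // single value that is reused for every batch.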
+  bool per_tensor_quantized = in_zero_point.numel() == 1;
+
+  bool optimized = false;
+  if (input.scalar_type() == ScalarType::Char ||
+      input.scalar_type() == ScalarType::Byte)
+    optimized = true;
+
+  // The NNLIB fast path below operates on 8-bit data and always transposes the
+  // input from NCHW to NHWC before calling xa_nn_im2row_quantized, so take it
+  // only for 8-bit inputs that are not already channels-last.
+  if (optimized && !channel_last) {
+    WORD8* ptr1 = (WORD8*)::impl::HiFi::kernels::allocate_temp_memory(
+        ctx, ((batch_size * in_c * in_h * in_w) + 8) * sizeof(WORD8));
+
+    WORD8* pin = (WORD8*)ALIGN_PTR(ptr1, 8);
+
+    WORD32 p_inp_shape[4];
+    p_inp_shape[0] = input.size(0);
+    p_inp_shape[1] = in_c;
+    p_inp_shape[2] = in_h;
+    p_inp_shape[3] = in_w;
+
+    WORD32 p_out_shape[4];
+    p_out_shape[0] = input.size(0);
+    p_out_shape[1] = in_h;
+    p_out_shape[2] = in_w;
+    p_out_shape[3] = in_c;
+
+    WORD32 p_permute_vec[4] = {0, 2, 3, 1};
+
+    WORD8* __restrict__ p_inp =
+        (WORD8* __restrict__)input.const_data_ptr();
+
+    xa_nn_transpose_8_8(
+        pin,
+        p_out_shape,
+        p_inp,
+        p_inp_shape,
+        p_permute_vec,
+        4, // input dimensions
+        4); // output dimensions
+
+    const int8_t* __restrict__ in_data = pin;
+    int8_t* __restrict__ out_data = out.mutable_data_ptr<int8_t>();
+    const int32_t* __restrict__ zero_point =
+        in_zero_point.const_data_ptr<int32_t>();
+    int32_t in_plane = in_c * in_h * in_w;
+    int32_t out_plane = kernel_h * kernel_w * in_c * out_h * out_w;
+    for (size_t n = 0; n < batch_size; ++n) {
+      xa_nn_im2row_quantized(
+          &in_data[n * in_plane],
+          per_tensor_quantized ? zero_point[0] : zero_point[n],
+          in_c,
+          in_h,
+          in_w,
+          out_h,
+          out_w,
+          kernel_h,
+          kernel_w,
+          pad_h,
+          pad_w,
+          stride_h,
+          stride_w,
+          dilation_h,
+          dilation_w,
+          &out_data[n * out_plane],
+          1 /*channel_last*/);
+    }
+  } else {
+#define typed_im2row(dtype, ctype)                                      \
+  case ScalarType::dtype: {                                             \
+    const ctype* __restrict__ in_data = input.const_data_ptr<ctype>();  \
+    ctype* __restrict__ out_data = out.mutable_data_ptr<ctype>();       \
+    const int32_t* __restrict__ zero_point =                            \
+        in_zero_point.const_data_ptr<int32_t>();                        \
+    int32_t in_plane = in_c * in_h * in_w;                              \
+    int32_t out_plane = kernel_h * kernel_w * in_c * out_h * out_w;     \
+    for (size_t n = 0; n < batch_size; ++n) {                           \
+      im2row_(                                                          \
+          &in_data[n * in_plane],                                       \
+          per_tensor_quantized ? zero_point[0] : zero_point[n],         \
+          in_c,                                                         \
+          in_h,                                                         \
+          in_w,                                                         \
+          out_h,                                                        \
+          out_w,                                                        \
+          kernel_h,                                                     \
+          kernel_w,                                                     \
+          pad_h,                                                        \
+          pad_w,                                                        \
+          stride_h,                                                     \
+          stride_w,                                                     \
+          dilation_h,                                                   \
+          dilation_w,                                                   \
+          &out_data[n * out_plane],                                     \
+          channel_last);                                                \
+    }                                                                   \
+    break;                                                              \
+  }
+
+    ScalarType dtype = input.scalar_type();
+    switch (dtype) {
+      typed_im2row(Float, float);
+      typed_im2row(Byte, uint8_t);
+      typed_im2row(Char, int8_t);
+      default:
+        ET_DCHECK_MSG(
+            false,
+            "im2row not implemented for dtype %s",
+            torch::executor::toString(dtype));
+    }
+#undef typed_im2row
+  }
+}
+
+void im2row_per_tensor_out(
+    __ET_UNUSED KernelRuntimeContext& ctx,
+    const Tensor& input,
+    IntArrayRef kernel_size,
+    IntArrayRef dilation,
+    IntArrayRef padding,
+    IntArrayRef stride,
+    int64_t in_zero_point,
+    bool channel_last,
+    Tensor& out) {
+  // Compute the input tensor's dims
+  bool unit_height = input.dim() == 3;
+  const int32_t batch_size = input.size(0);
+  const int32_t in_c =
+      channel_last ? input.size(3 - unit_height) : input.size(1);
+  const int32_t in_h =
+      unit_height ? 1 : (channel_last ? input.size(1) : input.size(2));
+  const int32_t in_w =
+      channel_last ? input.size(2 - unit_height) : input.size(3 - unit_height);
+
+  // Get the kernel parameters
+  int32_t kernel_h = kernel_size[0];
+  int32_t kernel_w = kernel_size[1];
+  int32_t dilation_h = dilation[0];
+  int32_t dilation_w = dilation[1];
+  int32_t pad_h = padding[0];
+  int32_t pad_w = padding[1];
+  int32_t stride_h = stride[0];
+  int32_t stride_w = stride[1];
+
+  // If we were to apply a convolution on the input tensor, compute the output
+  // height and width.
+  int32_t out_h =
+      (in_h + 2 * pad_h - dilation_h * (kernel_h - 1) - 1) / stride_h + 1;
+  int32_t out_w =
+      (in_w + 2 * pad_w - dilation_w * (kernel_w - 1) - 1) / stride_w + 1;
+
+  ET_DCHECK_MSG(
+      (out_h * out_w) == out.size(1), "dimension mismatch for output");
+  ET_DCHECK_MSG(
+      (kernel_h * kernel_w * in_c) == out.size(2),
+      "dimension mismatch for output");
+
+  bool optimized = false;
+  if (input.scalar_type() == ScalarType::Char ||
+      input.scalar_type() == ScalarType::Byte)
+    optimized = true;
+
+  // As in im2row_out, the NNLIB fast path assumes an 8-bit NCHW input that it
+  // transposes to NHWC before calling xa_nn_im2row_quantized.
+  if (optimized && !channel_last) {
+    WORD8* ptr1 = (WORD8*)::impl::HiFi::kernels::allocate_temp_memory(
+        ctx, ((batch_size * in_c * in_h * in_w) + 8) * sizeof(WORD8));
+
+    WORD8* pin = (WORD8*)ALIGN_PTR(ptr1, 8);
+
+    WORD32 p_inp_shape[4];
+    p_inp_shape[0] = input.size(0);
+    p_inp_shape[1] = in_c;
+    p_inp_shape[2] = in_h;
+    p_inp_shape[3] = in_w;
+
+    WORD32 p_out_shape[4];
+    p_out_shape[0] = input.size(0);
+    p_out_shape[1] = in_h;
+    p_out_shape[2] = in_w;
+    p_out_shape[3] = in_c;
+
+    WORD32 p_permute_vec[4] = {0, 2, 3, 1};
+
+    WORD8* __restrict__ p_inp =
+        (WORD8* __restrict__)input.const_data_ptr();
+
+    xa_nn_transpose_8_8(
+        pin,
+        p_out_shape,
+        p_inp,
+        p_inp_shape,
+        p_permute_vec,
+        4, // input dimensions
+        4); // output dimensions
+
+    const int8_t* __restrict__ in_data = pin;
+    int8_t* __restrict__ out_data = out.mutable_data_ptr<int8_t>();
+    int32_t in_plane = in_c * in_h * in_w;
+    int32_t out_plane = kernel_h * kernel_w * in_c * out_h * out_w;
+    for (size_t n = 0; n < batch_size; ++n) {
+      xa_nn_im2row_quantized(
+          &in_data[n * in_plane],
+          (int32_t)in_zero_point,
+          in_c,
+          in_h,
+          in_w,
+          out_h,
+          out_w,
+          kernel_h,
+          kernel_w,
+          pad_h,
+          pad_w,
+          stride_h,
+          stride_w,
+          dilation_h,
+          dilation_w,
+          &out_data[n * out_plane],
+          1 /*channel_last*/);
+    }
+  } else {
+#define typed_im2row_per_tensor(dtype, ctype)                           \
+  case ScalarType::dtype: {                                             \
+    const ctype* __restrict__ in_data = input.const_data_ptr<ctype>();  \
+    ctype* __restrict__ out_data = out.mutable_data_ptr<ctype>();       \
+    int32_t in_plane = in_c * in_h * in_w;                              \
+    int32_t out_plane = kernel_h * kernel_w * in_c * out_h * out_w;     \
+    for (size_t n = 0; n < batch_size; ++n) {                           \
+      im2row_(                                                          \
+          &in_data[n * in_plane],                                       \
+          in_zero_point,                                                \
+          in_c,                                                         \
+          in_h,                                                         \
+          in_w,                                                         \
+          out_h,                                                        \
+          out_w,                                                        \
+          kernel_h,                                                     \
+          kernel_w,                                                     \
+          pad_h,                                                        \
+          pad_w,                                                        \
+          stride_h,                                                     \
+          stride_w,                                                     \
+          dilation_h,                                                   \
+          dilation_w,                                                   \
+          &out_data[n * out_plane],                                     \
+          channel_last);                                                \
+    }                                                                   \
+    break;                                                              \
+  }
+
+    ScalarType dtype = input.scalar_type();
+    switch (dtype) {
+      typed_im2row_per_tensor(Float, float);
+      typed_im2row_per_tensor(Byte, uint8_t);
+      typed_im2row_per_tensor(Char, int8_t);
+      default:
+        ET_DCHECK_MSG(
+            false,
+            "im2row.per_tensor not implemented for dtype %s",
+            torch::executor::toString(dtype));
+    }
+#undef typed_im2row_per_tensor
+  }
+}
+
+} // namespace native
+} // namespace HiFi
+} // namespace impl
diff --git a/backends/cadence/hifi/operators/operators.h b/backends/cadence/hifi/operators/operators.h
index f7f5194d91a..491a8d27365
--- a/backends/cadence/hifi/operators/operators.h
+++ 
b/backends/cadence/hifi/operators/operators.h @@ -191,6 +191,28 @@ void quantized_add_asym8uxasym8u_asym8u_per_tensor_out( int64_t out_zero_point, ::executorch::aten::Tensor& out); +void im2row_out( + ::executorch::runtime::KernelRuntimeContext& ctx, + const ::executorch::aten::Tensor& input, + ::executorch::aten::IntArrayRef kernel_size, + ::executorch::aten::IntArrayRef dilation, + ::executorch::aten::IntArrayRef padding, + ::executorch::aten::IntArrayRef stride, + const ::executorch::aten::Tensor& in_zero_point, + bool channel_last, + ::executorch::aten::Tensor& out); + +void im2row_per_tensor_out( + ::executorch::runtime::KernelRuntimeContext& ctx, + const ::executorch::aten::Tensor& input, + ::executorch::aten::IntArrayRef kernel_size, + ::executorch::aten::IntArrayRef dilation, + ::executorch::aten::IntArrayRef padding, + ::executorch::aten::IntArrayRef stride, + int64_t in_zero_point, + bool channel_last, + ::executorch::aten::Tensor& out); + } // namespace native } // namespace HiFi } // namespace impl diff --git a/backends/cadence/hifi/operators/targets.bzl b/backends/cadence/hifi/operators/targets.bzl index 1f9814c4a4e..4c9454ab85b 100644 --- a/backends/cadence/hifi/operators/targets.bzl +++ b/backends/cadence/hifi/operators/targets.bzl @@ -53,6 +53,7 @@ OPERATORS = [ "ge", "gt", "hardtanh", + "im2row_out", "le", "lt", "masked_fill", diff --git a/backends/cadence/hifi/operators/tests/test_op_im2row_out.cpp b/backends/cadence/hifi/operators/tests/test_op_im2row_out.cpp new file mode 100644 index 00000000000..c4883372823 --- /dev/null +++ b/backends/cadence/hifi/operators/tests/test_op_im2row_out.cpp @@ -0,0 +1,439 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +namespace impl { +namespace HiFi { +namespace native { +namespace { + +using ::executorch::aten::ArrayRef; +using ::executorch::aten::IntArrayRef; +using ::executorch::aten::Scalar; +using ::executorch::aten::ScalarType; +using ::executorch::aten::Tensor; +using ::executorch::aten::TensorImpl; +using ::executorch::runtime::Error; +using ::executorch::runtime::KernelRuntimeContext; +using ::executorch::runtime::runtime_init; +using ::executorch::runtime::testing::TensorFactory; + +class HiFiIm2rowTest : public OperatorTest { + public: + protected: + void im2row_out( + const Tensor& input, + IntArrayRef kernel_size, + IntArrayRef dilation, + IntArrayRef padding, + IntArrayRef stride, + const Tensor& in_zero_point, + bool channel_last, + Tensor& out) { + ::impl::HiFi::native::im2row_out( + context_, input, kernel_size, dilation, padding, stride, + in_zero_point, channel_last, out); + } + + void im2row_per_tensor_out( + const Tensor& input, + IntArrayRef kernel_size, + IntArrayRef dilation, + IntArrayRef padding, + IntArrayRef stride, + int64_t in_zero_point, + bool channel_last, + Tensor& out) { + ::impl::HiFi::native::im2row_per_tensor_out( + context_, input, kernel_size, dilation, padding, stride, + in_zero_point, channel_last, out); + } +}; + +// Test basic 3x3 kernel with NCHW layout +TEST_F(HiFiIm2rowTest, Basic3x3Kernel) { + TensorFactory tf; + TensorFactory tf_int; + + // Input shape: (1, 8, 5, 4) - batch, channels, height, width + const std::vector input_sizes{1, 8, 5, 4}; + const int64_t kernel_size[] = {3, 3}; + const int64_t dilation[] = {1, 1}; + const int64_t padding[] = {0, 0}; + const int64_t stride[] = {1, 1}; + const bool channel_last = false; + + // Calculate output dimensions + // out_h = (in_h + 2*pad_h - dilation_h*(kernel_h-1) - 1) / stride_h + 1 + // = (5 + 0 - 1*2 - 1) / 1 + 1 = 3 + // out_w = (4 + 0 - 1*2 - 1) / 1 + 1 = 2 + // output_shape: (batch, out_h*out_w, kernel_h*kernel_w*channels) + // = (1, 6, 72) + const std::vector output_sizes{1, 6, 72}; + + Tensor input = tf.ones(input_sizes); + Tensor zero_point = tf_int.zeros({1}); + Tensor out = tf.zeros(output_sizes); + + im2row_out(input, kernel_size, dilation, padding, stride, + zero_point, channel_last, out); + + // Print ALL output values to check for zero output error + const float* out_data = out.const_data_ptr(); + std::cout << "\n=== Basic3x3Kernel Output (all " << out.numel() << " elements) ===" << std::endl; + for (int i = 0; i < out.numel(); ++i) { + std::cout << out_data[i] << " "; + if ((i + 1) % 10 == 0) std::cout << std::endl; // New line every 10 elements + } + std::cout << std::endl; + + // Verify output has NO zeros at all + int zero_count = 0; + for (int i = 0; i < out.numel(); ++i) { + if (out_data[i] == 0.0f) { + zero_count++; + if (zero_count <= 10) { + std::cout << "ZERO found at index " << i << std::endl; + } + } + } + std::cout << "Total zeros found: " << zero_count << " out of " << out.numel() << " elements" << std::endl; + EXPECT_EQ(zero_count, 0) << "Output should have NO zeros, but found " << zero_count << " zeros"; +} + +// Test with stride=2 +TEST_F(HiFiIm2rowTest, WithStride2) { + TensorFactory tf; + TensorFactory tf_int; + + const std::vector input_sizes{1, 8, 5, 4}; + const int64_t kernel_size[] = {3, 3}; + const int64_t dilation[] = {1, 1}; + const int64_t padding[] = {0, 0}; + const int64_t stride[] = {2, 2}; + const bool channel_last = false; + + // out_h = (5 + 0 - 1*2 
- 1) / 2 + 1 = 2 + // out_w = (4 + 0 - 1*2 - 1) / 2 + 1 = 1 + const std::vector output_sizes{1, 2, 72}; + + Tensor input = tf.ones(input_sizes); + Tensor zero_point = tf_int.zeros({1}); + Tensor out = tf.zeros(output_sizes); + + im2row_out(input, kernel_size, dilation, padding, stride, + zero_point, channel_last, out); + + // Print ALL output values + const float* out_data = out.const_data_ptr(); + std::cout << "\n=== WithStride2 Output (all " << out.numel() << " elements) ===" << std::endl; + for (int i = 0; i < out.numel(); ++i) { + std::cout << out_data[i] << " "; + if ((i + 1) % 10 == 0) std::cout << std::endl; + } + std::cout << std::endl; + + // Verify output has NO zeros at all + int zero_count = 0; + for (int i = 0; i < out.numel(); ++i) { + if (out_data[i] == 0.0f) { + zero_count++; + if (zero_count <= 10) { + std::cout << "ZERO found at index " << i << std::endl; + } + } + } + std::cout << "Total zeros found: " << zero_count << " out of " << out.numel() << " elements" << std::endl; + EXPECT_EQ(zero_count, 0) << "Output should have NO zeros, but found " << zero_count << " zeros"; +} + +// Test with padding +TEST_F(HiFiIm2rowTest, WithPadding) { + TensorFactory tf; + TensorFactory tf_int; + + const std::vector input_sizes{1, 8, 5, 4}; + const int64_t kernel_size[] = {3, 3}; + const int64_t dilation[] = {1, 1}; + const int64_t padding[] = {1, 1}; + const int64_t stride[] = {1, 1}; + const bool channel_last = false; + + // out_h = (5 + 2*1 - 1*2 - 1) / 1 + 1 = 5 + // out_w = (4 + 2*1 - 1*2 - 1) / 1 + 1 = 4 + const std::vector output_sizes{1, 20, 72}; + + Tensor input = tf.ones(input_sizes); + Tensor zero_point = tf_int.zeros({1}); + Tensor out = tf.zeros(output_sizes); + + im2row_out(input, kernel_size, dilation, padding, stride, + zero_point, channel_last, out); + + // Print ALL output values + const float* out_data = out.const_data_ptr(); + std::cout << "\n=== WithPadding Output (all " << out.numel() << " elements) ===" << std::endl; + for (int i = 0; i < out.numel(); ++i) { + std::cout << out_data[i] << " "; + if ((i + 1) % 10 == 0) std::cout << std::endl; + } + std::cout << std::endl; + + // Verify output has NO zeros at all + int zero_count = 0; + for (int i = 0; i < out.numel(); ++i) { + if (out_data[i] == 0.0f) { + zero_count++; + if (zero_count <= 10) { + std::cout << "ZERO found at index " << i << std::endl; + } + } + } + std::cout << "Total zeros found: " << zero_count << " out of " << out.numel() << " elements" << std::endl; + EXPECT_EQ(zero_count, 0) << "Output should have NO zeros, but found " << zero_count << " zeros"; +} + +// Test channels last (NHWC) layout +TEST_F(HiFiIm2rowTest, ChannelsLast) { + TensorFactory tf; + TensorFactory tf_int; + + // Input shape for NHWC: (1, 5, 8, 8) - batch, height, width, channels + const std::vector input_sizes{1, 5, 8, 8}; + const int64_t kernel_size[] = {3, 3}; + const int64_t dilation[] = {1, 1}; + const int64_t padding[] = {0, 0}; + const int64_t stride[] = {1, 1}; + const bool channel_last = true; + + // out_h = (5 + 0 - 1*2 - 1) / 1 + 1 = 3 + // out_w = (8 + 0 - 1*2 - 1) / 1 + 1 = 6 + // channels = 8 + const std::vector output_sizes{1, 18, 72}; + + Tensor input = tf.ones(input_sizes); + Tensor zero_point = tf_int.zeros({1}); + Tensor out = tf.zeros(output_sizes); + + im2row_out(input, kernel_size, dilation, padding, stride, + zero_point, channel_last, out); + + // Print ALL output values + const float* out_data = out.const_data_ptr(); + std::cout << "\n=== ChannelsLast Output (all " << out.numel() << " elements) ===" << 
std::endl; + for (int i = 0; i < out.numel(); ++i) { + std::cout << out_data[i] << " "; + if ((i + 1) % 10 == 0) std::cout << std::endl; + } + std::cout << std::endl; + + // Verify output has NO zeros at all + int zero_count = 0; + for (int i = 0; i < out.numel(); ++i) { + if (out_data[i] == 0.0f) { + zero_count++; + if (zero_count <= 10) { + std::cout << "ZERO found at index " << i << std::endl; + } + } + } + std::cout << "Total zeros found: " << zero_count << " out of " << out.numel() << " elements" << std::endl; + EXPECT_EQ(zero_count, 0) << "Output should have NO zeros, but found " << zero_count << " zeros"; +} + +// Test with dilation +TEST_F(HiFiIm2rowTest, WithDilation) { + TensorFactory tf; + TensorFactory tf_int; + + const std::vector input_sizes{1, 8, 6, 5}; + const int64_t kernel_size[] = {3, 3}; + const int64_t dilation[] = {2, 2}; + const int64_t padding[] = {2, 2}; + const int64_t stride[] = {1, 1}; + const bool channel_last = false; + + // out_h = (6 + 2*2 - 2*2 - 1) / 1 + 1 = 6 + // out_w = (5 + 2*2 - 2*2 - 1) / 1 + 1 = 5 + const std::vector output_sizes{1, 30, 72}; + + Tensor input = tf.ones(input_sizes); + Tensor zero_point = tf_int.zeros({1}); + Tensor out = tf.zeros(output_sizes); + + im2row_out(input, kernel_size, dilation, padding, stride, + zero_point, channel_last, out); + + // Print ALL output values + const float* out_data = out.const_data_ptr(); + std::cout << "\n=== WithDilation Output (all " << out.numel() << " elements) ===" << std::endl; + for (int i = 0; i < out.numel(); ++i) { + std::cout << out_data[i] << " "; + if ((i + 1) % 10 == 0) std::cout << std::endl; + } + std::cout << std::endl; + + // Verify output has NO zeros at all + int zero_count = 0; + for (int i = 0; i < out.numel(); ++i) { + if (out_data[i] == 0.0f) { + zero_count++; + if (zero_count <= 10) { + std::cout << "ZERO found at index " << i << std::endl; + } + } + } + std::cout << "Total zeros found: " << zero_count << " out of " << out.numel() << " elements" << std::endl; + EXPECT_EQ(zero_count, 0) << "Output should have NO zeros, but found " << zero_count << " zeros"; +} + +// Test im2row_per_tensor_out with zero_point = 0 +TEST_F(HiFiIm2rowTest, PerTensorZeroPointZero) { + TensorFactory tf; + + const std::vector input_sizes{1, 8, 5, 4}; + const int64_t kernel_size[] = {3, 3}; + const int64_t dilation[] = {1, 1}; + const int64_t padding[] = {0, 0}; + const int64_t stride[] = {1, 1}; + const int64_t in_zero_point = 0; + const bool channel_last = false; + + const std::vector output_sizes{1, 6, 72}; + + Tensor input = tf.ones(input_sizes); + Tensor out = tf.zeros(output_sizes); + + im2row_per_tensor_out(input, kernel_size, dilation, padding, stride, + in_zero_point, channel_last, out); + + // Print ALL output values + const float* out_data = out.const_data_ptr(); + std::cout << "\n=== PerTensorZeroPointZero Output (all " << out.numel() << " elements) ===" << std::endl; + for (int i = 0; i < out.numel(); ++i) { + std::cout << out_data[i] << " "; + if ((i + 1) % 10 == 0) std::cout << std::endl; + } + std::cout << std::endl; + + // Verify output has NO zeros at all + int zero_count = 0; + for (int i = 0; i < out.numel(); ++i) { + if (out_data[i] == 0.0f) { + zero_count++; + if (zero_count <= 10) { + std::cout << "ZERO found at index " << i << std::endl; + } + } + } + std::cout << "Total zeros found: " << zero_count << " out of " << out.numel() << " elements" << std::endl; + EXPECT_EQ(zero_count, 0) << "Output should have NO zeros, but found " << zero_count << " zeros"; +} + +// Test 
im2row_per_tensor_out with non-zero zero_point +TEST_F(HiFiIm2rowTest, PerTensorNonZeroZeroPoint) { + TensorFactory tf; + + const std::vector input_sizes{1, 8, 5, 4}; + const int64_t kernel_size[] = {3, 3}; + const int64_t dilation[] = {1, 1}; + const int64_t padding[] = {0, 0}; + const int64_t stride[] = {1, 1}; + const int64_t in_zero_point = 128; + const bool channel_last = false; + + const std::vector output_sizes{1, 6, 72}; + + Tensor input = tf.ones(input_sizes); + Tensor out = tf.zeros(output_sizes); + + im2row_per_tensor_out(input, kernel_size, dilation, padding, stride, + in_zero_point, channel_last, out); + + // Print ALL output values + const float* out_data = out.const_data_ptr(); + std::cout << "\n=== PerTensorNonZeroZeroPoint Output (all " << out.numel() << " elements) ===" << std::endl; + for (int i = 0; i < out.numel(); ++i) { + std::cout << out_data[i] << " "; + if ((i + 1) % 10 == 0) std::cout << std::endl; + } + std::cout << std::endl; + + // Verify output has NO zeros at all + int zero_count = 0; + for (int i = 0; i < out.numel(); ++i) { + if (out_data[i] == 0.0f) { + zero_count++; + if (zero_count <= 10) { + std::cout << "ZERO found at index " << i << std::endl; + } + } + } + std::cout << "Total zeros found: " << zero_count << " out of " << out.numel() << " elements" << std::endl; + EXPECT_EQ(zero_count, 0) << "Output should have NO zeros, but found " << zero_count << " zeros"; +} + +// Test im2row_per_tensor_out with channels last and non-zero zero_point +TEST_F(HiFiIm2rowTest, PerTensorChannelsLastNonZeroZeroPoint) { + TensorFactory tf; + + const std::vector input_sizes{1, 5, 8, 8}; + const int64_t kernel_size[] = {3, 3}; + const int64_t dilation[] = {1, 1}; + const int64_t padding[] = {0, 0}; + const int64_t stride[] = {1, 1}; + const int64_t in_zero_point = 64; + const bool channel_last = true; + + const std::vector output_sizes{1, 18, 72}; + + Tensor input = tf.ones(input_sizes); + Tensor out = tf.zeros(output_sizes); + + im2row_per_tensor_out(input, kernel_size, dilation, padding, stride, + in_zero_point, channel_last, out); + + // Print ALL output values + const float* out_data = out.const_data_ptr(); + std::cout << "\n=== PerTensorChannelsLastNonZeroZeroPoint Output (all " << out.numel() << " elements) ===" << std::endl; + for (int i = 0; i < out.numel(); ++i) { + std::cout << out_data[i] << " "; + if ((i + 1) % 10 == 0) std::cout << std::endl; + } + std::cout << std::endl; + + // Verify output has NO zeros at all + int zero_count = 0; + for (int i = 0; i < out.numel(); ++i) { + if (out_data[i] == 0.0f) { + zero_count++; + if (zero_count <= 10) { + std::cout << "ZERO found at index " << i << std::endl; + } + } + } + std::cout << "Total zeros found: " << zero_count << " out of " << out.numel() << " elements" << std::endl; + EXPECT_EQ(zero_count, 0) << "Output should have NO zeros, but found " << zero_count << " zeros"; +} + +} // namespace +} // namespace native +} // namespace HiFi +} // namespace impl diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_greater_lesser_equal_f32.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_greater_lesser_equal_f32.c index 792b152e1fa..35581a42471 100644 --- a/backends/cadence/hifi/third-party/nnlib/xa_nn_greater_lesser_equal_f32.c +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_greater_lesser_equal_f32.c @@ -2,6 +2,7 @@ #include "xa_nnlib_common_fpu.h" #include "xa_nn_common.h" #include "xa_nnlib_err_chk.h" +//#include "xa_nn_basic_state.h" #include "xa_nnlib_kernels_api.h" diff --git 
a/backends/cadence/hifi/third-party/nnlib/xa_nn_im2row.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_im2row.c new file mode 100644 index 00000000000..3746991d430 --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_im2row.c @@ -0,0 +1,133 @@ +#include "xa_type_def.h" +#include "xa_nnlib_common_fpu.h" +#include "xa_nn_common.h" +#include "xa_nnlib_err_chk.h" +//#include "xa_nn_basic_state.h" +#include "xa_nnlib_kernels_api.h" + +WORD32 xa_nn_im2row_quantized( + const WORD8* __restrict__ data_im, + const WORD32 in_zero_point, + /* input parameters*/ + const WORD32 channels, + const WORD32 height, + const WORD32 width, + /* output parameters */ + const WORD32 out_height, + const WORD32 out_width, + /* convolution parameters */ + const WORD32 kernel_h, + const WORD32 kernel_w, + const WORD32 pad_h, + const WORD32 pad_w, + const WORD32 stride_h, + const WORD32 stride_w, + const WORD32 dilation_h, + const WORD32 dilation_w, + WORD8* __restrict__ data_col, + WORD32 channels_last) +{ + const WORD32 channels_col = channels * kernel_h * kernel_w; + + // If the layout is NHWC, we can copy 'channels' worth of contiguous data + // points when performing im2row. + if (channels_last) { + // Iterate over the output domain + for (int _h = 0; _h < out_height; ++_h) { + for (int _w = 0; _w < out_width; ++_w) { + int32_t i_col = _h * out_width + _w; + // Each point in the output domain is the result of applying a filter of + // size kernel_h x kernel_w x channels on the input. But since channels + // is contiguous, we will not explicitly have a loop for it. + for (int _kh = 0; _kh < kernel_h; ++_kh) { + int32_t h_im = _h * stride_h - pad_h + _kh * dilation_h; + for (int _kw = 0; _kw < kernel_w; ++_kw) { + int32_t w_im = _w * stride_w - pad_w + _kw * dilation_w; + + // h_im and w_im are the actual height and width coordinates of the + // input tensor from where we need to copy 'channels' points. + const int8_t* __restrict__ slice_im = + data_im + (h_im * width + w_im) * channels; + int8_t* __restrict__ slice_col = data_col + i_col * channels_col + + (_kh * kernel_w + _kw) * channels; + // If the coordinates were within the input domain, we copy + // 'channels' contiguous values. Otherwise we will fill the output + // with 0's. 
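+            // The vectorized copy below moves 6 bytes (two 24-bit lanes) per
+            // iteration via AE_LA24X2/AE_SA24X2 and finishes any remaining
+            // channels % 6 bytes with a scalar tail loop; the padding branch
+            // stores the replicated zero point in the same pattern.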
+            if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
+              const ae_int24x2 *pae_inp = (const ae_int24x2 *)slice_im;
+              ae_int24x2 *pae_out = (ae_int24x2 *)slice_col;
+              ae_valign inp_a, out_a;
+              inp_a = AE_LA64_PP(pae_inp);
+              out_a = AE_ZALIGN64();
+
+              // Copy full groups of 6 bytes (two 24-bit lanes) at a time.
+              int ic;
+              for(ic = 0; ic + 6 <= channels; ic += 6)
+              {
+                ae_int24x2 d0;
+                AE_LA24X2_IP(d0, inp_a, pae_inp);
+                AE_SA24X2_IP(d0, out_a, pae_out);
+              }
+              AE_SA64POS_FP(out_a, pae_out);
+              // Copy the remaining (channels % 6) bytes, if any.
+              for(int i = ic; i < channels; i++)
+              {
+                slice_col[i] = slice_im[i];
+              }
+            }
+            else {
+              ae_int24x2 *pae_out = (ae_int24x2 *)slice_col;
+              ae_valign out_a;
+              out_a = AE_ZALIGN64();
+
+              // Replicate the 8-bit zero point into all three bytes of each
+              // 24-bit lane.
+              ae_int32x2 tmp = AE_MOVDA32(in_zero_point);
+              ae_int32x2 in_zero_point32x2 = AE_SLLI32(tmp, 8);
+              in_zero_point32x2 = AE_OR32(in_zero_point32x2, tmp);
+              in_zero_point32x2 = AE_SLLI32(in_zero_point32x2, 8);
+              in_zero_point32x2 = AE_OR32(in_zero_point32x2, tmp);
+
+              ae_int24x2 d0 = AE_MOVINT24X2_FROMINT32X2(in_zero_point32x2);
+              int ic;
+              for(ic = 0; ic + 6 <= channels; ic += 6)
+              {
+                AE_SA24X2_IP(d0, out_a, pae_out);
+              }
+              AE_SA64POS_FP(out_a, pae_out);
+              // Fill the remaining (channels % 6) bytes, if any.
+              for(int i = ic; i < channels; i++)
+              {
+                slice_col[i] = (int8_t)(in_zero_point);
+              }
+            }
+          }
+        }
+      }
+    }
+  } else {
+    // Iterate over the output domain
+    for (int _h = 0; _h < out_height; ++_h) {
+      for (int _w = 0; _w < out_width; ++_w) {
+        int32_t i_col = _h * out_width + _w;
+
+        // Each point in the output domain is the result of applying a filter
+        // of size channels * kernel_h * kernel_w on the input
+        for (int _c = 0; _c < channels; ++_c) {
+          for (int _kh = 0; _kh < kernel_h; ++_kh) {
+            for (int _kw = 0; _kw < kernel_w; ++_kw) {
+              // c_col is the linearized access in the channels_col vector.
+              int32_t c_col = (_c * kernel_h + _kh) * kernel_w + _kw;
+              // h_im and w_im are the actual height and width coordinates of
+              // the input tensor that we need to copy to the output.
+              int32_t h_im = _h * stride_h - pad_h + _kh * dilation_h;
+              int32_t w_im = _w * stride_w - pad_w + _kw * dilation_w;
+              // If the current data access is within the input tensor, copy
+              // the value
+              data_col[i_col * channels_col + c_col] =
+                  (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width)
+                  ? data_im[(_c * height + h_im) * width + w_im]
+                  : (int8_t)(in_zero_point);
+            }
+          }
+        }
+      }
+    }
+  }
+  return 0;
+}
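For reference, the output-shape contract enforced by the ET_DCHECK_MSG calls in op_im2row_out.cpp can be summarized by a small standalone helper. This is an illustrative sketch only; the struct and function names below are hypothetical and not part of the diff:

#include <cstdint>

// Expected sizes of the im2row output tensor for one batch entry.
struct Im2rowOutShape {
  int32_t rows; // out_h * out_w               -> must equal out.size(1)
  int32_t cols; // kernel_h * kernel_w * in_c  -> must equal out.size(2)
};

inline Im2rowOutShape im2row_expected_shape(
    int32_t in_c, int32_t in_h, int32_t in_w,
    int32_t kernel_h, int32_t kernel_w,
    int32_t pad_h, int32_t pad_w,
    int32_t stride_h, int32_t stride_w,
    int32_t dilation_h, int32_t dilation_w) {
  // Same convolution arithmetic used by im2row_out / im2row_per_tensor_out.
  int32_t out_h =
      (in_h + 2 * pad_h - dilation_h * (kernel_h - 1) - 1) / stride_h + 1;
  int32_t out_w =
      (in_w + 2 * pad_w - dilation_w * (kernel_w - 1) - 1) / stride_w + 1;
  return {out_h * out_w, kernel_h * kernel_w * in_c};
}

For example, a (1, 8, 5, 4) NCHW input with a 3x3 kernel, unit stride and dilation, and no padding gives rows = 3 * 2 = 6 and cols = 3 * 3 * 8 = 72, matching the {1, 6, 72} output tensor used in test_op_im2row_out.cpp.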