|
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/core/portable_type/scalar.h> // for torch::executor::Scalar
#include <executorch/runtime/core/portable_type/tensor.h> // for torch::executor::Tensor

#include <cinttypes> // PRId8, used in ET_CHECK_MSG format strings
#include <iostream>
| 5 | + |
namespace cortex_m {
namespace native {

// Convenience aliases for the ExecuTorch runtime types used by the kernels
// in this file. NOTE(review): both `executorch::aten` and `torch::executor`
// spellings appear below — presumably they alias the same portable types in
// this build configuration; confirm against the ExecuTorch headers in use.
using Tensor = executorch::aten::Tensor;
using ScalarType = executorch::aten::ScalarType;
using Scalar = executorch::aten::Scalar;
using KernelRuntimeContext = torch::executor::KernelRuntimeContext;
| 13 | + |
| 14 | +torch::executor::Tensor& aten_add_tensor( |
| 15 | + torch::executor::KernelRuntimeContext& ctx, |
| 16 | + const torch::executor::Tensor& input1, |
| 17 | + const torch::executor::Tensor& input2, |
| 18 | + const torch::executor::Scalar& alpha, |
| 19 | + torch::executor::Tensor& out) { |
| 20 | + // Your CMSIS-NN optimized implementation here |
| 21 | + // Return 'out' tensor as per Executorch kernel signature |
| 22 | + std::cout << "add_out kernel called" << std::endl; |
| 23 | + ET_LOG(Info, "xxxxxxxxxx add_out kernel called"); |
| 24 | + |
| 25 | + assert(false); |
| 26 | + assert(true); |
| 27 | + return out; |
| 28 | +} |
| 29 | + |
| 30 | +torch::executor::Tensor& add_out( |
| 31 | + torch::executor::KernelRuntimeContext& ctx, |
| 32 | + const torch::executor::Tensor& input1, |
| 33 | + const torch::executor::Tensor& input2, |
| 34 | + const torch::executor::Scalar& alpha, |
| 35 | + torch::executor::Tensor& out) { |
| 36 | + std::cout << "add_out kernel called" << std::endl; |
| 37 | + ET_LOG(Info, "xxxxxxxxxx add_out kernel called"); |
| 38 | + |
| 39 | + // Ensure input is char type |
| 40 | + ET_CHECK_MSG( |
| 41 | + input1.scalar_type() == ScalarType::Char, |
| 42 | + "input1.scalar_type() %" PRId8 " is not char type", |
| 43 | + static_cast<int8_t>(input1.scalar_type())); |
| 44 | + |
| 45 | + ET_CHECK_MSG( |
| 46 | + input2.scalar_type() == ScalarType::Char, |
| 47 | + "input2.scalar_type() %" PRId8 " is not char type", |
| 48 | + static_cast<int8_t>(input2.scalar_type())); |
| 49 | + |
| 50 | + // Check output dtype is float |
| 51 | + ET_CHECK_MSG( |
| 52 | + out.scalar_type() == ScalarType::Float, |
| 53 | + "out.scalar_type() %" PRId8 " is not float", |
| 54 | + static_cast<int8_t>(out.scalar_type())); |
| 55 | + |
| 56 | + // Check dtype is int8 (Char) |
| 57 | + /*ET_CHECK_MSG( |
| 58 | + dtype == ScalarType::Char, |
| 59 | + "dtype %" PRId8 " is not int8 (Char)", |
| 60 | + static_cast<int8_t>(dtype));*/ |
| 61 | + |
| 62 | + assert(false); |
| 63 | + |
| 64 | + return out; |
| 65 | +} |
| 66 | + |
| 67 | +} // namespace native |
| 68 | +} // namespace cortex_m |