Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 14 additions & 15 deletions backends/cadence/executor_runner.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@

static uint8_t method_allocator_pool[18 * 1024U]; // 18 KB

using namespace torch::executor;
#include <xtensa/config/core.h>

#define APP_MU MUB
Expand All @@ -48,8 +47,8 @@ using namespace torch::executor;
/* How many messages are used to test message sending */
#define MSG_LENGTH 32U

using torch::executor::Error;
using torch::executor::Result;
using executorch::runtime::Error;
using executorch::runtime::Result;

void LED_INIT();
void LED_TOGGLE();
Expand Down Expand Up @@ -106,13 +105,13 @@ int main(int argc, char** argv) {
BOARD_InitDebugConsole();
ET_LOG(Info, "Booted up in DSP.");

torch::executor::runtime_init();
executorch::runtime::runtime_init();

auto loader =
torch::executor::util::BufferDataLoader(model_pte, sizeof(model_pte));
executorch::extension::BufferDataLoader(model_pte, sizeof(model_pte));

Result<torch::executor::Program> program =
torch::executor::Program::load(&loader);
Result<executorch::runtime::Program> program =
executorch::runtime::Program::load(&loader);
if (!program.ok()) {
ET_LOG(
Error,
Expand All @@ -132,7 +131,7 @@ int main(int argc, char** argv) {
}
ET_LOG(Info, "ET: Running method %s", method_name);

Result<torch::executor::MethodMeta> method_meta =
Result<executorch::runtime::MethodMeta> method_meta =
program->method_meta(method_name);
if (!method_meta.ok()) {
ET_LOG(
Expand All @@ -142,12 +141,12 @@ int main(int argc, char** argv) {
(unsigned int)method_meta.error());
}

torch::executor::MemoryAllocator method_allocator{
torch::executor::MemoryAllocator(
executorch::runtime::MemoryAllocator method_allocator{
executorch::runtime::MemoryAllocator(
sizeof(method_allocator_pool), method_allocator_pool)};

std::vector<std::unique_ptr<uint8_t[]>> planned_buffers; // Owns the memory
std::vector<torch::executor::Span<uint8_t>>
std::vector<executorch::runtime::Span<uint8_t>>
planned_spans; // Passed to the allocator
size_t num_memory_planned_buffers = method_meta->num_memory_planned_buffers();

Expand All @@ -161,13 +160,13 @@ int main(int argc, char** argv) {
planned_spans.push_back({planned_buffers.back().get(), buffer_size});
}

torch::executor::HierarchicalAllocator planned_memory(
executorch::runtime::HierarchicalAllocator planned_memory(
{planned_spans.data(), planned_spans.size()});

torch::executor::MemoryManager memory_manager(
executorch::runtime::MemoryManager memory_manager(
&method_allocator, &planned_memory);

Result<torch::executor::Method> method =
Result<executorch::runtime::Method> method =
program->load_method(method_name, &memory_manager);
if (!method.ok()) {
ET_LOG(
Expand All @@ -178,7 +177,7 @@ int main(int argc, char** argv) {
}

ET_LOG(Info, "Method loaded.");
torch::executor::util::prepare_input_tensors(*method);
executorch::extension::prepare_input_tensors(*method);
ET_LOG(Info, "Starting the model execution...");

Error status = method->execute();
Expand Down
4 changes: 2 additions & 2 deletions backends/cadence/hifi/operators/dequantize_per_tensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,9 @@ namespace impl {
namespace HiFi {
namespace native {

using Tensor = exec_aten::Tensor;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using executorch::runtime::KernelRuntimeContext;
using ScalarType = exec_aten::ScalarType;

void dequantize_per_tensor_out(
KernelRuntimeContext& context,
Expand Down
4 changes: 2 additions & 2 deletions backends/cadence/hifi/operators/quantize_per_tensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,9 @@ namespace impl {
namespace HiFi {
namespace native {

using Tensor = exec_aten::Tensor;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using executorch::runtime::KernelRuntimeContext;
using ScalarType = exec_aten::ScalarType;

// Quantize the input tensor (PT2 version). Note that quant_<min,max> are not
// used in any computation.
Expand Down
8 changes: 4 additions & 4 deletions backends/cadence/hifi/operators/quantized_layer_norm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
#include <cmath>
#include <tuple>

using Tensor = exec_aten::Tensor;
using executorch::aten::Tensor;
using executorch::runtime::KernelRuntimeContext;

namespace impl {
Expand Down Expand Up @@ -119,14 +119,14 @@ void quantized_layer_norm_out(
const Tensor& input,
const Tensor& in_scale,
const Tensor& in_zero_point,
const exec_aten::IntArrayRef normalized_shape,
const executorch::aten::IntArrayRef normalized_shape,
const Tensor& weight,
const Tensor& bias,
double eps,
double output_scale,
int64_t output_zero_point,
Tensor& out) {
if (input.scalar_type() == exec_aten::ScalarType::Byte) {
if (input.scalar_type() == executorch::aten::ScalarType::Byte) {
quantized_layer_norm_<uint8_t>(
input,
in_scale,
Expand All @@ -137,7 +137,7 @@ void quantized_layer_norm_out(
output_scale,
output_zero_point,
out);
} else if (input.scalar_type() == exec_aten::ScalarType::Char) {
} else if (input.scalar_type() == executorch::aten::ScalarType::Char) {
quantized_layer_norm_<int8_t>(
input,
in_scale,
Expand Down
4 changes: 2 additions & 2 deletions backends/cadence/hifi/operators/quantized_linear_out.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ namespace impl {
namespace HiFi {
namespace native {

using Tensor = exec_aten::Tensor;
using executorch::aten::Tensor;
using executorch::runtime::KernelRuntimeContext;

void quantized_linear_out(
Expand All @@ -28,7 +28,7 @@ void quantized_linear_out(
const Tensor& out_multiplier,
const Tensor& out_shift,
int64_t out_zero_point,
const exec_aten::optional<Tensor>& offset,
const executorch::aten::optional<Tensor>& offset,
Tensor& out) {
// input comes in shape [leading_dims, in_dim]
// weight comes in shape [out_dim, in_dim]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,9 @@ namespace impl {
namespace reference {
namespace native {

using Tensor = exec_aten::Tensor;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using executorch::runtime::KernelRuntimeContext;
using ScalarType = exec_aten::ScalarType;

void dequantize_per_tensor_out(
KernelRuntimeContext& context,
Expand Down
2 changes: 1 addition & 1 deletion backends/cadence/reference/operators/op_embedding.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ namespace torch {
namespace executor {
namespace native {

using Tensor = exec_aten::Tensor;
using executorch::aten::Tensor;
using executorch::runtime::KernelRuntimeContext;

void embedding_out(
Expand Down
4 changes: 2 additions & 2 deletions backends/cadence/reference/operators/op_full.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,8 @@ namespace torch {
namespace executor {
namespace native {

using Tensor = exec_aten::Tensor;
using ScalarType = exec_aten::ScalarType;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;

Tensor& full_out(
KernelRuntimeContext& ctx,
Expand Down
2 changes: 1 addition & 1 deletion backends/cadence/reference/operators/op_view_copy.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ namespace torch {
namespace executor {
namespace native {

using Tensor = exec_aten::Tensor;
using executorch::aten::Tensor;
using executorch::runtime::KernelRuntimeContext;

Tensor& view_copy_out(
Expand Down
4 changes: 2 additions & 2 deletions backends/cadence/reference/operators/quantize_per_tensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,9 @@ namespace impl {
namespace reference {
namespace native {

using Tensor = exec_aten::Tensor;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using executorch::runtime::KernelRuntimeContext;
using ScalarType = exec_aten::ScalarType;

// Quantize the input tensor (PT2 version). Note that quant_<min,max> are not
// used in any computation.
Expand Down
8 changes: 4 additions & 4 deletions backends/cadence/reference/operators/quantized_conv_out.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ namespace impl {
namespace reference {
namespace native {

using Tensor = exec_aten::Tensor;
using executorch::aten::Tensor;
using executorch::runtime::KernelRuntimeContext;

// This implements a generic 2d conv kernel that operates on raw pointers.
Expand Down Expand Up @@ -158,9 +158,9 @@ void quantized_conv_out(
const Tensor& input,
const Tensor& weight,
const Tensor& bias,
exec_aten::IntArrayRef stride,
exec_aten::IntArrayRef padding,
exec_aten::IntArrayRef dilation,
executorch::aten::IntArrayRef stride,
executorch::aten::IntArrayRef padding,
executorch::aten::IntArrayRef dilation,
int64_t groups,
int64_t in_zero_point,
const Tensor& weight_zero_point,
Expand Down
6 changes: 3 additions & 3 deletions backends/cadence/reference/operators/quantized_layer_norm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -115,14 +115,14 @@ void quantized_layer_norm_out(
const Tensor& input,
const Tensor& in_scale,
const Tensor& in_zero_point,
const exec_aten::IntArrayRef normalized_shape,
const executorch::aten::IntArrayRef normalized_shape,
const Tensor& weight,
const Tensor& bias,
double eps,
double output_scale,
int64_t output_zero_point,
Tensor& out) {
if (input.scalar_type() == exec_aten::ScalarType::Byte) {
if (input.scalar_type() == executorch::aten::ScalarType::Byte) {
quantized_layer_norm_<uint8_t>(
input,
in_scale,
Expand All @@ -133,7 +133,7 @@ void quantized_layer_norm_out(
output_scale,
output_zero_point,
out);
} else if (input.scalar_type() == exec_aten::ScalarType::Char) {
} else if (input.scalar_type() == executorch::aten::ScalarType::Char) {
quantized_layer_norm_<int8_t>(
input,
in_scale,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ void quantized_linear_out(
const Tensor& out_multiplier,
const Tensor& out_shift,
int64_t out_zero_point,
const exec_aten::optional<Tensor>& offset,
const executorch::aten::optional<Tensor>& offset,
Tensor& out) {
// Assuming uint8_t for now, but needs to be updated for other quantization
// types
Expand Down
8 changes: 4 additions & 4 deletions backends/cadence/reference/operators/quantized_matmul_out.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ void inline _typed_quantized_matmul(
int64_t X_zero_point,
const Tensor& Y,
int64_t Y_zero_point,
const exec_aten::optional<Tensor>& bias,
const executorch::aten::optional<Tensor>& bias,
int64_t out_multiplier,
int64_t out_shift,
int64_t out_zero_point,
Expand Down Expand Up @@ -114,13 +114,13 @@ void quantized_matmul_out(
int64_t X_zero_point,
const Tensor& Y,
int64_t Y_zero_point,
const exec_aten::optional<Tensor>& bias,
const executorch::aten::optional<Tensor>& bias,
int64_t out_multiplier,
int64_t out_shift,
int64_t out_zero_point,
bool transposed,
Tensor& out) {
if (out.scalar_type() == exec_aten::ScalarType::Byte) {
if (out.scalar_type() == executorch::aten::ScalarType::Byte) {
_typed_quantized_matmul<uint8_t>(
X,
X_zero_point,
Expand All @@ -132,7 +132,7 @@ void quantized_matmul_out(
out_zero_point,
transposed,
out);
} else if (out.scalar_type() == exec_aten::ScalarType::Char) {
} else if (out.scalar_type() == executorch::aten::ScalarType::Char) {
_typed_quantized_matmul<int8_t>(
X,
X_zero_point,
Expand Down
6 changes: 3 additions & 3 deletions backends/cadence/reference/operators/quantized_relu_out.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ namespace impl {
namespace reference {
namespace native {

using Tensor = exec_aten::Tensor;
using executorch::aten::Tensor;
using executorch::runtime::KernelRuntimeContext;

template <typename T>
Expand Down Expand Up @@ -51,15 +51,15 @@ void quantized_relu_out(
const Tensor& out_multiplier,
const Tensor& out_shift,
Tensor& output) {
if (input.scalar_type() == exec_aten::ScalarType::Byte) {
if (input.scalar_type() == executorch::aten::ScalarType::Byte) {
quantized_relu_<uint8_t>(
input,
in_zero_point,
out_zero_point,
out_multiplier,
out_shift,
output);
} else if (input.scalar_type() == exec_aten::ScalarType::Char) {
} else if (input.scalar_type() == executorch::aten::ScalarType::Char) {
quantized_relu_<int8_t>(
input,
in_zero_point,
Expand Down
Loading