Skip to content

Commit 29ec632

Browse files
authored
Integrating executorch (#11)
* Integrating executorch * Adding executorch static libs * Fixed build issue * Fixed missing lib * Disable ort
1 parent f9a6e61 commit 29ec632

File tree

141 files changed

+16791
-10
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

141 files changed

+16791
-10
lines changed

framework/src/vx_context.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@ vx_char targetModules[][VX_MAX_TARGET_NAME] = {
3636
"openvx-onnxRT",
3737
"openvx-ai-server",
3838
"openvx-liteRT",
39+
"openvx-torch",
3940
};
4041

4142
const vx_char extensions[] =

include/VX/vx_corevx_ext.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,10 @@ enum vx_kernel_ext_e
5656
* \brief The LiteRT CPU Inference kernel.
5757
*/
5858
VX_KERNEL_LITERT_CPU_INF = VX_KERNEL_BASE(VX_ID_EDGE_AI, VX_LIBRARY_KHR_BASE) + 0x3,
59+
/*!
60+
* \brief The Torch CPU Inference kernel.
61+
*/
62+
VX_KERNEL_TORCH_CPU_INF = VX_KERNEL_BASE(VX_ID_EDGE_AI, VX_LIBRARY_KHR_BASE) + 0x4,
5963
};
6064

6165
/*! \brief addtitional tensor attributes.

kernels/executorch/BUILD

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
2+
# Kernel library for the ExecuTorch ("torch") CPU inference backend.
# Compiled into the openvx-torch target module (see targets/executorch/BUILD).
cc_library(
    name = "torch_kernels",
    srcs = glob([
        "*.cpp",
    ]),
    hdrs = glob([
        "*.h",
        "*.hpp",
    ]),
    includes = [
        ".",
        # NOTE(review): Bazel's `includes` attribute takes package-relative
        # paths, not //-style labels — confirm this entry resolves as intended.
        "//framework/include",
    ],
    deps = [
        "//:corevx",
        "//third_party:executorch",
    ],
    # Keep all object files even if no symbol is referenced directly; kernel
    # registration typically happens via static initializers.
    alwayslink = True,
    visibility = ["//visibility:public"]
)

kernels/executorch/torch.hpp

Lines changed: 186 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,186 @@
1+
/**
 * @file torch.hpp
 * @brief ExecuTorch model runner used by the OpenVX torch CPU inference kernel.
 * @version 0.1
 * @date 2025-04-30
 *
 * @copyright Copyright (c) 2025
 *
 */
10+
#pragma once

#include <algorithm>  // std::transform
#include <cstdlib>    // free (ETDump buffer ownership)
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include <executorch/extension/module/module.h>
#include <executorch/extension/tensor/tensor.h>
#include <executorch/devtools/etdump/etdump_flatcc.h>

using namespace ::executorch::extension;
21+
22+
/**
23+
* @brief Class to run TFLite models
24+
*
25+
*/
26+
class TorchRunner
27+
{
28+
public:
29+
/**
30+
* @brief TorchRunner Constructor
31+
*/
32+
TorchRunner() : _modelLoaded(false), _traceEnabled(false), _module(nullptr) {};
33+
34+
/**
35+
* @brief Initialize the TFLite interpreter (load the model)
36+
* @param filename Path to the ONNX model file
37+
* @return VX_SUCCESS on success, VX_FAILURE otherwise
38+
*/
39+
vx_status init(std::string &filename)
40+
{
41+
vx_status status = VX_SUCCESS;
42+
43+
// Initialize the module
44+
if (!filename.empty() && !_modelLoaded)
45+
{
46+
// Load model
47+
_module = std::make_unique<Module>(filename, Module::LoadMode::MmapUseMlock, std::make_unique<executorch::etdump::ETDumpGen>());
48+
const auto error = _module->load(executorch::runtime::Program::Verification::InternalConsistency);
49+
50+
if (!_module->is_loaded())
51+
{
52+
std::cerr << "Failed to load module: " << filename << std::endl;
53+
status = VX_FAILURE;
54+
}
55+
56+
if (VX_SUCCESS == status)
57+
{
58+
// Set the model loaded flag
59+
_modelLoaded = true;
60+
}
61+
}
62+
63+
return status;
64+
}
65+
66+
/**
67+
* @brief Allocate memory for input and output tensors
68+
* @param inputTensors Input tensors
69+
* @param inputDims Input tensor dimensions
70+
* @param outputTensors Output tensors
71+
* @param outputDims Output tensor dimensions
72+
* @return VX_SUCCESS on success, VX_FAILURE otherwise
73+
*/
74+
vx_status allocate(
75+
std::vector<std::pair<float *, vx_size>> &inputTensors, std::vector<std::vector<size_t>> &inputDims,
76+
std::vector<std::pair<float *, vx_size>> &outputTensors, std::vector<std::vector<size_t>> &outputDims)
77+
{
78+
vx_status status = VX_SUCCESS;
79+
80+
// Check if the model is loaded
81+
if (!_modelLoaded)
82+
{
83+
std::cerr << "Model not loaded" << std::endl;
84+
status = VX_FAILURE;
85+
}
86+
87+
if (VX_SUCCESS == status)
88+
{
89+
// Allocate tensor pointers and bind with pre-allocated memory
90+
for (std::size_t i = 0; i < inputTensors.size(); ++i)
91+
{
92+
std::vector<executorch::aten::SizesType> dims;
93+
std::transform(inputDims[i].begin(), inputDims[i].end(), std::back_inserter(dims),
94+
[](size_t n)
95+
{ return static_cast<executorch::aten::SizesType>(n); });
96+
auto tensor = make_tensor_ptr(dims, inputTensors[i].first);
97+
// Bind input tensor to the module
98+
_module->set_input(tensor, i);
99+
}
100+
101+
for (std::size_t i = 0; i < outputTensors.size(); ++i)
102+
{
103+
std::vector<executorch::aten::SizesType> dims;
104+
std::transform(outputDims[i].begin(), outputDims[i].end(), std::back_inserter(dims),
105+
[](size_t n)
106+
{ return static_cast<executorch::aten::SizesType>(n); });
107+
auto tensor = make_tensor_ptr(dims, outputTensors[i].first);
108+
// Bind output tensor to the module
109+
_module->set_output(tensor, i);
110+
}
111+
}
112+
113+
return status;
114+
}
115+
116+
/**
117+
* @brief Run the kernel (execute the model)
118+
* @return VX_SUCCESS on success, VX_FAILURE otherwise
119+
*/
120+
vx_status run()
121+
{
122+
vx_status status = VX_SUCCESS;
123+
124+
// Check if the model is loaded
125+
if (!_modelLoaded)
126+
{
127+
std::cerr << "Model not loaded" << std::endl;
128+
status = VX_FAILURE;
129+
}
130+
131+
if (VX_SUCCESS == status)
132+
{
133+
try
134+
{
135+
// Run inference
136+
_module->load_forward();
137+
const auto result = _module->forward();
138+
139+
// Check the result
140+
if (!result.ok())
141+
{
142+
std::cerr << "Failed to run inference" << std::endl;
143+
if (_traceEnabled)
144+
{
145+
dumpTrace();
146+
}
147+
status = VX_FAILURE;
148+
}
149+
}
150+
catch (...)
151+
{
152+
// std::cerr << "Failed to set trace enabled: " << e.what() << std::endl;
153+
status = VX_FAILURE;
154+
}
155+
}
156+
157+
return status;
158+
}
159+
160+
private:
161+
bool _modelLoaded;
162+
bool _traceEnabled;
163+
std::unique_ptr<Module> _module;
164+
165+
/**
166+
* @brief Dump the profile trace data to a file
167+
*/
168+
void dumpTrace()
169+
{
170+
if (auto *etdump = dynamic_cast<executorch::etdump::ETDumpGen *>(_module->event_tracer()))
171+
{
172+
const auto trace = etdump->get_etdump_data();
173+
174+
if (trace.buf && trace.size > 0)
175+
{
176+
std::unique_ptr<void, decltype(&free)> guard(trace.buf, free);
177+
std::ofstream file("trace.etdump", std::ios::binary);
178+
179+
if (file)
180+
{
181+
file.write(static_cast<const char *>(trace.buf), trace.size);
182+
}
183+
}
184+
}
185+
}
186+
};

targets/executorch/BUILD

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
2+
# OpenVX target module for the ExecuTorch backend.
cc_library(
    name = "torch",
    srcs = glob([
        "*.cpp",
        "*.h",
    ]),
    includes = [
        ".",
        "//framework/include",
        # Fixed: the kernel sources added in this change live under
        # kernels/executorch (there is no kernels/torch package).
        "//kernels/executorch",
    ],
    deps = [
        "//:corevx",
        "//kernels/executorch:torch_kernels"
    ],
    # Keep static-initializer registration symbols in the final library.
    alwayslink = True,
    visibility = ["//visibility:public"]
)

# Shared library the framework loads by the module name "openvx-torch"
# (registered in targetModules in framework/src/vx_context.cpp).
cc_shared_library(
    name = "openvx-torch",
    deps = [
        ":torch",
    ],
    visibility = ["//visibility:public"]
)

# Import target so other rules can link against the prebuilt shared library.
cc_import(
    name = "imported_openvx_torch",
    shared_library = ":openvx-torch",
    visibility = ["//visibility:public"]
)

0 commit comments

Comments
 (0)