29 changes: 21 additions & 8 deletions backends/vulkan/runtime/VulkanBackend.cpp
@@ -30,11 +30,23 @@
 #include <type_traits>
 #include <vector>

-namespace torch {
-namespace executor {
+namespace executorch {
+namespace backends {
 namespace vulkan {
 namespace {

+using executorch::runtime::ArrayRef;
+using executorch::runtime::Backend;
+using executorch::runtime::BackendExecutionContext;
+using executorch::runtime::BackendInitContext;
+using executorch::runtime::CompileSpec;
+using executorch::runtime::DelegateHandle;
+using executorch::runtime::Error;
+using executorch::runtime::EValue;
+using executorch::runtime::FreeableBuffer;
+using executorch::runtime::kTensorDimensionLimit;
+using executorch::runtime::Result;
+
 using namespace vkcompute;

 // Flatbuffer types
@@ -357,7 +369,7 @@ class GraphBuilder {
 bool maybe_resize_input(
     ComputeGraph* graph,
     const size_t input_i,
-    exec_aten::Tensor& et_tensor) {
+    executorch::aten::Tensor& et_tensor) {
   ValueRef in_tensor_ref = graph->inputs()[input_i].value;
   vTensorPtr in_tensor = graph->get_tensor(in_tensor_ref);

@@ -392,17 +404,18 @@ bool maybe_resize_input(
 void maybe_resize_output(
     ComputeGraph* graph,
     const size_t output_i,
-    exec_aten::Tensor& et_tensor) {
+    executorch::aten::Tensor& et_tensor) {
   ValueRef out_tensor_ref = graph->outputs()[output_i].value;
   vTensorPtr out_tensor = graph->get_tensor(out_tensor_ref);

-  exec_aten::SizesType new_output_size[kTensorDimensionLimit];
+  executorch::aten::SizesType new_output_size[kTensorDimensionLimit];
   size_t ndim = out_tensor->sizes().size();
   for (int i = 0; i < ndim; ++i) {
     new_output_size[i] = out_tensor->sizes()[i];
   }

-  exec_aten::ArrayRef<exec_aten::SizesType> output_size{new_output_size, ndim};
+  executorch::aten::ArrayRef<executorch::aten::SizesType> output_size{
+      new_output_size, ndim};
   Error err = resize_tensor(et_tensor, output_size);

   ET_CHECK_MSG(err == Error::Ok, "Failed to resize output tensor.");
@@ -555,5 +568,5 @@ static auto success_with_compiler = register_backend(backend);

 } // namespace
 } // namespace vulkan
-} // namespace executor
-} // namespace torch
+} // namespace backends
+} // namespace executorch
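
The hunks above move VulkanBackend.cpp from the old torch::executor nesting to executorch::backends::vulkan and replace reliance on the enclosing namespace with explicit using-declarations for the runtime types. A minimal sketch of the resulting layout follows; the helper function in it is hypothetical and only the namespace and using structure mirrors the diff:

#include <executorch/runtime/core/error.h>

namespace executorch {
namespace backends {
namespace vulkan {
namespace {

using executorch::runtime::Error;

// Hypothetical helper, not part of the diff: the unqualified Error name now
// resolves through the using-declaration rather than a torch::executor scope.
Error require_ok(Error err) {
  return err == Error::Ok ? Error::Ok : Error::Internal;
}

} // namespace
} // namespace vulkan
} // namespace backends
} // namespace executorch
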
11 changes: 7 additions & 4 deletions backends/vulkan/runtime/VulkanDelegateHeader.cpp
@@ -15,10 +15,13 @@

 #pragma clang diagnostic ignored "-Wdeprecated"

-namespace torch {
-namespace executor {
+namespace executorch {
+namespace backends {
 namespace vulkan {

+using executorch::runtime::Error;
+using executorch::runtime::Result;
+
 namespace {

 struct ByteSlice {
@@ -101,5 +104,5 @@ Result<VulkanDelegateHeader> VulkanDelegateHeader::parse(const void* data) {
 }

 } // namespace vulkan
-} // namespace executor
-} // namespace torch
+} // namespace backends
+} // namespace executorch
11 changes: 6 additions & 5 deletions backends/vulkan/runtime/VulkanDelegateHeader.h
@@ -10,8 +10,8 @@

 #include <executorch/runtime/core/result.h>

-namespace torch {
-namespace executor {
+namespace executorch {
+namespace backends {
 namespace vulkan {

 // Byte decoding utilities
@@ -22,7 +22,8 @@ uint32_t getUInt16LE(const uint8_t* data);
 struct VulkanDelegateHeader {
   bool is_valid() const;

-  static Result<VulkanDelegateHeader> parse(const void* data);
+  static executorch::runtime::Result<VulkanDelegateHeader> parse(
+      const void* data);

   uint32_t header_size;
   uint32_t flatbuffer_offset;
@@ -32,5 +33,5 @@ struct VulkanDelegateHeader {
 };

 } // namespace vulkan
-} // namespace executor
-} // namespace torch
+} // namespace backends
+} // namespace executorch
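
With the fully qualified return type, callers outside the vulkan namespace can use parse without a using-declaration. Below is a hedged sketch of such a caller; it assumes the header is included via its source path and that executorch::runtime::Result exposes the ok()/get() accessors used elsewhere in the ExecuTorch runtime, and the function itself is hypothetical:

#include <cstdint>

#include <executorch/backends/vulkan/runtime/VulkanDelegateHeader.h>

// Hypothetical caller: parse the delegate header and report where the
// flatbuffer payload starts.
bool try_get_flatbuffer_offset(const void* data, uint32_t* offset_out) {
  auto header = executorch::backends::vulkan::VulkanDelegateHeader::parse(data);
  if (!header.ok()) {
    return false;
  }
  *offset_out = header.get().flatbuffer_offset;
  return true;
}
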
2 changes: 1 addition & 1 deletion backends/vulkan/test/utils/test_utils.cpp
@@ -358,7 +358,7 @@ void record_matmul_texture3d(
   _(uint8_t, Byte) \
   _(int8_t, Char) \
   _(int32_t, Int) \
-  _(exec_aten::Half, Half) \
+  _(executorch::aten::Half, Half) \
   _(float, Float) \
   _(int8_t, QInt8)

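
The edited lines belong to an X-macro style (C++ type, dtype name) table; only the Half entry changes. The following is a simplified, self-contained sketch of how such a table expands into a compile-time type-to-dtype mapping; the macro, enum, and trait names are illustrative, not the ones used in test_utils.cpp:

#include <cstdint>

// Stand-in dtype enum; the real code maps to vkapi dtypes instead.
enum class DType { Byte, Char, Int, Float };

// (C++ type, dtype name) table, same shape as the one edited above.
#define FORALL_EXAMPLE_TYPES(_) \
  _(uint8_t, Byte)              \
  _(int8_t, Char)               \
  _(int32_t, Int)               \
  _(float, Float)

// Expand the table into one trait specialization per row.
template <typename T>
struct CppTypeToDType;

#define SPECIALIZE(ctype, name)                 \
  template <>                                   \
  struct CppTypeToDType<ctype> {                \
    static constexpr DType value = DType::name; \
  };
FORALL_EXAMPLE_TYPES(SPECIALIZE)
#undef SPECIALIZE

static_assert(CppTypeToDType<float>::value == DType::Float, "table expanded");
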
6 changes: 3 additions & 3 deletions backends/vulkan/test/vulkan_compute_api_test.cpp
@@ -601,7 +601,7 @@ TEST_F(VulkanComputeAPITest, test_buffer_float16) {
   if (!context()->adapter_ptr()->has_full_float16_buffers_support()) {
     GTEST_SKIP();
   }
-  test_storage_buffer_type<exec_aten::Half, vkapi::kHalf>(16);
+  test_storage_buffer_type<executorch::aten::Half, vkapi::kHalf>(16);
 }

 TEST_F(VulkanComputeAPITest, test_buffer_int8) {
@@ -683,7 +683,7 @@ TEST_F(VulkanComputeAPITest, buffer_tensor_sanity_check) {
       run_buffer_tensor_sanity_check<float>(a);
       break;
     case vkapi::kHalf:
-      run_buffer_tensor_sanity_check<exec_aten::Half>(a);
+      run_buffer_tensor_sanity_check<executorch::aten::Half>(a);
       break;
     case vkapi::kChar:
       run_buffer_tensor_sanity_check<int8_t>(a);
@@ -2528,7 +2528,7 @@ TEST(VulkanToFromGPUShaderTest, round_trip_tests) {

   for (auto& sizes : to_test) {
     RUN_TESTS(float, vkapi::kFloat)
-    RUN_TESTS(exec_aten::Half, vkapi::kHalf)
+    RUN_TESTS(executorch::aten::Half, vkapi::kHalf)
   }

   for (auto& sizes : to_test_int8) {