@@ -30,11 +30,23 @@
 #include <type_traits>
 #include <vector>
 
-namespace torch {
-namespace executor {
+namespace executorch {
+namespace backends {
 namespace vulkan {
 namespace {
 
+using executorch::runtime::ArrayRef;
+using executorch::runtime::Backend;
+using executorch::runtime::BackendExecutionContext;
+using executorch::runtime::BackendInitContext;
+using executorch::runtime::CompileSpec;
+using executorch::runtime::DelegateHandle;
+using executorch::runtime::Error;
+using executorch::runtime::EValue;
+using executorch::runtime::FreeableBuffer;
+using executorch::runtime::kTensorDimensionLimit;
+using executorch::runtime::Result;
+
 using namespace vkcompute;
 
 // Flatbuffer types
@@ -357,7 +369,7 @@ class GraphBuilder {
 bool maybe_resize_input(
     ComputeGraph* graph,
     const size_t input_i,
-    exec_aten::Tensor& et_tensor) {
+    executorch::aten::Tensor& et_tensor) {
   ValueRef in_tensor_ref = graph->inputs()[input_i].value;
   vTensorPtr in_tensor = graph->get_tensor(in_tensor_ref);
 
@@ -392,17 +404,18 @@ bool maybe_resize_input(
 void maybe_resize_output(
     ComputeGraph* graph,
     const size_t output_i,
-    exec_aten::Tensor& et_tensor) {
+    executorch::aten::Tensor& et_tensor) {
   ValueRef out_tensor_ref = graph->outputs()[output_i].value;
   vTensorPtr out_tensor = graph->get_tensor(out_tensor_ref);
 
-  exec_aten::SizesType new_output_size[kTensorDimensionLimit];
+  executorch::aten::SizesType new_output_size[kTensorDimensionLimit];
   size_t ndim = out_tensor->sizes().size();
   for (int i = 0; i < ndim; ++i) {
     new_output_size[i] = out_tensor->sizes()[i];
   }
 
-  exec_aten::ArrayRef<exec_aten::SizesType> output_size{new_output_size, ndim};
+  executorch::aten::ArrayRef<executorch::aten::SizesType> output_size{
+      new_output_size, ndim};
   Error err = resize_tensor(et_tensor, output_size);
 
   ET_CHECK_MSG(err == Error::Ok, "Failed to resize output tensor.");
@@ -555,5 +568,5 @@ static auto success_with_compiler = register_backend(backend);
 
 } // namespace
 } // namespace vulkan
-} // namespace executor
-} // namespace torch
+} // namespace backends
+} // namespace executorch
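
For context on the resize pattern in the maybe_resize_output hunk above: it copies the graph tensor's sizes into a fixed-capacity SizesType buffer and wraps the valid prefix in an ArrayRef before calling resize_tensor. Below is a minimal standalone sketch of that copy-into-fixed-buffer pattern, with std::span standing in for executorch::aten::ArrayRef; the pack_sizes helper and the concrete values chosen for SizesType and kTensorDimensionLimit are illustrative assumptions, not taken from the ExecuTorch headers.

```cpp
#include <cstddef>
#include <cstdint>
#include <span>
#include <vector>

// Illustrative stand-ins for the runtime's types; the real definitions live in
// the ExecuTorch headers and may differ from the values assumed here.
using SizesType = int32_t;
constexpr std::size_t kTensorDimensionLimit = 16;

// Copy a runtime-sized dims vector into a fixed-capacity stack buffer and
// return a non-owning view over the valid prefix, mirroring how
// maybe_resize_output builds the sizes it passes to resize_tensor.
// Assumes sizes.size() <= kTensorDimensionLimit, as the runtime guarantees
// for its own tensors.
std::span<const SizesType> pack_sizes(
    const std::vector<int64_t>& sizes,
    SizesType (&buffer)[kTensorDimensionLimit]) {
  const std::size_t ndim = sizes.size();
  for (std::size_t i = 0; i < ndim; ++i) {
    buffer[i] = static_cast<SizesType>(sizes[i]);
  }
  return {buffer, ndim};
}
```

Keeping the sizes in a stack array of capacity kTensorDimensionLimit avoids a heap allocation on the execute path, which is presumably why the hunk uses a fixed-size array rather than a heap-allocated vector.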