Skip to content

Commit 8f1b141

Browse files
committed
Use generic ov::Tensor for mapping buffers
The set_input_tensor method uses the generic type ov::Tensor for its function argument, so the remote_tensor type gets passed as ov::Tensor. Hence, tensor_handle_map_ can use ov::Tensor for the tensor mappings. Signed-off-by: Anoob Anto Kodankandath <[email protected]>
1 parent aa0d5ab commit 8f1b141

File tree

2 files changed

+13
-13
lines changed

2 files changed

+13
-13
lines changed

litert/vendors/intel_openvino/dispatch/device_context.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -42,9 +42,9 @@ class LiteRtDispatchDeviceContextT {
4242
LiteRtTensorBufferHandle tensor_buffer_handle);
4343

4444
#if defined(LITERT_WINDOWS_OS)
45-
litert::Expected<ov::intel_npu::level_zero::ZeroBufferTensor> getRemoteTensor(
45+
litert::Expected<ov::intel_npu::level_zero::ZeroBufferTensor> getOvTensor(
4646
#else
47-
litert::Expected<ov::RemoteTensor> getRemoteTensor(
47+
litert::Expected<ov::Tensor> getOvTensor(
4848
#endif
4949
const LiteRtTensorBufferHandle& handle) const {
5050
auto it = tensor_handle_map_.find(handle);
@@ -68,7 +68,7 @@ class LiteRtDispatchDeviceContextT {
6868
ov::intel_npu::level_zero::ZeroBufferTensor>
6969
tensor_handle_map_;
7070
#else
71-
std::unordered_map<LiteRtTensorBufferHandle, ov::RemoteTensor>
71+
std::unordered_map<LiteRtTensorBufferHandle, ov::Tensor>
7272
tensor_handle_map_;
7373
#endif
7474
uint64_t next_handle_;

litert/vendors/intel_openvino/dispatch/invocation_context.cc

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -103,33 +103,33 @@ litert::Expected<void> LiteRtDispatchInvocationContextT::AttachInput(
103103
int graph_input_index, LiteRtTensorBufferHandle tensor_buffer_handle) {
104104
#if defined(LITERT_WINDOWS_OS)
105105
LITERT_ASSIGN_OR_RETURN(
106-
ov::intel_npu::level_zero::ZeroBufferTensor remote_tensor,
107-
device_context_.getRemoteTensor(tensor_buffer_handle));
106+
ov::intel_npu::level_zero::ZeroBufferTensor ov_tensor,
107+
device_context_.getOvTensor(tensor_buffer_handle));
108108
#else
109109
LITERT_ASSIGN_OR_RETURN(
110-
ov::RemoteTensor remote_tensor,
111-
device_context_.getRemoteTensor(tensor_buffer_handle));
110+
ov::Tensor ov_tensor,
111+
device_context_.getOvTensor(tensor_buffer_handle));
112112
#endif
113113
// TODO: visit this if need to maintain graph indices for inputs and outputs
114114
// in dispatch_api
115-
infer_request_.set_input_tensor(graph_input_index, remote_tensor);
115+
infer_request_.set_input_tensor(graph_input_index, ov_tensor);
116116
return {};
117117
}
118118

119119
litert::Expected<void> LiteRtDispatchInvocationContextT::AttachOutput(
120120
int graph_output_index, LiteRtTensorBufferHandle tensor_buffer_handle) {
121121
#if defined(LITERT_WINDOWS_OS)
122122
LITERT_ASSIGN_OR_RETURN(
123-
ov::intel_npu::level_zero::ZeroBufferTensor remote_tensor,
124-
device_context_.getRemoteTensor(tensor_buffer_handle));
123+
ov::intel_npu::level_zero::ZeroBufferTensor ov_tensor,
124+
device_context_.getOvTensor(tensor_buffer_handle));
125125
#else
126126
LITERT_ASSIGN_OR_RETURN(
127-
ov::RemoteTensor remote_tensor,
128-
device_context_.getRemoteTensor(tensor_buffer_handle));
127+
ov::Tensor ov_tensor,
128+
device_context_.getOvTensor(tensor_buffer_handle));
129129
#endif
130130
// TODO: visit this if need to maintain graph indices for inputs and outputs
131131
// in dispatch_api
132-
infer_request_.set_output_tensor(graph_output_index, remote_tensor);
132+
infer_request_.set_output_tensor(graph_output_index, ov_tensor);
133133
return {};
134134
}
135135

0 commit comments

Comments
 (0)