@@ -41,6 +41,20 @@ void OpenVINOInferenceAdapter::loadModel(const std::shared_ptr<const ov::Model>&
     }
 }
 
+void OpenVINOInferenceAdapter::infer(const InferenceInput& input, InferenceOutput& output) {
+    auto request = asyncQueue->operator[](asyncQueue->get_idle_request_id());
+    for (const auto& [name, tensor] : input) {
+        request.set_tensor(name, tensor);
+    }
+    for (const auto& [name, tensor] : output) {
+        request.set_tensor(name, tensor);
+    }
+    request.infer();
+    for (const auto& name : outputNames) {
+        output[name] = request.get_tensor(name);
+    }
+}
+
 InferenceOutput OpenVINOInferenceAdapter::infer(const InferenceInput& input) {
     auto request = asyncQueue->operator[](asyncQueue->get_idle_request_id());
     // Fill input blobs
@@ -87,6 +101,9 @@ size_t OpenVINOInferenceAdapter::getNumAsyncExecutors() const {
 ov::PartialShape OpenVINOInferenceAdapter::getInputShape(const std::string& inputName) const {
     return compiledModel.input(inputName).get_partial_shape();
 }
+ov::PartialShape OpenVINOInferenceAdapter::getOutputShape(const std::string& outputName) const {
+    return compiledModel.output(outputName).get_partial_shape();
+}
 
 void OpenVINOInferenceAdapter::initInputsOutputs() {
     for (const auto& input : compiledModel.inputs()) {
@@ -97,6 +114,12 @@ void OpenVINOInferenceAdapter::initInputsOutputs() {
         outputNames.push_back(output.get_any_name());
     }
 }
+ov::element::Type_t OpenVINOInferenceAdapter::getInputDatatype(const std::string&) const {
+    throw std::runtime_error("Not implemented");
+}
+ov::element::Type_t OpenVINOInferenceAdapter::getOutputDatatype(const std::string&) const {
+    throw std::runtime_error("Not implemented");
+}
 
 std::vector<std::string> OpenVINOInferenceAdapter::getInputNames() const {
     return inputNames;
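
For context, a minimal caller-side sketch of the new in-place infer overload (not part of this commit). It assumes InferenceInput and InferenceOutput are maps from tensor names to ov::Tensor, as in the adapter interface; the header path, the runOnce helper, and the "image" input name are illustrative.

    #include <openvino/openvino.hpp>

    #include "adapters/openvino_adapter.h"  // assumed header path for the adapter

    // Sketch: one synchronous inference through the new overload.
    void runOnce(OpenVINOInferenceAdapter& adapter, const ov::Tensor& image) {
        InferenceInput input;
        input["image"] = image;  // "image" is a hypothetical input tensor name

        // Left empty here: after request.infer() the adapter fills it with the
        // request's output tensors. Pre-populating it with allocated tensors
        // would instead bind them to the request before the call, so results
        // are written into caller-owned memory.
        InferenceOutput output;
        adapter.infer(input, output);
    }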