
Commit 130c8a4

Merge pull request #48 from m12watanabe1a/chore/openvino-2023
Fix to suppress warnings of YOLOX

2 parents 0a58aa2 + e71d9d2

3 files changed (+29, -70 lines)

yolox_ros_cpp/yolox_cpp/CMakeLists.txt

Lines changed: 2 additions & 4 deletions
@@ -37,13 +37,11 @@ set(ENABLE_TFLITE OFF)
 
 if(YOLOX_USE_OPENVINO)
   find_package(OpenVINO REQUIRED)
-  find_package(InferenceEngine REQUIRED)
-  find_package(ngraph REQUIRED)
 
   set(ENABLE_OPENVINO ON)
   set(TARGET_SRC src/yolox_openvino.cpp)
-  set(TARGET_LIBS InferenceEngine ngraph)
-  set(TARGET_DPENDENCIES OpenVINO InferenceEngine ngraph)
+  set(TARGET_LIBS openvino::runtime)
+  set(TARGET_DPENDENCIES OpenVINO)
 endif()
 
 if(YOLOX_USE_TENSORRT)

yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_openvino.hpp

Lines changed: 4 additions & 6 deletions
@@ -7,7 +7,7 @@
 #include <vector>
 #include <iostream>
 #include <opencv2/opencv.hpp>
-#include <inference_engine.hpp>
+#include <openvino/openvino.hpp>
 
 #include "core.hpp"
 #include "coco_names.hpp"
@@ -22,11 +22,9 @@ namespace yolox_cpp{
 
     private:
         std::string device_name_;
-        std::string input_name_;
-        std::string output_name_;
-        InferenceEngine::CNNNetwork network_;
-        InferenceEngine::ExecutableNetwork executable_network_;
-        InferenceEngine::InferRequest infer_request_;
+        std::vector<float> blob_;
+        ov::Shape input_shape_;
+        ov::InferRequest infer_request_;
     };
 }
 
yolox_ros_cpp/yolox_cpp/src/yolox_openvino.cpp

Lines changed: 23 additions & 60 deletions
@@ -1,8 +1,6 @@
 #include "yolox_cpp/yolox_openvino.hpp"
 
 namespace yolox_cpp{
-    using namespace InferenceEngine;
-
     YoloXOpenVINO::YoloXOpenVINO(file_name_t path_to_model, std::string device_name,
                                  float nms_th, float conf_th, std::string model_version,
                                  int num_classes, bool p6)
@@ -11,55 +9,36 @@ namespace yolox_cpp{
     {
         // Step 1. Initialize inference engine core
        std::cout << "Initialize Inference engine core" << std::endl;
-        Core ie;
+        ov::Core ie;
 
         // Step 2. Read a model in OpenVINO Intermediate Representation (.xml and
         // .bin files) or ONNX (.onnx file) format
         std::cout << "Read a model in OpenVINO Intermediate Representation: " << path_to_model << std::endl;
-        network_ = ie.ReadNetwork(path_to_model);
-        if (network_.getOutputsInfo().size() != 1)
-            throw std::logic_error("Sample supports topologies with 1 output only");
-        if (network_.getInputsInfo().size() != 1)
-            throw std::logic_error("Sample supports topologies with 1 input only");
+        const auto network = ie.read_model(path_to_model);
 
-        // Step 3. Configure input & output
-        std::cout << "Configuring input and output blobs" << std::endl;
-        // Prepare input blobs
-        InputInfo::Ptr input_info = network_.getInputsInfo().begin()->second;
-        input_name_ = network_.getInputsInfo().begin()->first;
+        // Step 3. Loading a model to the device
+        std::cout << "Loading a model to the device: " << device_name_ << std::endl;
+        auto compiled_model = ie.compile_model(network, device_name);
+
+        // Step 4. Create an infer request
+        std::cout << "Create an infer request" << std::endl;
+        this->infer_request_ = compiled_model.create_infer_request();
 
+        // Step 5. Configure input & output
+        std::cout << "Configuring input and output blobs" << std::endl;
+        this->input_shape_ = compiled_model.input(0).get_shape();
         /* Mark input as resizable by setting of a resize algorithm.
          * In this case we will be able to set an input blob of any shape to an
          * infer request. Resize and layout conversions are executed automatically
          * during inference */
-        //input_info->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR);
-        //input_info->setLayout(Layout::NHWC);
-        input_info->setPrecision(Precision::FP32);
-        auto input_dims = input_info->getInputData()->getDims();
-        this->input_h_ = input_dims[2];
-        this->input_w_ = input_dims[3];
+        this->blob_.resize(
+            this->input_shape_.at(0) * this->input_shape_.at(1) *
+            this->input_shape_.at(2) * this->input_shape_.at(3));
+        this->input_h_ = this->input_shape_.at(2);
+        this->input_w_ = this->input_shape_.at(3);
         std::cout << "INPUT_HEIGHT: " << this->input_h_ << std::endl;
         std::cout << "INPUT_WIDTH: " << this->input_w_ << std::endl;
 
-        // Prepare output blobs
-        if (network_.getOutputsInfo().empty()) {
-            std::cerr << "Network outputs info is empty" << std::endl;
-            throw std :: runtime_error( "Network outputs info is empty" );
-        }
-        DataPtr output_info = network_.getOutputsInfo().begin()->second;
-        output_name_ = network_.getOutputsInfo().begin()->first;
-
-        // output_info->setPrecision(Precision::FP16);
-        output_info->setPrecision(Precision::FP32);
-
-        // Step 4. Loading a model to the device
-        std::cout << "Loading a model to the device: " << device_name_ << std::endl;
-        executable_network_ = ie.LoadNetwork(network_, device_name_);
-
-        // Step 5. Create an infer request
-        std::cout << "Create an infer request" << std::endl;
-        infer_request_ = executable_network_.CreateInferRequest();
-
         // Prepare GridAndStrides
         if(this->p6_)
         {
@@ -75,34 +54,18 @@
     {
         // preprocess
         cv::Mat pr_img = static_resize(frame);
-        InferenceEngine::Blob::Ptr imgBlob = infer_request_.GetBlob(input_name_);
-        InferenceEngine::MemoryBlob::Ptr mblob = InferenceEngine::as<InferenceEngine::MemoryBlob>(imgBlob);
-        if (!mblob)
-        {
-            THROW_IE_EXCEPTION << "We expect blob to be inherited from MemoryBlob in matU8ToBlob, "
-                << "but by fact we were not able to cast inputBlob to MemoryBlob";
-        }
         // locked memory holder should be alive all time while access to its buffer happens
-        auto mblobHolder = mblob->wmap();
-        float *blob_data = mblobHolder.as<float *>();
-        blobFromImage(pr_img, blob_data);
+        blobFromImage(pr_img, this->blob_.data());
 
         // do inference
         /* Running the request synchronously */
-        infer_request_.Infer();
+        this->infer_request_.set_input_tensor(
+            ov::Tensor{ov::element::f32, this->input_shape_, reinterpret_cast<float *>(this->blob_.data())});
+        infer_request_.infer();
 
-        // Process output
-        const InferenceEngine::Blob::Ptr output_blob = infer_request_.GetBlob(output_name_);
-        InferenceEngine::MemoryBlob::CPtr moutput = as<InferenceEngine::MemoryBlob>(output_blob);
-        if (!moutput) {
-            throw std::logic_error("We expect output to be inherited from MemoryBlob, "
-                "but by fact we were not able to cast output to MemoryBlob");
-        }
 
-        // locked memory holder should be alive all time while access to its buffer
-        // happens
-        auto moutputHolder = moutput->rmap();
-        const float* net_pred = moutputHolder.as<const PrecisionTrait<Precision::FP32>::value_type*>();
+        const auto &output_tensor = this->infer_request_.get_output_tensor();
+        const float* net_pred = reinterpret_cast<float *>(output_tensor.data());
 
         float scale = std::min(input_w_ / (frame.cols*1.0), input_h_ / (frame.rows*1.0));
         std::vector<Object> objects;
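
For reference, below is a minimal standalone sketch of the OpenVINO 2.x runtime flow that the new code follows: ov::Core -> read_model -> compile_model -> create_infer_request -> set_input_tensor -> infer -> get_output_tensor. It is not part of the commit; the model path "model.xml", the "CPU" device string, the use of ov::shape_size, and the assumption of a single FP32 NCHW input and a single FP32 output are illustrative.

// Sketch only: mirrors the migrated API calls above under the stated assumptions.
#include <iostream>
#include <vector>
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;                                            // replaces InferenceEngine::Core
    const auto model = core.read_model("model.xml");          // hypothetical model path
    auto compiled = core.compile_model(model, "CPU");         // hypothetical device
    ov::InferRequest request = compiled.create_infer_request();

    const ov::Shape input_shape = compiled.input(0).get_shape();  // e.g. {1, 3, H, W}
    std::vector<float> blob(ov::shape_size(input_shape));         // host-side input buffer
    // ... fill `blob` with preprocessed image data (cf. blobFromImage) ...

    // Wrap the host buffer in a tensor and run synchronous inference.
    request.set_input_tensor(ov::Tensor{ov::element::f32, input_shape, blob.data()});
    request.infer();

    // Direct pointer access to the output tensor, no MemoryBlob wrapping needed.
    ov::Tensor output = request.get_output_tensor();
    const float* net_pred = output.data<float>();
    std::cout << "output elements: " << output.get_size()
              << ", first value: " << net_pred[0] << std::endl;
    return 0;
}

Building such a sketch only needs linking against openvino::runtime, which is exactly what the CMakeLists.txt change above switches to.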
