- // Copyright (C) 2018-2021 Intel Corporation
+ // Copyright (C) 2021-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
- #include <inference_engine.hpp>
+ #include <openvino/openvino.hpp>
#include <opencv2/opencv.hpp>
#include <utils/common.hpp>
#include <utils/performance_metrics.hpp>

bool ParseAndCheckCommandLine(int argc, char *argv[]);

- static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
+ static cv::Mat tensorToMat(const ov::Tensor& tensor);

struct MRIData {
    cv::Mat data;
@@ -38,16 +38,32 @@ int main(int argc, char** argv) {
        return 0;
    }

-     slog::info << *InferenceEngine::GetInferenceEngineVersion() << slog::endl;
-     InferenceEngine::Core ie;
+     slog::info << ov::get_openvino_version() << slog::endl;
+     ov::Core core;

-     InferenceEngine::CNNNetwork net = ie.ReadNetwork(FLAGS_m);
-     net.getInputsInfo().begin()->second->setLayout(InferenceEngine::Layout::NHWC);
+     slog::info << "Reading model " << FLAGS_m << slog::endl;
+     std::shared_ptr<ov::Model> model = core.read_model(FLAGS_m);
+     logBasicModelInfo(model);

-     InferenceEngine::ExecutableNetwork execNet = ie.LoadNetwork(net, FLAGS_d);
-     logExecNetworkInfo(execNet, FLAGS_m, FLAGS_d);
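+     // Pick the model output whose channel dimension is 1: the single-channel reconstructed image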
+     std::string outputTensorName = "";
+     ov::Layout outputLayout("NHWC");
+     for (const auto& output : model->outputs()) {
+         if (output.get_shape()[ov::layout::channels_idx(outputLayout)] == 1) {
+             outputTensorName = output.get_any_name();
+         }
+     }
+     if (outputTensorName.empty()) {
+         throw std::logic_error("No suitable output found!");
+     }
+
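+     // PrePostProcessor is the OpenVINO 2.0 counterpart of the removed setLayout() call:
+     // declare the layout of the model input as NHWC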
+     ov::preprocess::PrePostProcessor ppp(model);
+     ppp.input().model().set_layout("NHWC");
+     model = ppp.build();

-     InferenceEngine::InferRequest infReq = execNet.CreateInferRequest();
+     ov::CompiledModel compiledModel = core.compile_model(model, FLAGS_d);
+     logCompiledModelInfo(compiledModel, FLAGS_m, FLAGS_d);
+
+     ov::InferRequest infReq = compiledModel.create_infer_request();

    // Hybrid-CS-Model-MRI/Data/sampling_mask_20perc.npy
    MRIData mri;
@@ -65,8 +81,8 @@ int main(int argc, char** argv) {

    slog::info << "Compute..." << slog::endl;

-     cv::Mat inputBlob = infEngineBlobToMat(infReq.GetBlob(net.getInputsInfo().begin()->first));
-     cv::Mat outputBlob = infEngineBlobToMat(infReq.GetBlob(net.getOutputsInfo().begin()->first));
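+     // Wrap the request's input and output tensors as cv::Mat headers via tensorToMat; no data is copied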
+     cv::Mat inputBlob = tensorToMat(infReq.get_input_tensor());
+     cv::Mat outputBlob = tensorToMat(infReq.get_tensor(outputTensorName));
    outputBlob = outputBlob.reshape(1, height);

    const auto startTime = std::chrono::steady_clock::now();
@@ -77,9 +93,8 @@ int main(int argc, char** argv) {
        kspace.setTo(0, mri.samplingMask);
        kspace = (kspace - cv::Scalar(mri.stats[0], mri.stats[0])) / cv::Scalar(mri.stats[1], mri.stats[1]);
        kspace.reshape(1, 1).convertTo(inputBlob.reshape(1, 1), CV_32F);
-
        // Forward pass
-         infReq.Infer();
+         infReq.infer();

        // Save prediction
        cv::Mat slice(height, width, CV_8UC1, mri.reconstructed.ptr<uint8_t>(i));
@@ -102,13 +117,13 @@ int main(int argc, char** argv) {
    return 0;
}

- cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob) {
+ cv::Mat tensorToMat(const ov::Tensor& tensor) {
    // NOTE: Inference Engine sizes are reversed.
-     std::vector<size_t> dims = blob->getTensorDesc().getDims();
-     std::vector<int> size(dims.begin(), dims.end());
-     auto precision = blob->getTensorDesc().getPrecision();
-     CV_Assert(precision == InferenceEngine::Precision::FP32);
-     return cv::Mat(size, CV_32F, (void*)blob->buffer());
+     ov::Shape tensorShape = tensor.get_shape();
+     std::vector<int> size(tensorShape.begin(), tensorShape.end());
+     ov::element::Type precision = tensor.get_element_type();
+     CV_Assert(precision == ov::element::f32);
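+     // The returned cv::Mat aliases the tensor's memory; it neither owns nor copies the data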
+     return cv::Mat(size, CV_32F, (void*)tensor.data());
}

void callback(int sliceId, void* userdata) {