
Commit d15c492

ivikhrev, vladimir-dudnik, and anzhella-pankratova authored
OV2.0 c++ mri reconstruction demo (#3086)
* start migration
* handle multiple outputs in model
* undo VS 2019 auto formatting
* use common functions
* old/new API for showAvailableDevices for demos which are still on the old API
* fix merge artifacts
* use channels num to determine output
* Update demos/mri_reconstruction_demo/cpp/main.cpp
  Co-authored-by: Anzhella Pankratova <[email protected]>
* removed runtime namespace

Co-authored-by: Vladimir Dudnik <[email protected]>
Co-authored-by: Anzhella Pankratova <[email protected]>
1 parent fe52bbc commit d15c492

6 files changed: +62 -38 lines changed

demos/common/cpp/utils/include/utils/common.hpp

Lines changed: 5 additions & 0 deletions
@@ -210,8 +210,13 @@ inline std::size_t getTensorBatch(const InferenceEngine::TensorDesc& desc) {
 }
 
 inline void showAvailableDevices() {
+#if defined(OV_NEW_API)
+    ov::Core core;
+    std::vector<std::string> devices = core.get_available_devices();
+#else
     InferenceEngine::Core ie;
     std::vector<std::string> devices = ie.GetAvailableDevices();
+#endif
 
     std::cout << std::endl;
     std::cout << "Available target devices:";

demos/common/cpp/utils/include/utils/ocv_common.hpp

Lines changed: 16 additions & 16 deletions
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2021 Intel Corporation
+// Copyright (C) 2018-2022 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
 
@@ -11,15 +11,16 @@
 
 #include <opencv2/opencv.hpp>
 
+#include "openvino/openvino.hpp"
+
 #include "utils/common.hpp"
 #include "utils/shared_blob_allocator.h"
 
-#include "openvino/openvino.hpp"
 
 
-/**
-* @brief Get cv::Mat value in the correct format.
-*/
+/**
+ * @brief Get cv::Mat value in the correct format.
+ */
 template <typename T>
 static const T getMatValue(const cv::Mat& mat, size_t h, size_t w, size_t c) {
     switch (mat.type()) {
@@ -112,8 +113,7 @@ static UNUSED void matToTensor(const cv::Mat& mat, const ov::Tensor& tensor, int
             for (size_t w = 0; w < width; w++)
                 tensorData[batchOffset + c * width * height + h * width + w] =
                     getMatValue<float_t>(resizedMat, h, w, c);
-    }
-    else {
+    } else {
         uint8_t* tensorData = tensor.data<uint8_t>();
         if (resizedMat.depth() == CV_32F) {
             throw std::runtime_error("Conversion of cv::Mat from float_t to uint8_t is forbidden");
@@ -155,8 +155,8 @@ static UNUSED InferenceEngine::Blob::Ptr wrapMat2Blob(const cv::Mat& mat) {
     InferenceEngine::Precision precision = isMatFloat ?
         InferenceEngine::Precision::FP32 : InferenceEngine::Precision::U8;
     InferenceEngine::TensorDesc tDesc(precision,
-                                      { 1, channels, height, width },
-                                      InferenceEngine::Layout::NHWC);
+                                      {1, channels, height, width},
+                                      InferenceEngine::Layout::NHWC);
 
     InferenceEngine::Blob::Ptr blob;
     if (isMatFloat) {
@@ -195,12 +195,12 @@ static UNUSED ov::Tensor wrapMat2Tensor(const cv::Mat& mat) {
  * @param thickness - thickness of the lines used to draw a text.
  */
 inline void putHighlightedText(const cv::Mat& frame,
-        const std::string& message,
-        cv::Point position,
-        int fontFace,
-        double fontScale,
-        cv::Scalar color,
-        int thickness) {
+                               const std::string& message,
+                               cv::Point position,
+                               int fontFace,
+                               double fontScale,
+                               cv::Scalar color,
+                               int thickness) {
     cv::putText(frame, message, position, fontFace, fontScale, cv::Scalar(255, 255, 255), thickness + 1);
     cv::putText(frame, message, position, fontFace, fontScale, color, thickness);
 }
@@ -217,7 +217,7 @@ class OutputTransform {
         float inputWidth = static_cast<float>(inputSize.width);
         float inputHeight = static_cast<float>(inputSize.height);
         scaleFactor = std::min(outputResolution.height / inputHeight, outputResolution.width / inputWidth);
-        newResolution = cv::Size{ static_cast<int>(inputWidth * scaleFactor), static_cast<int>(inputHeight * scaleFactor) };
+        newResolution = cv::Size{static_cast<int>(inputWidth * scaleFactor), static_cast<int>(inputHeight * scaleFactor)};
         return newResolution;
     }
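
The wrapMat2Tensor() helper kept in this header pairs naturally with the new request API. A hypothetical usage sketch (the runOnFrame name and the assumption that the model takes a single u8 NHWC input are illustrative, not part of this commit):

// Sketch: wrap an OpenCV frame as an ov::Tensor (the tensor aliases
// the cv::Mat's data rather than copying it) and run inference.
#include <opencv2/opencv.hpp>
#include <openvino/openvino.hpp>
#include <utils/ocv_common.hpp>

void runOnFrame(ov::InferRequest& request, const cv::Mat& frame) {
    ov::Tensor input = wrapMat2Tensor(frame);  // frame must outlive input
    request.set_input_tensor(input);
    request.infer();
}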

demos/mri_reconstruction_demo/cpp/CMakeLists.txt

Lines changed: 4 additions & 0 deletions
@@ -1,3 +1,7 @@
+# Copyright (C) 2021-2022 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
 file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
 file(GLOB HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)

demos/mri_reconstruction_demo/cpp/main.cpp

Lines changed: 35 additions & 20 deletions
@@ -1,7 +1,7 @@
-// Copyright (C) 2018-2021 Intel Corporation
+// Copyright (C) 2021-2022 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <inference_engine.hpp>
+#include <openvino/openvino.hpp>
 #include <opencv2/opencv.hpp>
 #include <utils/common.hpp>
 #include <utils/performance_metrics.hpp>
@@ -12,7 +12,7 @@
 
 bool ParseAndCheckCommandLine(int argc, char *argv[]);
 
-static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
+static cv::Mat tensorToMat(const ov::Tensor& tensor);
 
 struct MRIData {
     cv::Mat data;
@@ -38,16 +38,32 @@ int main(int argc, char** argv) {
         return 0;
     }
 
-    slog::info << *InferenceEngine::GetInferenceEngineVersion() << slog::endl;
-    InferenceEngine::Core ie;
+    slog::info << ov::get_openvino_version() << slog::endl;
+    ov::Core core;
 
-    InferenceEngine::CNNNetwork net = ie.ReadNetwork(FLAGS_m);
-    net.getInputsInfo().begin()->second->setLayout(InferenceEngine::Layout::NHWC);
+    slog::info << "Reading model " << FLAGS_m << slog::endl;
+    std::shared_ptr<ov::Model> model = core.read_model(FLAGS_m);
+    logBasicModelInfo(model);
 
-    InferenceEngine::ExecutableNetwork execNet = ie.LoadNetwork(net, FLAGS_d);
-    logExecNetworkInfo(execNet, FLAGS_m, FLAGS_d);
+    std::string outputTensorName = "";
+    ov::Layout outputLayout("NHWC");
+    for (const auto& output : model->outputs()) {
+        if (output.get_shape()[ov::layout::channels_idx(outputLayout)] == 1) {
+            outputTensorName = output.get_any_name();
+        }
+    }
+    if (outputTensorName.empty()) {
+        throw std::logic_error("Not found suitable output!");
+    }
+
+    ov::preprocess::PrePostProcessor ppp(model);
+    ppp.input().model().set_layout("NHWC");
+    model = ppp.build();
 
-    InferenceEngine::InferRequest infReq = execNet.CreateInferRequest();
+    ov::CompiledModel compiledModel = core.compile_model(model, FLAGS_d);
+    logCompiledModelInfo(compiledModel, FLAGS_m, FLAGS_d);
+
+    ov::InferRequest infReq = compiledModel.create_infer_request();
 
     // Hybrid-CS-Model-MRI/Data/sampling_mask_20perc.npy
     MRIData mri;
@@ -65,8 +81,8 @@ int main(int argc, char** argv) {
 
     slog::info << "Compute..." << slog::endl;
 
-    cv::Mat inputBlob = infEngineBlobToMat(infReq.GetBlob(net.getInputsInfo().begin()->first));
-    cv::Mat outputBlob = infEngineBlobToMat(infReq.GetBlob(net.getOutputsInfo().begin()->first));
+    cv::Mat inputBlob = tensorToMat(infReq.get_input_tensor());
+    cv::Mat outputBlob = tensorToMat(infReq.get_tensor(outputTensorName));
     outputBlob = outputBlob.reshape(1, height);
 
     const auto startTime = std::chrono::steady_clock::now();
@@ -77,9 +93,8 @@ int main(int argc, char** argv) {
         kspace.setTo(0, mri.samplingMask);
         kspace = (kspace - cv::Scalar(mri.stats[0], mri.stats[0])) / cv::Scalar(mri.stats[1], mri.stats[1]);
         kspace.reshape(1, 1).convertTo(inputBlob.reshape(1, 1), CV_32F);
-
         // Forward pass
-        infReq.Infer();
+        infReq.infer();
 
         // Save prediction
         cv::Mat slice(height, width, CV_8UC1, mri.reconstructed.ptr<uint8_t>(i));
@@ -102,13 +117,13 @@ int main(int argc, char** argv) {
     return 0;
 }
 
-cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob) {
+cv::Mat tensorToMat(const ov::Tensor& tensor) {
     // NOTE: Inference Engine sizes are reversed.
-    std::vector<size_t> dims = blob->getTensorDesc().getDims();
-    std::vector<int> size(dims.begin(), dims.end());
-    auto precision = blob->getTensorDesc().getPrecision();
-    CV_Assert(precision == InferenceEngine::Precision::FP32);
-    return cv::Mat(size, CV_32F, (void*)blob->buffer());
+    ov::Shape tensorShape = tensor.get_shape();
+    std::vector<int> size(tensorShape.begin(), tensorShape.end());
+    ov::element::Type precision = tensor.get_element_type();
+    CV_Assert(precision == ov::element::f32);
+    return cv::Mat(size, CV_32F, (void*)tensor.data());
 }
 
 void callback(int sliceId, void* userdata) {
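
Taken together, the main.cpp changes follow the standard Inference Engine to OV 2.0 migration pattern. A condensed sketch of that pattern, with a placeholder model path and device, a single-input model assumed, and error handling omitted:

// Sketch: read -> declare layout via PrePostProcessor -> compile ->
// create a request -> infer, mirroring the steps in the diff above.
#include <memory>
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    std::shared_ptr<ov::Model> model = core.read_model("model.xml");  // placeholder path

    // Declaring the model layout makes lookups such as
    // ov::layout::channels_idx() meaningful for shape indexing,
    // as in the output-selection loop above.
    ov::preprocess::PrePostProcessor ppp(model);
    ppp.input().model().set_layout("NHWC");
    model = ppp.build();

    ov::CompiledModel compiledModel = core.compile_model(model, "CPU");  // placeholder device
    ov::InferRequest request = compiledModel.create_infer_request();

    // Fill request.get_input_tensor() with data, then run synchronously.
    request.infer();
    return 0;
}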

demos/mri_reconstruction_demo/cpp/mri_reconstruction_demo.hpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2021 Intel Corporation
+// Copyright (C) 2021-2022 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

demos/mri_reconstruction_demo/cpp/npy_reader.hpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2021 Intel Corporation
+// Copyright (C) 2021-2022 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
 #pragma once
