
Commit bc92ceb

apankratovantonp, ilya-lavrenov, and vladimir-dudnik authored
Renamed streams property (#3254)
* Renamed streams property
* fix compilation
* suppress warnings, fix compilation
* compilation, wrap n to ov::streams::Num
* compilation of streams

Co-authored-by: Ilya Lavrenov <[email protected]>
Co-authored-by: Vladimir Dudnik <[email protected]>
1 parent 776c294 commit bc92ceb
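
For orientation, here is a minimal standalone sketch of the rename this commit applies across the demos. The device names, stream counts, and the flow itself are invented for illustration; only the property spellings come from the diffs below.

#include <openvino/openvino.hpp>
#include <iostream>

int main() {
    ov::Core core;
    // Before this commit the demos used ov::num_streams / ov::NumStreams::AUTO.
    core.set_property("CPU", ov::streams::num(4));                  // explicit stream count
    core.set_property("GPU", ov::streams::num(ov::streams::AUTO));  // let the plugin decide
    int32_t n = core.get_property("CPU", ov::streams::num);         // ov::streams::Num converts to int32_t
    std::cout << "CPU streams: " << n << std::endl;
    return 0;
}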

File tree

10 files changed (+30 lines, -32 lines)


demos/background_subtraction_demo/cpp_gapi/src/custom_kernels.cpp

Lines changed: 2 additions & 2 deletions
@@ -26,8 +26,8 @@ static cv::Rect expandBox(const float x0,
     const float h_half = ((y1 - y0) * 0.5f) * scale;
     const float x_c = (x1 + x0) * 0.5f;
     const float y_c = (y1 + y0) * 0.5f;
-    cv::Point tl(x_c - w_half, y_c - h_half);
-    cv::Point br(x_c + w_half, y_c + h_half);
+    cv::Point tl((int)(x_c - w_half), (int)(y_c - h_half));
+    cv::Point br((int)(x_c + w_half), (int)(y_c + h_half));
     return cv::Rect(tl, br);
 }

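A note on the fix: the C-style (int) casts silence float-to-int conversion warnings, but they truncate toward zero rather than round, so the expanded box can shrink by up to a pixel per side. A standalone sketch of the difference, using OpenCV's cvRound as the rounding alternative (values invented):

#include <opencv2/core.hpp>
#include <cstdio>

int main() {
    float v = 10.9f;
    std::printf("%d %d\n", (int)v, cvRound(v));  // prints "10 11": truncation vs. rounding
    return 0;
}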

demos/common/cpp/models/src/segmentation_model.cpp

Lines changed: 1 addition & 1 deletion
@@ -119,7 +119,7 @@ std::unique_ptr<ResultBase> SegmentationModel::postprocess(InferenceResult& infR
     cv::Mat predictions(outHeight, outWidth, CV_32SC1);
     const auto data = outTensor.data<int64_t>();
     for (size_t i = 0; i < predictions.total(); ++i) {
-        reinterpret_cast<int32_t*>(predictions.data)[i] = data[i];
+        reinterpret_cast<int32_t*>(predictions.data)[i] = int32_t(data[i]);
     }
     predictions.convertTo(result->resultImage, CV_8UC1);
 }
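
The added int32_t(...) makes the int64-to-int32 narrowing explicit; segmentation class indices are small, so no information is lost. A hedged standalone sketch of the same pattern, with a plain vector standing in for the OpenVINO tensor:

#include <opencv2/core.hpp>
#include <cstdint>
#include <vector>

int main() {
    std::vector<int64_t> data{0, 1, 2, 1};   // stand-in for outTensor.data<int64_t>()
    cv::Mat predictions(2, 2, CV_32SC1);
    for (size_t i = 0; i < predictions.total(); ++i) {
        reinterpret_cast<int32_t*>(predictions.data)[i] = int32_t(data[i]);  // explicit narrowing
    }
    return 0;
}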

demos/common/cpp/utils/include/utils/args_helper.hpp

Lines changed: 2 additions & 2 deletions
@@ -35,8 +35,8 @@ std::vector<std::string> split(const std::string& s, char delim);

 std::vector<std::string> parseDevices(const std::string& device_string);

-std::map<std::string, uint32_t> parseValuePerDevice(const std::set<std::string>& devices,
-                                                    const std::string& values_string);
+std::map<std::string, int32_t> parseValuePerDevice(const std::set<std::string>& devices,
+                                                   const std::string& values_string);

 cv::Size stringToSize(const std::string& str);


demos/common/cpp/utils/include/utils/common.hpp

Lines changed: 1 addition & 1 deletion
@@ -152,7 +152,7 @@ inline void logCompiledModelInfo(
     for (const auto& device : devices) {
         try {
             slog::info << "\tDevice: " << device << slog::endl;
-            int32_t nstreams = compiledModel.get_property(ov::num_streams);
+            int32_t nstreams = compiledModel.get_property(ov::streams::num);
             slog::info << "\t\tNumber of streams: " << nstreams << slog::endl;
             if (device == "CPU") {
                 int32_t nthreads = compiledModel.get_property(ov::inference_num_threads);
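
The renamed property reads back from a compiled model exactly as the old one did. A hedged sketch (the model path is hypothetical; the two property reads mirror the hunk above):

#include <openvino/openvino.hpp>
#include <iostream>

int main() {
    ov::Core core;
    ov::CompiledModel compiledModel = core.compile_model("model.xml", "CPU");  // hypothetical model file
    int32_t nstreams = compiledModel.get_property(ov::streams::num);           // ov::streams::Num -> int32_t
    int32_t nthreads = compiledModel.get_property(ov::inference_num_threads);
    std::cout << nstreams << " streams, " << nthreads << " threads" << std::endl;
    return 0;
}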

demos/common/cpp/utils/src/args_helper.cpp

Lines changed: 3 additions & 3 deletions
@@ -93,14 +93,14 @@ std::vector<std::string> parseDevices(const std::string& device_string) {
 }

 // Format: <device1>:<value1>,<device2>:<value2> or just <value>
-std::map<std::string, uint32_t> parseValuePerDevice(const std::set<std::string>& devices,
-                                                    const std::string& values_string) {
+std::map<std::string, int32_t> parseValuePerDevice(const std::set<std::string>& devices,
+                                                   const std::string& values_string) {
     auto values_string_upper = values_string;
     std::transform(values_string_upper.begin(),
                    values_string_upper.end(),
                    values_string_upper.begin(),
                    [](unsigned char c){ return std::toupper(c); });
-    std::map<std::string, uint32_t> result;
+    std::map<std::string, int32_t> result;
     auto device_value_strings = split(values_string_upper, ',');
     for (auto& device_value_string : device_value_strings) {
         auto device_value_vec = split(device_value_string, ':');
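
Per the format comment in the hunk, a hypothetical call might look like the fragment below; the devices set and value strings are invented, and only the (now int32_t-valued) signature is taken from the source:

std::set<std::string> devices{"CPU", "GPU"};
// Per-device values:
std::map<std::string, int32_t> ns = parseValuePerDevice(devices, "CPU:4,GPU:2");  // {CPU: 4, GPU: 2}
// A single bare value applies to every device in the set:
ns = parseValuePerDevice(devices, "4");                                           // {CPU: 4, GPU: 4}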

demos/common/cpp/utils/src/config_factory.cpp

Lines changed: 8 additions & 11 deletions
@@ -24,7 +24,7 @@

 std::set<std::string> ModelConfig::getDevices() {
     if (devices.empty()) {
-        for (const std::string& device : ::parseDevices(deviceName)) {
+        for (const std::string& device : parseDevices(deviceName)) {
             devices.insert(device);
         }
     }
@@ -36,21 +36,18 @@ ModelConfig ConfigFactory::getUserConfig(const std::string& flags_d,
         uint32_t flags_nireq, const std::string& flags_nstreams, uint32_t flags_nthreads) {
     auto config = getCommonConfig(flags_d, flags_nireq);

-    std::map<std::string, unsigned> deviceNstreams = parseValuePerDevice(config.getDevices(), flags_nstreams);
+    std::map<std::string, int> deviceNstreams = parseValuePerDevice(config.getDevices(), flags_nstreams);
     for (const auto& device : config.getDevices()) {
         if (device == "CPU") { // CPU supports a few special performance-oriented keys
             // limit threading for CPU portion of inference
             if (flags_nthreads != 0)
                 config.compiledModelConfig.emplace(ov::inference_num_threads.name(), flags_nthreads);

             config.compiledModelConfig.emplace(ov::affinity.name(), ov::Affinity::NONE);
-        }
-        else if (device == "GPU") {
-            config.compiledModelConfig.emplace(ov::num_streams(deviceNstreams.count(device) > 0
-                ? (int)deviceNstreams.at(device)
-                : ov::NumStreams::AUTO));
-            if (flags_d.find("MULTI") != std::string::npos
-                && config.getDevices().find("CPU") != config.getDevices().end()) {
+        } else if (device == "GPU") {
+            ov::streams::Num nstreams = deviceNstreams.count(device) > 0 ? ov::streams::Num(deviceNstreams[device]) : ov::streams::AUTO;
+            config.compiledModelConfig.emplace(device, nstreams);
+            if (flags_d.find("MULTI") != std::string::npos && config.getDevices().find("CPU") != config.getDevices().end()) {
                 // multi-device execution with the CPU + GPU performs best with GPU throttling hint,
                 // which releases another CPU thread (that is otherwise used by the GPU driver for active polling)
                 config.compiledModelConfig.emplace(ov::intel_gpu::hint::queue_throttle.name(),
@@ -65,10 +62,10 @@ ModelConfig ConfigFactory::getMinLatencyConfig(const std::string& flags_d, uint3
     auto config = getCommonConfig(flags_d, flags_nireq);
     for (const auto& device : config.getDevices()) {
         if (device == "CPU") { // CPU supports a few special performance-oriented keys
-            config.compiledModelConfig.emplace(ov::num_streams.name(), 1);
+            config.compiledModelConfig.emplace(ov::streams::num.name(), 1);
         }
         else if (device == "GPU") {
-            config.compiledModelConfig.emplace(ov::num_streams.name(), 1);
+            config.compiledModelConfig.emplace(ov::streams::num.name(), 1);
         }
     }
     return config;
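
The unsigned-to-signed change here is not cosmetic: ov::streams::AUTO is a negative sentinel (a value of -1 is my assumption about the 2022.1 headers, not something this diff states), so storing stream counts in a uint32_t map would corrupt it. A tiny sketch:

#include <openvino/runtime/properties.hpp>
#include <cassert>

int main() {
    ov::streams::Num n = ov::streams::AUTO;
    assert(int32_t(n) < 0);  // the sentinel survives only in signed storage
    return 0;
}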

demos/mri_reconstruction_demo/cpp/main.cpp

Lines changed: 2 additions & 1 deletion
@@ -120,7 +120,8 @@ int main(int argc, char** argv) {
 cv::Mat tensorToMat(const ov::Tensor& tensor) {
     // NOTE: Inference Engine sizes are reversed.
     ov::Shape tensorShape = tensor.get_shape();
-    std::vector<int> size(tensorShape.begin(), tensorShape.end());
+    std::vector<int> size;
+    std::transform(tensorShape.begin(), tensorShape.end(), std::back_inserter(size), [](size_t dim) -> int { return int(dim); });
     ov::element::Type precision = tensor.get_element_type();
     CV_Assert(precision == ov::element::f32);
     return cv::Mat(size, CV_32F, (void*)tensor.data());
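
The replaced iterator-range constructor narrowed each size_t dimension to int implicitly, which trips conversion warnings; std::transform with an explicit cast keeps the same result while stating the intent. A standalone sketch (shape values invented):

#include <algorithm>
#include <cstdio>
#include <iterator>
#include <vector>

int main() {
    std::vector<size_t> shape{1, 256, 256};  // stand-in for ov::Shape
    std::vector<int> size;
    std::transform(shape.begin(), shape.end(), std::back_inserter(size),
                   [](size_t dim) -> int { return int(dim); });  // explicit per-element narrowing
    std::printf("%d %d %d\n", size[0], size[1], size[2]);
    return 0;
}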

demos/security_barrier_camera_demo/cpp/main.cpp

Lines changed: 5 additions & 5 deletions
@@ -712,23 +712,23 @@ int main(int argc, char* argv[]) {
                 devices.insert(device);
             }
         }
-        std::map<std::string, uint32_t> device_nstreams = parseValuePerDevice(devices, FLAGS_nstreams);
+        std::map<std::string, int32_t> device_nstreams = parseValuePerDevice(devices, FLAGS_nstreams);

         for (const std::string& device : devices) {
             if ("CPU" == device) {
                 if (FLAGS_nthreads != 0) {
                     core.set_property("CPU", ov::inference_num_threads(FLAGS_nthreads));
                 }
                 core.set_property("CPU", ov::affinity(ov::Affinity::NONE));
-                core.set_property("CPU", ov::num_streams((device_nstreams.count("CPU") > 0 ? device_nstreams.at("CPU") : ov::NumStreams::AUTO)));
+                core.set_property("CPU", ov::streams::num((device_nstreams.count("CPU") > 0 ? ov::streams::Num(device_nstreams["CPU"]) : ov::streams::AUTO)));

-                device_nstreams["CPU"] = core.get_property("CPU", ov::num_streams);
+                device_nstreams["CPU"] = core.get_property("CPU", ov::streams::num);
             }

             if ("GPU" == device) {
-                core.set_property("GPU", ov::num_streams(device_nstreams.count("GPU") > 0 ? device_nstreams.at("GPU") : ov::NumStreams::AUTO));
+                core.set_property("GPU", ov::streams::num(device_nstreams.count("GPU") > 0 ? ov::streams::Num(device_nstreams["GPU"]) : ov::streams::AUTO));

-                device_nstreams["GPU"] = core.get_property("GPU", ov::num_streams);
+                device_nstreams["GPU"] = core.get_property("GPU", ov::streams::num);
                 if (devices.end() != devices.find("CPU")) {
                     // multi-device execution with the CPU + GPU performs best with GPU throttling hint,
                     // which releases another CPU thread (that is otherwise used by the GPU driver for active polling)
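
The throttling hint mentioned in the comment is set on the GPU device separately from the streams property. A hedged fragment of what such a call can look like (the ThrottleLevel enum value is an assumption about the intel_gpu properties header, not taken from this diff; core is the ov::Core from the surrounding hunk):

#include <openvino/runtime/intel_gpu/properties.hpp>

// release a CPU thread otherwise consumed by the GPU driver's active polling
core.set_property("GPU", ov::intel_gpu::hint::queue_throttle(ov::intel_gpu::hint::ThrottleLevel::LOW));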

demos/smart_classroom_demo/cpp/src/reid_gallery.cpp

Lines changed: 1 addition & 1 deletion
@@ -116,7 +116,7 @@ std::vector<int> EmbeddingsGallery::GetIDsByEmbeddings(const std::vector<cv::Mat
     auto matched_idx = matcher.Solve(distances);
     std::vector<int> output_ids;
     for (auto col_idx : matched_idx) {
-        if (col_idx == -1) {
+        if (int(col_idx) == -1) {
             output_ids.push_back(unknown_id);
             continue;
         }
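
If matched_idx holds an unsigned index type, col_idx == -1 still works (the -1 converts to the largest unsigned value), but compilers flag it with sign-compare warnings; casting to int makes the comparison explicit. A standalone sketch:

#include <cstdio>

int main() {
    unsigned col_idx = static_cast<unsigned>(-1);  // "no match" sentinel in unsigned storage
    if (int(col_idx) == -1) {                      // explicit, warning-free comparison
        std::printf("unmatched\n");
    }
    return 0;
}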

demos/social_distance_demo/cpp/main.cpp

Lines changed: 5 additions & 5 deletions
@@ -689,22 +689,22 @@ int main(int argc, char* argv[]) {
                 devices.insert(device);
             }
         }
-        std::map<std::string, uint32_t> deviceNStreams = parseValuePerDevice(devices, FLAGS_nstreams);
+        std::map<std::string, int32_t> deviceNStreams = parseValuePerDevice(devices, FLAGS_nstreams);

         for (const std::string& device : devices) {
             if ("CPU" == device) {
                 if (FLAGS_nthreads != 0) {
                     core.set_property("CPU", ov::inference_num_threads(FLAGS_nthreads));
                 }
                 core.set_property("CPU", ov::affinity(ov::Affinity::NONE));
-                core.set_property("CPU", ov::num_streams((deviceNStreams.count("CPU") > 0 ? deviceNStreams.at("CPU") : ov::NumStreams::AUTO)));
-                deviceNStreams["CPU"] = core.get_property("CPU", ov::num_streams);
+                core.set_property("CPU", ov::streams::num((deviceNStreams.count("CPU") > 0 ? ov::streams::Num(deviceNStreams["CPU"]) : ov::streams::AUTO)));
+                deviceNStreams["CPU"] = core.get_property("CPU", ov::streams::num);
             }

             if ("GPU" == device) {
-                core.set_property("GPU", ov::num_streams(deviceNStreams.count("GPU") > 0 ? deviceNStreams.at("GPU") : ov::NumStreams::AUTO));
+                core.set_property("GPU", ov::streams::num(deviceNStreams.count("GPU") > 0 ? ov::streams::Num(deviceNStreams["GPU"]) : ov::streams::AUTO));

-                deviceNStreams["GPU"] = core.get_property("GPU", ov::num_streams);
+                deviceNStreams["GPU"] = core.get_property("GPU", ov::streams::num);
                 if (devices.end() != devices.find("CPU")) {
                     // multi-device execution with the CPU + GPU performs best with GPU throttling hint,
                     // which releases another CPU thread (that is otherwise used by the GPU driver for active polling)
