#include <string>

#include "openvino/openvino.hpp"
+ #include "openvino/runtime/intel_gpu/properties.hpp"
+ #include "openvino/runtime/intel_myriad/hddl_properties.hpp"

- #include "gpu/gpu_config.hpp"
- #include "vpu/hddl_config.hpp"
#include "monitors/presenter.h"
#include "utils/args_helper.hpp"
#include "utils/grid_mat.hpp"
@@ -694,24 +694,23 @@ int main(int argc, char* argv[]) {
for (const std::string& device : devices) {
    if ("CPU" == device) {
        if (FLAGS_nthreads != 0) {
-           core.set_property("CPU", {{ CONFIG_KEY(CPU_THREADS_NUM), std::to_string(FLAGS_nthreads) }});
+           core.set_property("CPU", ov::inference_num_threads(FLAGS_nthreads));
        }
-       core.set_property("CPU", {{ CONFIG_KEY(CPU_BIND_THREAD), CONFIG_VALUE(NO) }});
+       core.set_property("CPU", ov::affinity(ov::Affinity::NONE));
        core.set_property("CPU", {{ CONFIG_KEY(CPU_THROUGHPUT_STREAMS),
            (deviceNStreams.count("CPU") > 0 ? std::to_string(deviceNStreams.at("CPU")) :
                CONFIG_VALUE(CPU_THROUGHPUT_AUTO)) }});
-       deviceNStreams["CPU"] = std::stoi(core.get_property("CPU", CONFIG_KEY(CPU_THROUGHPUT_STREAMS)).as<std::string>());
+       deviceNStreams["CPU"] = core.get_property("CPU", ov::streams::num);
    }

    if ("GPU" == device) {
-       core.set_property("GPU", {{ CONFIG_KEY(GPU_THROUGHPUT_STREAMS),
-           (deviceNStreams.count("GPU") > 0 ? std::to_string(deviceNStreams.at("GPU")) :
-               CONFIG_VALUE(GPU_THROUGHPUT_AUTO)) }});
-       deviceNStreams["GPU"] = std::stoi(core.get_property("GPU", CONFIG_KEY(GPU_THROUGHPUT_STREAMS)).as<std::string>());
+       core.set_property("GPU", ov::streams::num(deviceNStreams.count("GPU") > 0 ? deviceNStreams.at("GPU") : ov::streams::AUTO));
+
+       deviceNStreams["GPU"] = core.get_property("GPU", ov::streams::num);
        if (devices.end() != devices.find("CPU")) {
            // multi-device execution with the CPU + GPU performs best with a GPU throttling hint,
            // which releases another CPU thread (that is otherwise used by the GPU driver for active polling)
-           core.set_property("GPU", {{ GPU_CONFIG_KEY(PLUGIN_THROTTLE), "1" }});
+           core.set_property("GPU", ov::intel_gpu::hint::queue_throttle(ov::intel_gpu::hint::ThrottleLevel(1)));
        }
    }
}
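The GPU branch above collapses the four-line string config into a single ov::streams::num call and reads the resolved stream count straight back as an integer. A hedged sketch of that round-trip, with an illustrative helper name and a std::map<std::string, int32_t> standing in for deviceNStreams (assumptions, not demo code):

#include <cstdint>
#include <map>
#include <string>
#include "openvino/openvino.hpp"

// resolveStreams is an illustrative name; the demo does this inline per device.
static int32_t resolveStreams(ov::Core& core, const std::string& device,
                              const std::map<std::string, int32_t>& requested) {
    // Ask for the user-requested count if present, otherwise let the plugin choose.
    core.set_property(device, ov::streams::num(requested.count(device) > 0
        ? ov::streams::Num(requested.at(device)) : ov::streams::AUTO));
    // Read back the stream count the plugin reports after applying the request.
    return core.get_property(device, ov::streams::num);
}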
@@ -720,7 +719,7 @@ int main(int argc, char* argv[]) {
auto makeTagConfig = [&](const std::string& deviceName, const std::string& suffix) {
    ov::AnyMap config;
    if (FLAGS_tag && deviceName == "HDDL") {
-       config[InferenceEngine::HDDL_GRAPH_TAG] = "tag" + suffix;
+       config["HDDL"] = ov::intel_myriad::hddl::graph_tag("tag" + suffix);
    }
    return config;
};
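makeTagConfig returns an ov::AnyMap, which is presumably handed to compile_model as per-model compile properties so HDDL can schedule tagged graphs. A hedged sketch of that consumption, using the key/value pair produced by ov::intel_myriad::hddl::graph_tag and an illustrative helper name (not verbatim from the demo):

#include <memory>
#include <string>
#include "openvino/openvino.hpp"
#include "openvino/runtime/intel_myriad/hddl_properties.hpp"

// compileTagged is illustrative; the demo builds the map in makeTagConfig instead.
static ov::CompiledModel compileTagged(ov::Core& core, const std::shared_ptr<ov::Model>& model,
                                       const std::string& device, const std::string& suffix) {
    ov::AnyMap config;
    if (device == "HDDL") {
        // graph_tag("tag" + suffix) yields a {property-name, value} pair for the AnyMap.
        config.emplace(ov::intel_myriad::hddl::graph_tag("tag" + suffix));
    }
    return core.compile_model(model, device, config);
}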