 * SPDX-License-Identifier: Apache-2.0
 */
 #include <adapters/openvino_adapter.h>
-#include <models/detection_model.h>
-#include <models/results.h>
+#include <tasks/detection.h>
+#include <tasks/results.h>
 #include <stddef.h>
 
 #include <cstdint>
@@ -30,55 +30,31 @@ int main(int argc, char* argv[]) try {
     }
 
     // Instantiate Object Detection model
-    auto model = DetectionModel::create_model(argv[1],
-                                              {},
-                                              "",
-                                              false);  // works with SSD models. Download it using Python Model API
-    // Define number of parallel infer requests. If this number is set to 0, OpenVINO will determine it automatically to
-    // obtain optimal performance.
-    size_t num_requests = 0;
-    static ov::Core core;
-    model->load(core, "CPU", num_requests);
+    auto model = DetectionModel::load(argv[1], {});  // works with SSD models. Download it using Python Model API
+    // // Define number of parallel infer requests. If this number is set to 0, OpenVINO will determine it automatically to
+    // // obtain optimal performance.
+    // size_t num_requests = 0;
+    // static ov::Core core;
+    // model->load(core, "CPU", num_requests);
 
-    std::cout << "Async inference will be carried out by " << model->getNumAsyncExecutors() << " parallel executors\n";
-    // Prepare batch data
-    std::vector<ImageInputData> data;
-    for (size_t i = 0; i < 3; i++) {
-        data.push_back(ImageInputData(image));
-    }
+    // std::cout << "Async inference will be carried out by " << model->getNumAsyncExecutors() << " parallel executors\n";
+    // // Prepare batch data
+    std::vector<cv::Mat> data = {image};
+    // for (size_t i = 0; i < 3; i++) {
+    //     data.push_back(ImageInputData(image));
+    // }
 
     // Batch inference is done by processing batch with num_requests parallel infer requests
     std::cout << "Starting batch inference\n";
-    auto results = model->inferBatch(data);
+    auto results = model.inferBatch(data);
 
     std::cout << "Batch mode inference results:\n";
     for (const auto& result : results) {
-        for (auto& obj : result->objects) {
+        for (auto& obj : result.objects) {
             std::cout << " " << std::left << std::setw(9) << obj.confidence << " " << obj.label << "\n";
         }
         std::cout << std::string(10, '-') << "\n";
     }
-    std::cout << "Batch mode inference done\n";
-    std::cout << "Async mode inference results:\n";
-
-    // Set callback to grab results once the inference is done
-    model->setCallback([](std::unique_ptr<ResultBase> result, const ov::AnyMap& callback_args) {
-        auto det_result = std::unique_ptr<DetectionResult>(static_cast<DetectionResult*>(result.release()));
-
-        // callback_args can contain arbitrary data
-        size_t id = callback_args.find("id")->second.as<size_t>();
-
-        std::cout << "Request with id " << id << " is finished\n";
-        for (auto& obj : det_result->objects) {
-            std::cout << " " << std::left << std::setw(9) << obj.confidence << " " << obj.label << "\n";
-        }
-        std::cout << std::string(10, '-') << "\n";
-    });
-
-    for (size_t i = 0; i < 3; i++) {
-        model->inferAsync(image, {{"id", i}});
-    }
-    model->awaitAll();
 } catch (const std::exception& error) {
     std::cerr << error.what() << '\n';
     return 1;
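For anyone porting their own code across this refactor, here is a minimal, self-contained sketch of the new flow, assembled only from the calls visible in the diff above (DetectionModel::load, inferBatch, result.objects). The command-line layout (model path, then image path) and the cv::imread step are illustrative assumptions, not part of this commit:

#include <tasks/detection.h>
#include <tasks/results.h>

#include <iostream>
#include <opencv2/opencv.hpp>
#include <vector>

int main(int argc, char* argv[]) try {
    // Assumed invocation (not from this commit): ./sample <model.xml> <image>
    if (argc != 3) {
        std::cerr << "Usage: " << argv[0] << " <model_path> <image_path>\n";
        return 1;
    }

    // Same call as in the updated sample: load with a default (empty) configuration
    auto model = DetectionModel::load(argv[1], {});
    cv::Mat image = cv::imread(argv[2]);

    // The refactored API takes the batch as a plain std::vector<cv::Mat>
    std::vector<cv::Mat> data = {image};
    auto results = model.inferBatch(data);

    // Detections are exposed on each result's .objects member
    for (const auto& result : results) {
        for (const auto& obj : result.objects) {
            std::cout << obj.confidence << " " << obj.label << "\n";
        }
    }
} catch (const std::exception& error) {
    std::cerr << error.what() << '\n';
    return 1;
}

Note the shift from pointer to value semantics: DetectionModel::load returns the model directly and inferBatch returns results by value, which is why the sample now uses '.' where the old code used '->'.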