 // See the License for the specific language governing permissions and
 // limitations under the License.
 */
-
+#include <adapters/openvino_adapter.h>
+#include <models/detection_model.h>
+#include <models/results.h>
 #include <stddef.h>

 #include <cstdint>
 #include <exception>
 #include <iomanip>
 #include <iostream>
-#include <stdexcept>
-#include <string>
-
 #include <opencv2/core.hpp>
 #include <opencv2/imgproc.hpp>
 #include <openvino/openvino.hpp>
-
-#include <adapters/openvino_adapter.h>
-#include <models/detection_model.h>
-#include <models/results.h>
-
+#include <stdexcept>
+#include <string>

 int main(int argc, char* argv[]) try {
     if (argc != 3) {
@@ -43,43 +39,47 @@ int main(int argc, char* argv[]) try {
     }

     // Instantiate Object Detection model
-    auto model = DetectionModel::create_model(argv[1], {}, "", false);  // works with SSD models. Download it using Python Model API
-    // Define number of parallel infer requests. If this number is set to 0, OpenVINO will determine it automatically to obtain optimal performance.
+    auto model = DetectionModel::create_model(argv[1],
+                                              {},
+                                              "",
+                                              false);  // works with SSD models. Download it using Python Model API
+    // Define number of parallel infer requests. If this number is set to 0, OpenVINO will determine it automatically to
+    // obtain optimal performance.
     size_t num_requests = 0;
     static ov::Core core;
     model->load(core, "CPU", num_requests);

     std::cout << "Async inference will be carried out by " << model->getNumAsyncExecutors() << " parallel executors\n";
-    // Prepare batch data
+    // Prepare batch data
     std::vector<ImageInputData> data;
     for (size_t i = 0; i < 3; i++) {
         data.push_back(ImageInputData(image));
     }

-    // Batch inference is done by processing batch with num_requests parallel infer requests
+    // Batch inference is done by processing batch with num_requests parallel infer requests
     std::cout << "Starting batch inference\n";
     auto results = model->inferBatch(data);

     std::cout << "Batch mode inference results:\n";
     for (const auto& result : results) {
         for (auto& obj : result->objects) {
-            std::cout << " " << std::left << std::setw(9) << obj.confidence << " " << obj.label << "\n";
+            std::cout << " " << std::left << std::setw(9) << obj.confidence << " " << obj.label << "\n";
         }
         std::cout << std::string(10, '-') << "\n";
     }
     std::cout << "Batch mode inference done\n";
     std::cout << "Async mode inference results:\n";

-    // Set callback to grab results once the inference is done
-    model->setCallback([](std::unique_ptr<ResultBase> result, const ov::AnyMap& callback_args){
+    // Set callback to grab results once the inference is done
+    model->setCallback([](std::unique_ptr<ResultBase> result, const ov::AnyMap& callback_args) {
         auto det_result = std::unique_ptr<DetectionResult>(static_cast<DetectionResult*>(result.release()));

-        // callback_args can contain arbitrary data
+        // callback_args can contain arbitrary data
         size_t id = callback_args.find("id")->second.as<size_t>();

         std::cout << "Request with id " << id << " is finished\n";
         for (auto& obj : det_result->objects) {
-            std::cout << " " << std::left << std::setw(9) << obj.confidence << " " << obj.label << "\n";
+            std::cout << " " << std::left << std::setw(9) << obj.confidence << " " << obj.label << "\n";
         }
         std::cout << std::string(10, '-') << "\n";
     });
@@ -88,7 +88,6 @@ int main(int argc, char* argv[]) try {
         model->inferAsync(image, {{"id", i}});
     }
     model->awaitAll();
-
 } catch (const std::exception& error) {
     std::cerr << error.what() << '\n';
     return 1;
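
The diff above is a pure reformatting pass; the sample's API flow is unchanged. As a reading aid, here is a minimal sketch of the async portion of that flow, assuming the model_api headers from the includes above are on the include path; "model.xml" and "input.jpg" are placeholder paths for illustration, not files shipped with the sample:

#include <models/detection_model.h>
#include <models/results.h>

#include <cstddef>
#include <iostream>
#include <memory>

#include <opencv2/imgcodecs.hpp>
#include <openvino/openvino.hpp>

int main() {
    // "model.xml" is a placeholder path to an SSD-style detection model.
    auto model = DetectionModel::create_model("model.xml", {}, "", false);
    static ov::Core core;
    model->load(core, "CPU", 0);  // 0 requests: let OpenVINO pick the optimal count

    // The callback runs once per completed request; callback_args carries the
    // user metadata passed to inferAsync(), here just an "id" key.
    model->setCallback([](std::unique_ptr<ResultBase> result, const ov::AnyMap& callback_args) {
        auto det = std::unique_ptr<DetectionResult>(static_cast<DetectionResult*>(result.release()));
        std::cout << "request " << callback_args.find("id")->second.as<size_t>() << ": "
                  << det->objects.size() << " objects\n";
    });

    cv::Mat image = cv::imread("input.jpg");  // "input.jpg" is a placeholder test image
    for (size_t i = 0; i < 3; i++) {
        model->inferAsync(image, {{"id", i}});  // enqueue; returns without waiting
    }
    model->awaitAll();  // block until every queued request has run its callback
}

The sketch relies only on calls that appear in the diff itself: create_model(), load(), setCallback(), inferAsync() with an ov::AnyMap of user data, and awaitAll(), which is what guarantees all callbacks have fired before main() returns.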