28 changes: 28 additions & 0 deletions .clang-format
@@ -0,0 +1,28 @@
BasedOnStyle: Google
IndentWidth: 4
UseTab: Never
ColumnLimit: 120

Language: Cpp
Standard: Cpp11

AccessModifierOffset: -4
AlignConsecutiveMacros: true
AllowAllArgumentsOnNextLine: false
AllowAllConstructorInitializersOnNextLine: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortFunctionsOnASingleLine: Empty
AllowShortIfStatementsOnASingleLine: Never
AllowShortLambdasOnASingleLine: Empty
AllowShortLoopsOnASingleLine: false
AlwaysBreakBeforeMultilineStrings: false
BinPackArguments: false
BinPackParameters: false
CommentPragmas: "^#"
DerivePointerAlignment: false
FixNamespaceComments: true
IndentCaseLabels: false
IndentPPDirectives: AfterHash
ForEachMacros:
- foreach
- FOREACH_CHILD
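
A quick illustration of how the key options above combine (a sketch, not part of the PR; it assumes clang-format 10, the version pinned in CI below). With ColumnLimit: 120, BinPackParameters: false, and AllowAllParametersOfDeclarationOnNextLine: false, a declaration that exceeds the limit is broken onto one parameter per line, aligned to the opening parenthesis, which is exactly the shape the loadModel declarations take later in this diff:

// Before: one line, longer than ColumnLimit: 120.
virtual void loadModel(const std::shared_ptr<const ov::Model>& model, ov::Core& core, const std::string& device = "", const ov::AnyMap& compilationConfig = {}, size_t max_num_requests = 0) = 0;

// After clang-format with this config: once the declaration no longer fits,
// BinPackParameters: false puts each parameter on its own line.
virtual void loadModel(const std::shared_ptr<const ov::Model>& model,
                       ov::Core& core,
                       const std::string& device = "",
                       const ov::AnyMap& compilationConfig = {},
                       size_t max_num_requests = 0) = 0;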
2 changes: 2 additions & 0 deletions .github/workflows/pre_commit.yml
@@ -23,6 +23,8 @@ jobs:
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install clang-format
run: sudo apt-get install -y clang-format-10
- name: Install dependencies
run: pip install 'model_api/python/.[full]'
- name: Run pre-commit checks
5 changes: 0 additions & 5 deletions .github/workflows/test_precommit.yml
@@ -9,11 +9,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: git --no-pager diff --check $(git hash-object -t tree /dev/null)
run: git --no-pager diff --check $(git hash-object -t tree /dev/null)
- name: Prohibit non ASCII chars in file names
run: test $(git diff --name-only --diff-filter=A -z $(git hash-object -t tree /dev/null) | LC_ALL=C tr -d '[ -~]\0' | wc -c) == 0
- run: "! git grep -n '[^ -~]' -- ':(exclude)model_api/python/README.md'"
- uses: actions/setup-python@v4
with:
python-version: 3.9
7 changes: 7 additions & 0 deletions .pre-commit-config.yaml
@@ -22,3 +22,10 @@ repos:
rev: v0.41.0
hooks:
- id: markdownlint

# C++ code quality
- repo: https://github.com/cpp-linter/cpp-linter-hooks
rev: v0.5.1
hooks:
- id: clang-format
args: [--style=file, --version=10]
37 changes: 18 additions & 19 deletions examples/cpp/asynchronous_api/main.cpp
@@ -13,24 +13,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#include <adapters/openvino_adapter.h>
#include <models/detection_model.h>
#include <models/results.h>
#include <stddef.h>

#include <cstdint>
#include <exception>
#include <iomanip>
#include <iostream>
#include <stdexcept>
#include <string>

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <openvino/openvino.hpp>

#include <adapters/openvino_adapter.h>
#include <models/detection_model.h>
#include <models/results.h>

#include <stdexcept>
#include <string>

int main(int argc, char* argv[]) try {
if (argc != 3) {
@@ -43,43 +39,47 @@ int main(int argc, char* argv[]) try {
}

// Instantiate Object Detection model
auto model = DetectionModel::create_model(argv[1], {}, "", false); // works with SSD models. Download it using Python Model API
//Define number of parallel infer requests. If this number is set to 0, OpenVINO will determine it automatically to obtain optimal performance.
auto model = DetectionModel::create_model(argv[1],
{},
"",
false); // works with SSD models. Download it using Python Model API
// Define number of parallel infer requests. If this number is set to 0, OpenVINO will determine it automatically to
// obtain optimal performance.
size_t num_requests = 0;
static ov::Core core;
model->load(core, "CPU", num_requests);

std::cout << "Async inference will be carried out by " << model->getNumAsyncExecutors() << " parallel executors\n";
//Prepare batch data
// Prepare batch data
std::vector<ImageInputData> data;
for (size_t i = 0; i < 3; i++) {
data.push_back(ImageInputData(image));
}

//Batch inference is done by processing batch with num_requests parallel infer requests
// Batch inference is done by processing batch with num_requests parallel infer requests
std::cout << "Starting batch inference\n";
auto results = model->inferBatch(data);

std::cout << "Batch mode inference results:\n";
for (const auto& result : results) {
for (auto& obj : result->objects) {
std::cout << " " << std::left << std::setw(9) << obj.confidence << " " << obj.label << "\n";
std::cout << " " << std::left << std::setw(9) << obj.confidence << " " << obj.label << "\n";
}
std::cout << std::string(10, '-') << "\n";
}
std::cout << "Batch mode inference done\n";
std::cout << "Async mode inference results:\n";

//Set callback to grab results once the inference is done
model->setCallback([](std::unique_ptr<ResultBase> result, const ov::AnyMap& callback_args){
// Set callback to grab results once the inference is done
model->setCallback([](std::unique_ptr<ResultBase> result, const ov::AnyMap& callback_args) {
auto det_result = std::unique_ptr<DetectionResult>(static_cast<DetectionResult*>(result.release()));

//callback_args can contain arbitrary data
// callback_args can contain arbitrary data
size_t id = callback_args.find("id")->second.as<size_t>();

std::cout << "Request with id " << id << " is finished\n";
for (auto& obj : det_result->objects) {
std::cout << " " << std::left << std::setw(9) << obj.confidence << " " << obj.label << "\n";
std::cout << " " << std::left << std::setw(9) << obj.confidence << " " << obj.label << "\n";
}
std::cout << std::string(10, '-') << "\n";
});
@@ -88,7 +88,6 @@ int main(int argc, char* argv[]) try {
model->inferAsync(image, {{"id", i}});
}
model->awaitAll();

} catch (const std::exception& error) {
std::cerr << error.what() << '\n';
return 1;
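
Condensed from the example above, the asynchronous flow has three steps: register a callback, submit requests without blocking, and wait for completion. A minimal sketch (model and image are set up as earlier in the example; error handling elided):

// The callback runs once per finished request; callback_args carries arbitrary user data.
model->setCallback([](std::unique_ptr<ResultBase> result, const ov::AnyMap& callback_args) {
    auto det_result = std::unique_ptr<DetectionResult>(static_cast<DetectionResult*>(result.release()));
    std::cout << "Request " << callback_args.find("id")->second.as<size_t>() << " finished\n";
});
for (size_t i = 0; i < 3; i++) {
    model->inferAsync(image, {{"id", i}});  // non-blocking submission
}
model->awaitAll();  // block until every outstanding request has completed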
20 changes: 9 additions & 11 deletions examples/cpp/synchronous_api/main.cpp
@@ -14,23 +14,21 @@
// limitations under the License.
*/

#include <models/detection_model.h>
#include <models/input_data.h>
#include <models/results.h>
#include <stddef.h>

#include <cstdint>
#include <exception>
#include <iomanip>
#include <iostream>
#include <stdexcept>
#include <string>

#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <openvino/openvino.hpp>

#include <models/detection_model.h>
#include <models/input_data.h>
#include <models/results.h>
#include <stdexcept>
#include <string>

int main(int argc, char* argv[]) try {
if (argc != 3) {
@@ -43,16 +41,16 @@ int main(int argc, char* argv[]) try {
}

// Instantiate Object Detection model
auto model = DetectionModel::create_model(argv[1]); // works with SSD models. Download it using Python Model API
auto model = DetectionModel::create_model(argv[1]); // works with SSD models. Download it using Python Model API

// Run the inference
auto result = model->infer(image);

// Process detections
for (auto& obj : result->objects) {
std::cout << " " << std::left << std::setw(9) << obj.label << " | " << std::setw(10) << obj.confidence
<< " | " << std::setw(4) << int(obj.x) << " | " << std::setw(4) << int(obj.y) << " | "
<< std::setw(4) << int(obj.x + obj.width) << " | " << std::setw(4) << int(obj.y + obj.height) << "\n";
std::cout << " " << std::left << std::setw(9) << obj.label << " | " << std::setw(10) << obj.confidence << " | "
<< std::setw(4) << int(obj.x) << " | " << std::setw(4) << int(obj.y) << " | " << std::setw(4)
<< int(obj.x + obj.width) << " | " << std::setw(4) << int(obj.y + obj.height) << "\n";
}
} catch (const std::exception& error) {
std::cerr << error.what() << '\n';
15 changes: 7 additions & 8 deletions model_api/cpp/adapters/include/adapters/inference_adapter.h
@@ -15,13 +15,12 @@
*/

#pragma once
#include <string>
#include <functional>
#include <vector>
#include <map>
#include <memory>

#include <openvino/openvino.hpp>
#include <string>
#include <vector>

struct InputData;
struct InferenceResult;
@@ -31,9 +30,7 @@ using InferenceInput = std::map<std::string, ov::Tensor>;
using CallbackData = std::shared_ptr<ov::AnyMap>;

// The interface doesn't have implementation
class InferenceAdapter
{

class InferenceAdapter {
public:
virtual ~InferenceAdapter() = default;

@@ -44,8 +41,10 @@ class InferenceAdapter
virtual void awaitAll() = 0;
virtual void awaitAny() = 0;
virtual size_t getNumAsyncExecutors() const = 0;
virtual void loadModel(const std::shared_ptr<const ov::Model>& model, ov::Core& core,
const std::string& device = "", const ov::AnyMap& compilationConfig = {},
virtual void loadModel(const std::shared_ptr<const ov::Model>& model,
ov::Core& core,
const std::string& device = "",
const ov::AnyMap& compilationConfig = {},
size_t max_num_requests = 0) = 0;
virtual ov::PartialShape getInputShape(const std::string& inputName) const = 0;
virtual std::vector<std::string> getInputNames() const = 0;
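
Caller-side, the interface is consumed roughly like this (a sketch, assuming the OpenVINOInferenceAdapter declared in the next file; the model path is a placeholder):

#include <adapters/openvino_adapter.h>

#include <memory>

#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");  // placeholder path
    std::shared_ptr<InferenceAdapter> adapter = std::make_shared<OpenVINOInferenceAdapter>();
    // Compile the model for CPU with up to 4 parallel infer requests.
    adapter->loadModel(model, core, "CPU", {}, 4);
}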
22 changes: 11 additions & 11 deletions model_api/cpp/adapters/include/adapters/openvino_adapter.h
@@ -15,19 +15,17 @@
*/

#pragma once
#include <string>
#include <functional>
#include <vector>
#include <map>
#include <queue>
#include <memory>
#include <queue>
#include <string>
#include <vector>

#include "adapters/inference_adapter.h"
#include "utils/async_infer_queue.hpp"

class OpenVINOInferenceAdapter :public InferenceAdapter
{

class OpenVINOInferenceAdapter : public InferenceAdapter {
public:
OpenVINOInferenceAdapter() = default;

@@ -37,9 +35,11 @@ class OpenVINOInferenceAdapter :public InferenceAdapter
virtual bool isReady();
virtual void awaitAll();
virtual void awaitAny();
virtual void loadModel(const std::shared_ptr<const ov::Model>& model, ov::Core& core,
const std::string& device = "", const ov::AnyMap& compilationConfig = {},
size_t max_num_requests = 1) override;
virtual void loadModel(const std::shared_ptr<const ov::Model>& model,
ov::Core& core,
const std::string& device = "",
const ov::AnyMap& compilationConfig = {},
size_t max_num_requests = 1) override;
virtual size_t getNumAsyncExecutors() const;
virtual ov::PartialShape getInputShape(const std::string& inputName) const override;
virtual std::vector<std::string> getInputNames() const override;
@@ -50,10 +50,10 @@ class OpenVINOInferenceAdapter :public InferenceAdapter
void initInputsOutputs();

protected:
//Depends on the implementation details but we should share the model state in this class
// Depends on the implementation details but we should share the model state in this class
std::vector<std::string> inputNames;
std::vector<std::string> outputNames;
ov::CompiledModel compiledModel;
std::unique_ptr<AsyncInferQueue> asyncQueue;
ov::AnyMap modelConfig; // the content of model_info section of rt_info
ov::AnyMap modelConfig; // the content of model_info section of rt_info
};
13 changes: 8 additions & 5 deletions model_api/cpp/adapters/src/openvino_adapter.cpp
@@ -15,13 +15,17 @@
*/

#include "adapters/openvino_adapter.h"

#include <openvino/openvino.hpp>
#include <utils/slog.hpp>
#include <stdexcept>
#include <utils/slog.hpp>
#include <vector>

void OpenVINOInferenceAdapter::loadModel(const std::shared_ptr<const ov::Model>& model, ov::Core& core,
const std::string& device, const ov::AnyMap& compilationConfig, size_t max_num_requests) {
void OpenVINOInferenceAdapter::loadModel(const std::shared_ptr<const ov::Model>& model,
ov::Core& core,
const std::string& device,
const ov::AnyMap& compilationConfig,
size_t max_num_requests) {
slog::info << "Loading model to the plugin" << slog::endl;
ov::AnyMap customCompilationConfig(compilationConfig);
if (max_num_requests != 1) {
@@ -33,8 +37,7 @@ void OpenVINOInferenceAdapter::loadModel(const std::shared_ptr<const ov::Model>&
customCompilationConfig["PERFORMANCE_HINT_NUM_REQUESTS"] = ov::hint::num_requests(max_num_requests);
}
}
}
else {
} else {
if (customCompilationConfig.find("PERFORMANCE_HINT") == customCompilationConfig.end()) {
customCompilationConfig["PERFORMANCE_HINT"] = ov::hint::PerformanceMode::LATENCY;
}
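
In effect, the branch above maps max_num_requests onto OpenVINO's performance hints. A sketch of the three resulting configurations ("CPU" is an example device; adapter, model, and core as in the loadModel signature):

adapter->loadModel(model, core, "CPU", {}, 0);  // THROUGHPUT; OpenVINO picks the number of requests
adapter->loadModel(model, core, "CPU", {}, 4);  // THROUGHPUT, capped at 4 parallel requests
adapter->loadModel(model, core, "CPU", {}, 1);  // LATENCY; a single request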