25 | 25 | #include <iostream> |
26 | 26 | #include <sstream> |
27 | 27 | #include <vector> |
28 | | -#include <experimental_onnxruntime_cxx_api.h> |
| 28 | +#include <onnxruntime_cxx_api.h> |
| | +#include <unordered_map> // used for the OpenVINO provider options below (may already be pulled in transitively) |
29 | 29 |
30 | 30 | // pretty prints a shape dimension vector |
31 | 31 | std::string print_shape(const std::vector<int64_t>& v) { |
@@ -64,59 +64,102 @@ int main(int argc, char** argv) { |
64 | 64 | //Appending OpenVINO Execution Provider API |
65 | 65 | #ifdef USE_OPENVINO |
66 | 66 | // Using OPENVINO backend |
67 | | - OrtOpenVINOProviderOptions options; |
68 | | - options.device_type = "CPU"; |
69 | | - std::cout << "OpenVINO device type is set to: " << options.device_type << std::endl; |
70 | | - session_options.AppendExecutionProvider_OpenVINO(options); |
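| | + // The _V2 call replaces the OrtOpenVINOProviderOptions struct: provider options are now passed as string key/value pairs (device_type can be e.g. "CPU" or "GPU"). |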
| 67 | + std::unordered_map<std::string, std::string> options; |
| 68 | + options["device_type"] = "CPU"; |
| 69 | + std::cout << "OpenVINO device type is set to: " << options["device_type"] << std::endl; |
| 70 | + session_options.AppendExecutionProvider_OpenVINO_V2(options); |
71 | 71 | #endif |
72 | | - Ort::Experimental::Session session = Ort::Experimental::Session(env, model_file, session_options); // access experimental components via the Experimental namespace |
73 | | - |
74 | | - // print name/shape of inputs |
75 | | - std::vector<std::string> input_names = session.GetInputNames(); |
76 | | - std::vector<std::vector<int64_t> > input_shapes = session.GetInputShapes(); |
77 | | - cout << "Input Node Name/Shape (" << input_names.size() << "):" << endl; |
78 | | - for (size_t i = 0; i < input_names.size(); i++) { |
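| | + // The experimental wrapper classes were deprecated and later removed from ONNX Runtime, so the sample now uses the stable Ort::Session API. |
| | + // Note: the constructor takes an ORTCHAR_T* path; on Windows that is wchar_t, so model_file.c_str() compiles as-is only on non-Windows builds. |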
| 72 | + Ort::Session session(env, model_file.c_str(), session_options); |
| 73 | + Ort::AllocatorWithDefaultOptions allocator; |
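| | + // The default allocator backs the GetInputNameAllocated/GetOutputNameAllocated calls below. |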
| 74 | + |
| 75 | + size_t num_input_nodes = session.GetInputCount(); |
| 76 | + std::vector<std::string> input_names; |
| 77 | + std::vector<std::vector<int64_t>> input_shapes; |
| 78 | + |
| 79 | + cout << "Input Node Name/Shape (" << num_input_nodes << "):" << endl; |
| 80 | + for (size_t i = 0; i < num_input_nodes; i++) { |
| 81 | + // Get input name (GetInputNameAllocated returns a self-freeing AllocatedStringPtr, so copy it into a std::string) |
| 82 | + auto input_name = session.GetInputNameAllocated(i, allocator); |
| 83 | + input_names.push_back(std::string(input_name.get())); |
| 84 | + |
| 85 | + // Get input shape |
| 86 | + Ort::TypeInfo input_type_info = session.GetInputTypeInfo(i); |
| 87 | + auto input_tensor_info = input_type_info.GetTensorTypeAndShapeInfo(); |
| 88 | + std::vector<int64_t> input_dims = input_tensor_info.GetShape(); |
| 89 | + input_shapes.push_back(input_dims); |
| 90 | + |
79 | 91 | cout << "\t" << input_names[i] << " : " << print_shape(input_shapes[i]) << endl; |
| 92 | + |
80 | 93 | } |
81 | 94 |
82 | | - // print name/shape of outputs |
83 | | - std::vector<std::string> output_names = session.GetOutputNames(); |
84 | | - std::vector<std::vector<int64_t> > output_shapes = session.GetOutputShapes(); |
85 | | - cout << "Output Node Name/Shape (" << output_names.size() << "):" << endl; |
86 | | - for (size_t i = 0; i < output_names.size(); i++) { |
| 95 | + size_t num_output_nodes = session.GetOutputCount(); |
| 96 | + std::vector<std::string> output_names; |
| 97 | + std::vector<std::vector<int64_t>> output_shapes; |
| 98 | + |
| 99 | + cout << "Output Node Name/Shape (" << num_output_nodes << "):" << endl; |
| 100 | + for (size_t i = 0; i < num_output_nodes; i++) { |
| 101 | + // Get output name (copied out of the self-freeing AllocatedStringPtr) |
| 102 | + auto output_name = session.GetOutputNameAllocated(i, allocator); |
| 103 | + output_names.push_back(std::string(output_name.get())); |
| 104 | + |
| 105 | + // Get output shape |
| 106 | + Ort::TypeInfo output_type_info = session.GetOutputTypeInfo(i); |
| 107 | + auto output_tensor_info = output_type_info.GetTensorTypeAndShapeInfo(); |
| 108 | + std::vector<int64_t> output_dims = output_tensor_info.GetShape(); |
| 109 | + output_shapes.push_back(output_dims); |
| 110 | + |
87 | 111 | cout << "\t" << output_names[i] << " : " << print_shape(output_shapes[i]) << endl; |
| 112 | + |
88 | 113 | } |
89 | | - |
90 | | - // Assume model has 1 input node and 1 output node. |
| 114 | + // This sample assumes the model has exactly one input node and one output node. |
91 | 115 | assert(input_names.size() == 1 && output_names.size() == 1); |
92 | 116 |
93 | 117 | // Create a single Ort tensor of random numbers |
94 | 118 | auto input_shape = input_shapes[0]; |
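| | + // Dynamic dimensions come back as -1 from GetShape(); they would need concrete values for the element count below to be meaningful. |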
95 | 119 | int total_number_elements = calculate_product(input_shape); |
96 | 120 | std::vector<float> input_tensor_values(total_number_elements); |
97 | | - std::generate(input_tensor_values.begin(), input_tensor_values.end(), [&] { return rand() % 255; }); // generate random numbers in the range [0, 255] |
| 121 | + std::generate(input_tensor_values.begin(), input_tensor_values.end(), [&] { return rand() % 255; }); // random values in [0, 254] |
| 122 | + |
| 123 | + // Create input tensor |
| 124 | + Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault); |
98 | 125 | std::vector<Ort::Value> input_tensors; |
99 | | - input_tensors.push_back(Ort::Experimental::Value::CreateTensor<float>(input_tensor_values.data(), input_tensor_values.size(), input_shape)); |
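| | + // CreateTensor wraps the caller's buffer rather than copying it, so input_tensor_values must stay alive until Run() completes. |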
| 126 | + input_tensors.push_back(Ort::Value::CreateTensor<float>(memory_info, input_tensor_values.data(), |
| 127 | + input_tensor_values.size(), input_shape.data(), |
| 128 | + input_shape.size())); |
100 | 129 |
101 | 130 | // double-check the dimensions of the input tensor |
102 | 131 | assert(input_tensors[0].IsTensor() && |
103 | 132 | input_tensors[0].GetTensorTypeAndShapeInfo().GetShape() == input_shape); |
104 | 133 | cout << "\ninput_tensor shape: " << print_shape(input_tensors[0].GetTensorTypeAndShapeInfo().GetShape()) << endl; |
105 | 134 |
| 135 | + // Run() takes raw const char* name arrays, so collect pointers to the stored name strings |
| 136 | + std::vector<const char*> input_names_char(input_names.size(), nullptr); |
| 137 | + std::vector<const char*> output_names_char(output_names.size(), nullptr); |
| 138 | + |
| 139 | + for (size_t i = 0; i < input_names.size(); i++) { |
| 140 | + input_names_char[i] = input_names[i].c_str(); |
| 141 | + } |
| 142 | + for (size_t i = 0; i < output_names.size(); i++) { |
| 143 | + output_names_char[i] = output_names[i].c_str(); |
| 144 | + } |
| 145 | + |
106 | 146 | // pass data through model |
107 | 147 | cout << "Running model..."; |
108 | 148 | try { |
109 | | - auto output_tensors = session.Run(session.GetInputNames(), input_tensors, session.GetOutputNames()); |
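| | + // The stable Run() overload takes RunOptions plus raw name arrays and returns one Ort::Value per requested output. |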
| 149 | + auto output_tensors = session.Run(Ort::RunOptions{nullptr}, input_names_char.data(), |
| 150 | + input_tensors.data(), input_names_char.size(), |
| 151 | + output_names_char.data(), output_names_char.size()); |
110 | 152 | cout << "done" << endl; |
111 | 153 |
112 | 154 | // double-check the dimensions of the output tensors |
113 | | - // NOTE: the number of output tensors is equal to the number of output nodes specifed in the Run() call |
114 | | - assert(output_tensors.size() == session.GetOutputNames().size() && |
115 | | - output_tensors[0].IsTensor()); |
| 155 | + assert(output_tensors.size() == output_names.size() && output_tensors[0].IsTensor()); |
116 | 156 | cout << "output_tensor_shape: " << print_shape(output_tensors[0].GetTensorTypeAndShapeInfo().GetShape()) << endl; |
117 | 157 |
118 | 158 | } catch (const Ort::Exception& exception) { |
119 | 159 | cout << "ERROR running model inference: " << exception.what() << endl; |
120 | 160 | exit(-1); |
121 | 161 | } |
| 162 | + |
| 163 | + return 0; |
| 164 | + |
122 | 165 | } |