Skip to content

Commit 67598e4

Browse files
authored
Remove deprecated API usage (microsoft#144)
1 parent 743a310 commit 67598e4

File tree

9 files changed

+72
-63
lines changed

9 files changed

+72
-63
lines changed

c_cxx/MNIST/MNIST.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,8 @@ struct MNIST {
4040
const char* input_names[] = {"Input3"};
4141
const char* output_names[] = {"Plus214_Output_0"};
4242

43-
session_.Run(Ort::RunOptions{nullptr}, input_names, &input_tensor_, 1, output_names, &output_tensor_, 1);
43+
Ort::RunOptions run_options;
44+
session_.Run(run_options, input_names, &input_tensor_, 1, output_names, &output_tensor_, 1);
4445
softmax(results_);
4546
result_ = std::distance(results_.begin(), std::max_element(results_.begin(), results_.end()));
4647
return result_;

c_cxx/README.md

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -12,9 +12,7 @@ This directory contains a few C/C++ sample applications for demoing onnxruntime
1212
## Prerequisites
1313
1. Visual Studio 2015/2017/2019
1414
2. cmake(version >=3.13)
15-
3. (optional) [libpng 1.6](http://www.libpng.org/pub/png/libpng.html)
16-
17-
You may get a precompiled libpng library from [https://onnxruntimetestdata.blob.core.windows.net/models/libpng.zip](https://onnxruntimetestdata.blob.core.windows.net/models/libpng.zip)
15+
3. (optional) [libpng 1.6](https://libpng.sourceforge.io/)
1816

1917
## Install ONNX Runtime
2018
### Option 1: download a prebuilt package
@@ -31,16 +29,16 @@ build.bat --config RelWithDebInfo --build_shared_lib --parallel
3129
By default this will build a project with "C:\Program Files (x86)\onnxruntime" install destination. This is a protected folder on Windows. If you do not want to run installation with elevated privileges you will need to override the default installation location by passing extra CMake arguments. For example:
3230

3331
```
34-
build.bat --config RelWithDebInfo --build_shared_lib --parallel --cmake_extra_defines CMAKE_INSTALL_PREFIX=c:\dev\ort_install
32+
build.bat --config RelWithDebInfo --build_dir .\build --build_shared_lib --parallel --cmake_extra_defines CMAKE_INSTALL_PREFIX=c:\dev\ort_install
3533
```
3634

37-
By default products of the build on Windows go to .\build\Windows\<config> folder. In the case above it would be .\build\Windows\RelWithDebInfo.
35+
By default products of the build on Windows go to .\build\Windows\<config> folder. In the case above it would be .\build\RelWithDebInfo since the build folder is mentioned explicitly.
3836
If you did not specify alternative installation location above you would need to open an elevated command prompt to install onnxruntime.
3937
Run the following commands.
4038

4139
```
42-
cd .\Windows\RelWithDebInfo
43-
msbuild INSTALL.vcxproj /p:Configuration=RelWithDebInfo
40+
cmake --install .\build\RelWithDebInfo --config RelWithDebInfo
41+
4442
```
4543

4644
## Build the samples
@@ -63,6 +61,7 @@ Or build it using msbuild
6361

6462
```bat
6563
msbuild onnxruntime_samples.sln /p:Configuration=Debug|Release
64+
cmake --install .\build\Debug|Release --config Debug
6665
```
6766

6867
To run the samples make sure that your Install Folder Bin is in the path so your sample executable can find onnxruntime dll and libpng if you used it.

c_cxx/imagenet/CMakeLists.txt

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
# Licensed under the MIT License.
33

44
set(FS_SOURCES local_filesystem.h sync_api.h controller.h controller.cc)
5+
56
if(WIN32)
67
LIST(APPEND FS_SOURCES local_filesystem_win.cc sync_api_win.cc)
78
else()
@@ -17,17 +18,22 @@ if(JPEG_FOUND)
1718
elseif(WIN32)
1819
SET(IMAGE_SRC image_loader_wic.cc)
1920
endif()
21+
2022
add_executable(image_classifier main.cc runnable_task.h data_processing.h ${IMAGE_SRC}
21-
async_ring_buffer.h image_loader.cc image_loader.h cached_interpolation.h single_consumer.h)
23+
async_ring_buffer.h image_loader.cc image_loader.h cached_interpolation.h single_consumer.h)
24+
2225
if(JPEG_FOUND)
2326
target_compile_definitions(image_classifier PRIVATE HAVE_JPEG)
2427
SET(IMAGE_HEADERS ${JPEG_INCLUDE_DIR})
2528
SET(IMAGE_LIBS ${JPEG_LIBRARIES})
2629
endif()
30+
2731
target_include_directories(image_classifier PRIVATE ${PROJECT_SOURCE_DIR}/include ${IMAGE_HEADERS})
32+
2833
if(WIN32)
2934
target_compile_definitions(image_classifier PRIVATE WIN32_LEAN_AND_MEAN NOMINMAX)
3035
endif()
36+
3137
target_link_libraries(image_classifier PRIVATE onnxruntime slim_fs_lib ${IMAGE_LIBS})
3238

3339

c_cxx/imagenet/image_loader.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
#include "cached_interpolation.h"
99
#include "sync_api.h"
1010
#include "data_processing.h"
11-
#include <onnxruntime_c_api.h>
11+
#include <onnxruntime_cxx_api.h>
1212

1313
template <typename T>
1414
void ResizeImageInMemory(const T* input_data, float* output_data, int in_height, int in_width, int out_height,

c_cxx/imagenet/local_filesystem.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
#include <unistd.h>
2020
#include <sys/mman.h>
2121
#endif
22+
2223
#include <onnxruntime_c_api.h>
2324
void ReadFileAsString(const ORTCHAR_T* fname, void*& p, size_t& len);
2425

c_cxx/imagenet/main.cc

Lines changed: 14 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -12,10 +12,11 @@
1212
#include <vector>
1313
#include <memory>
1414
#include <atomic>
15+
#include <optional>
1516

16-
#include "providers.h"
17-
#include "local_filesystem.h"
18-
#include "sync_api.h"
17+
#include "providers.h"
18+
#include "local_filesystem.h"
19+
#include "sync_api.h"
1920

2021
#include <onnxruntime_cxx_api.h>
2122

@@ -26,8 +27,10 @@
2627
#ifdef _WIN32
2728
#include <atlbase.h>
2829
#endif
30+
2931
using namespace std::chrono;
3032

33+
3134
class Validator : public OutputCollector<TCharString> {
3235
private:
3336
static std::vector<std::string> ReadFileToVec(const TCharString& file_path, size_t expected_line_count) {
@@ -81,20 +84,15 @@ class Validator : public OutputCollector<TCharString> {
8184
int image_size_;
8285

8386
std::mutex m_;
84-
char* input_name_ = nullptr;
85-
char* output_name_ = nullptr;
87+
std::optional<Ort::AllocatedStringPtr> input_name_;
88+
std::optional<Ort::AllocatedStringPtr> output_name_;
8689
Ort::Env& env_;
8790
const TCharString model_path_;
8891
system_clock::time_point start_time_;
8992

9093
public:
9194
int GetImageSize() const { return image_size_; }
9295

93-
~Validator() {
94-
free(input_name_);
95-
free(output_name_);
96-
}
97-
9896
void PrintResult() {
9997
if (finished_count_ == 0) return;
10098
printf("Top-1 Accuracy %f\n", ((float)top_1_correct_count_.load() / finished_count_));
@@ -124,20 +122,15 @@ class Validator : public OutputCollector<TCharString> {
124122
VerifyInputOutputCount(session_);
125123
Ort::AllocatorWithDefaultOptions ort_alloc;
126124
{
127-
char* t = session_.GetInputName(0, ort_alloc);
128-
input_name_ = my_strdup(t);
129-
ort_alloc.Free(t);
130-
t = session_.GetOutputName(0, ort_alloc);
131-
output_name_ = my_strdup(t);
132-
ort_alloc.Free(t);
125+
input_name_.emplace(session_.GetInputNameAllocated(0, ort_alloc));
126+
output_name_.emplace(session_.GetOutputNameAllocated(0, ort_alloc));
133127
}
134128

135129
Ort::TypeInfo info = session_.GetInputTypeInfo(0);
136130
auto tensor_info = info.GetTensorTypeAndShapeInfo();
137131
size_t dim_count = tensor_info.GetDimensionsCount();
138132
assert(dim_count == 4);
139-
std::vector<int64_t> dims(dim_count);
140-
tensor_info.GetDimensions(dims.data(), dims.size());
133+
std::vector<int64_t> dims = tensor_info.GetShape();
141134
if (dims[1] != dims[2] || dims[3] != 3) {
142135
throw std::runtime_error("This model is not supported by this program. input tensor need be in NHWC format");
143136
}
@@ -150,8 +143,10 @@ class Validator : public OutputCollector<TCharString> {
150143
{
151144
std::lock_guard<std::mutex> l(m_);
152145
const size_t remain = task_id_list.size();
146+
const char* input_names[] = {input_name_->get()};
147+
char* output_names[] = {output_name_->get()};
153148
Ort::Value output_tensor{nullptr};
154-
session_.Run(Ort::RunOptions{nullptr}, &input_name_, &input_tensor, 1, &output_name_, &output_tensor, 1);
149+
session_.Run(Ort::RunOptions{nullptr}, input_names, &input_tensor, 1, output_names, &output_tensor, 1);
155150
float* probs = output_tensor.GetTensorMutableData<float>();
156151
for (const auto& s : task_id_list) {
157152
float* end = probs + output_class_count_;

c_cxx/imagenet/sync_api.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,6 @@
88
#else
99
#include <vector>
1010
#endif
11-
#include <onnxruntime_c_api.h>
12-
#include <onnxruntime_cxx_api.h>
1311

1412
#ifdef _WIN32
1513
#define my_strtol wcstol

c_cxx/imagenet/sync_api_win.cc

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
// Licensed under the MIT License.
33

44
#include "sync_api.h"
5+
#include <stdexcept>
56

67
void CreateAndSubmitThreadpoolWork(_In_ ONNXRUNTIME_CALLBACK_FUNCTION callback, _In_ void* data,
78
_In_opt_ PThreadPoolCallbackEnv pool) {

c_cxx/squeezenet/main.cpp

Lines changed: 40 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,11 @@
33
//
44

55
#include <assert.h>
6-
#include <vector>
76
#include <onnxruntime_cxx_api.h>
87

8+
#include <iostream>
9+
#include <vector>
10+
911
#ifdef HAVE_TENSORRT_PROVIDER_FACTORY_H
1012
#include <tensorrt_provider_factory.h>
1113
#include <tensorrt_provider_options.h>
@@ -60,13 +62,15 @@ void run_ort_trt() {
6062

6163
//**************************************************************************************************************************
6264
// It's suggested to use CreateTensorRTProviderOptions() to get provider options
63-
// since ORT takes care of valid options for you
65+
// since ORT takes care of valid options for you
6466
//**************************************************************************************************************************
6567
api.CreateTensorRTProviderOptions(&tensorrt_options);
66-
std::unique_ptr<OrtTensorRTProviderOptionsV2, decltype(api.ReleaseTensorRTProviderOptions)> rel_trt_options(tensorrt_options, api.ReleaseTensorRTProviderOptions);
67-
api.SessionOptionsAppendExecutionProvider_TensorRT_V2(static_cast<OrtSessionOptions*>(session_options), rel_trt_options.get());
68+
std::unique_ptr<OrtTensorRTProviderOptionsV2, decltype(api.ReleaseTensorRTProviderOptions)> rel_trt_options(
69+
tensorrt_options, api.ReleaseTensorRTProviderOptions);
70+
api.SessionOptionsAppendExecutionProvider_TensorRT_V2(static_cast<OrtSessionOptions*>(session_options),
71+
rel_trt_options.get());
6872

69-
printf("Runing ORT TRT EP with default provider options\n");
73+
std::cout << "Running ORT TRT EP with default provider options" << std::endl;
7074

7175
Ort::Session session(env, model_path, session_options);
7276

@@ -75,60 +79,69 @@ void run_ort_trt() {
7579
Ort::AllocatorWithDefaultOptions allocator;
7680

7781
// print number of model input nodes
78-
size_t num_input_nodes = session.GetInputCount();
79-
std::vector<const char*> input_node_names(num_input_nodes);
82+
const size_t num_input_nodes = session.GetInputCount();
83+
std::vector<Ort::AllocatedStringPtr> input_names_ptr;
84+
std::vector<const char*> input_node_names;
85+
input_names_ptr.reserve(num_input_nodes);
86+
input_node_names.reserve(num_input_nodes);
8087
std::vector<int64_t> input_node_dims; // simplify... this model has only 1 input node {1, 3, 224, 224}.
8188
// Otherwise need vector<vector<>>
8289

83-
printf("Number of inputs = %zu\n", num_input_nodes);
90+
std::cout << "Number of inputs = " << num_input_nodes << std::endl;
8491

8592
// iterate over all input nodes
86-
for (int i = 0; i < num_input_nodes; i++) {
93+
for (size_t i = 0; i < num_input_nodes; i++) {
8794
// print input node names
88-
char* input_name = session.GetInputName(i, allocator);
89-
printf("Input %d : name=%s\n", i, input_name);
90-
input_node_names[i] = input_name;
95+
auto input_name = session.GetInputNameAllocated(i, allocator);
96+
std::cout << "Input " << i << " : name =" << input_name.get() << std::endl;
97+
input_node_names.push_back(input_name.get());
98+
input_names_ptr.push_back(std::move(input_name));
9199

92100
// print input node types
93-
Ort::TypeInfo type_info = session.GetInputTypeInfo(i);
101+
auto type_info = session.GetInputTypeInfo(i);
94102
auto tensor_info = type_info.GetTensorTypeAndShapeInfo();
95103

96104
ONNXTensorElementDataType type = tensor_info.GetElementType();
97-
printf("Input %d : type=%d\n", i, type);
105+
std::cout << "Input " << i << " : type = " << type << std::endl;
98106

99107
// print input shapes/dims
100108
input_node_dims = tensor_info.GetShape();
101-
printf("Input %d : num_dims=%zu\n", i, input_node_dims.size());
102-
for (size_t j = 0; j < input_node_dims.size(); j++)
103-
printf("Input %d : dim %zu=%jd\n", i, j, input_node_dims[j]);
109+
std::cout << "Input " << i << " : num_dims = " << input_node_dims.size() << '\n';
110+
for (size_t j = 0; j < input_node_dims.size(); j++) {
111+
std::cout << "Input " << i << " : dim[" << j << "] =" << input_node_dims[j] << '\n';
112+
}
113+
std::cout << std::flush;
104114
}
105115

106-
size_t input_tensor_size = 224 * 224 * 3; // simplify ... using known dim values to calculate size
107-
// use OrtGetTensorShapeElementCount() to get official size!
116+
constexpr size_t input_tensor_size = 224 * 224 * 3; // simplify ... using known dim values to calculate size
117+
// use OrtGetTensorShapeElementCount() to get official size!
108118

109119
std::vector<float> input_tensor_values(input_tensor_size);
110120
std::vector<const char*> output_node_names = {"softmaxout_1"};
111121

112122
// initialize input data with values in [0.0, 1.0]
113-
for (unsigned int i = 0; i < input_tensor_size; i++)
114-
input_tensor_values[i] = (float)i / (input_tensor_size + 1);
123+
for (unsigned int i = 0; i < input_tensor_size; i++) input_tensor_values[i] = (float)i / (input_tensor_size + 1);
115124

116125
// create input tensor object from data values
117126
auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
118-
Ort::Value input_tensor = Ort::Value::CreateTensor<float>(memory_info, input_tensor_values.data(), input_tensor_size, input_node_dims.data(), 4);
127+
auto input_tensor = Ort::Value::CreateTensor<float>(memory_info, input_tensor_values.data(), input_tensor_size,
128+
input_node_dims.data(), 4);
119129
assert(input_tensor.IsTensor());
120130

121131
// score model & input tensor, get back output tensor
122-
auto output_tensors = session.Run(Ort::RunOptions{nullptr}, input_node_names.data(), &input_tensor, 1, output_node_names.data(), 1);
132+
auto output_tensors =
133+
session.Run(Ort::RunOptions{nullptr}, input_node_names.data(), &input_tensor, 1, output_node_names.data(), 1);
123134
assert(output_tensors.size() == 1 && output_tensors.front().IsTensor());
124135

125136
// Get pointer to output tensor float values
126137
float* floatarr = output_tensors.front().GetTensorMutableData<float>();
127138
assert(abs(floatarr[0] - 0.000045) < 1e-6);
128139

129140
// score the model, and print scores for first 5 classes
130-
for (int i = 0; i < 5; i++)
131-
printf("Score for class [%d] = %f\n", i, floatarr[i]);
141+
for (int i = 0; i < 5; i++) {
142+
std::cout << "Score for class [" << i << "] = " << floatarr[i] << '\n';
143+
}
144+
std::cout << std::flush;
132145

133146
// Results should be as below...
134147
// Score for class[0] = 0.000045
@@ -137,15 +150,10 @@ void run_ort_trt() {
137150
// Score for class[3] = 0.001180
138151
// Score for class[4] = 0.001317
139152

140-
141-
// release buffers allocated by ORT alloctor
142-
for(const char* node_name : input_node_names)
143-
allocator.Free(const_cast<void*>(reinterpret_cast<const void*>(node_name)));
144-
145-
printf("Done!\n");
153+
std::cout << "Done!" << std::endl;
146154
}
147155

148-
int main(int argc, char* argv[]) {
156+
int main(int /*argc*/, char*[]) {
149157
run_ort_trt();
150158
return 0;
151159
}

0 commit comments

Comments
 (0)