Skip to content

Commit 048f62f

Browse files
committed
Shrank ONNX gbenchmarks into a single function
1 parent e11289f commit 048f62f

9 files changed

+115
-35
lines changed

root/tmva/sofie/CMakeLists.txt

Lines changed: 33 additions & 14 deletions
Original file line numberDiff line numberDiff line change
if(ONNXRuntime_FOUND)
    message(STATUS "Found ONNXRuntime (build type: ${ONNXRuntime_BUILD_TYPE}, version: ${ONNXRuntime_VERSION_STRING})")

    # Configure ONNXRuntimeInference_Template.cxx.in into a single benchmark
    # translation unit containing one BENCHMARK_CAPTURE statement per model.
    set(FUNC_NAME "BM_ONNXRuntime_Inference")              # substituted for @FUNC_NAME@
    set(HEAD_COMMENT "Automatically configured by CMake")  # substituted for @HEAD_COMMENT@

    # Build the @BENCHMARK_CAPTURES@ payload directly as a string.
    # Appending line by line avoids storing semicolon-terminated C++
    # statements in a CMake list (where ';' is the element separator).
    set(BENCHMARK_CAPTURES "")
    foreach(model ${ONNX_MODELS})
        get_filename_component(fname ${model} NAME)
        get_filename_component(fname_we ${model} NAME_WE)
        string(APPEND BENCHMARK_CAPTURES
            "BENCHMARK_CAPTURE(${FUNC_NAME}, ${fname_we},\t\"${ONNX_MODELS_DIR}/${fname}\")\;\n")
    endforeach()
    configure_file(ONNXRuntimeInference_Template.cxx.in ONNXRuntimeInference.cxx @ONLY)

    RB_ADD_GBENCHMARK(ONNXRuntimeInference
        ONNXRuntimeInference.cxx
        LABEL short
        LIBRARIES TMVA onnxruntime
    )
    target_link_directories(ONNXRuntimeInference PRIVATE ${ONNXRuntime_LIBRARIES})
    target_include_directories(ONNXRuntimeInference PRIVATE ${ONNXRuntime_INCLUDE_DIR})

endif()

# Checking the SOFIE compiler
if(NOT EXISTS ${ROOTSYS}/tmva/sofie/test/emitFromONNX)
    message(FATAL_ERROR "SOFIE compiler not found")
endif()

# Benchmark for models emitted by SOFIE
RB_ADD_GBENCHMARK(SOFIEInference
    SOFIEInference.cxx
    LABEL short
    LIBRARIES TMVA blas
)

# Benchmark for models emitted by SOFIE (with RModelProfiler)
# This should be useful in measuring profiler's overhead
RB_ADD_GBENCHMARK(SOFIEInferenceProfiler
    SOFIEInferenceProfiler.cxx
    LABEL short
    LIBRARIES TMVA blas
)

endif()
5372

root/tmva/sofie/ONNXRuntimeInference.cxx.in renamed to root/tmva/sofie/ONNXRuntimeInference_Template.cxx.in

Lines changed: 3 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
// @HEAD_COMMENT@
12
// Author: Federico Sossai (fsossai), 2021
23

34
#include <benchmark/benchmark.h>
@@ -9,10 +10,8 @@
910

1011
using namespace std;
1112

12-
static void BM_ONNXRuntime_${fname}(benchmark::State& state)
13+
static void @FUNC_NAME@(benchmark::State& state, string model_path)
1314
{
14-
const string model_path = "${ONNX_MODELS_DIR}/${fname}.onnx";
15-
1615
Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "benchmark");
1716

1817
Ort::SessionOptions session_options;
@@ -34,20 +33,6 @@ static void BM_ONNXRuntime_${fname}(benchmark::State& state)
3433
.GetInputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape();
3534
vector<int64_t> output_node_dims = session
3635
.GetOutputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape();
37-
38-
// Displaying input tensor shape
39-
40-
cout << "Input shape: [ ";
41-
for (auto& i: input_node_dims)
42-
cout << i << " ";
43-
cout << "]" << endl;
44-
45-
// Displaying output tensor shape
46-
47-
cout << "Output shape: [ ";
48-
for (auto& i: output_node_dims)
49-
cout << i << " ";
50-
cout << "]" << endl;
5136

5237
// Calculating the dimension of the input tensor
5338

@@ -70,6 +55,6 @@ static void BM_ONNXRuntime_${fname}(benchmark::State& state)
7055
&input_tensor, 1, output_node_names.data(), 1);
7156
}
7257
}
73-
BENCHMARK(BM_ONNXRuntime_${fname});
58+
@BENCHMARK_CAPTURES@
7459

7560
BENCHMARK_MAIN();

root/tmva/sofie/SOFIEInference.cxx

Lines changed: 32 additions & 3 deletions
Original file line numberDiff line numberDiff line change
#include <benchmark/benchmark.h>

#include <iostream>
#include <thread>
#include <chrono>
#include <utility>
#include <vector>
#include <memory>
#include <functional>

#include "input_models/compiled/Linear_16.hxx"
#include "input_models/compiled/Linear_32.hxx"
#include "input_models/compiled/Linear_64.hxx"

using namespace std;

/// Benchmarks inference of one SOFIE-compiled model.
/// \param model_name selects the compiled model:
///        "Linear_16", "Linear_32" or "Linear_64".
static void BM_SOFIE_Inference(benchmark::State& state, std::string model_name)
{
   vector<float> input;
   // Initialize to nullptr: calling an uninitialized function pointer on an
   // unrecognized model_name would be undefined behavior.
   vector<float> (*infer_func)(float*) = nullptr;

   if (model_name == "Linear_16") {
      input.resize(1600);
      infer_func = TMVA_SOFIE_Linear_16::infer;
   } else if (model_name == "Linear_32") {
      input.resize(3200);
      infer_func = TMVA_SOFIE_Linear_32::infer;
   } else if (model_name == "Linear_64") {
      input.resize(6400);
      infer_func = TMVA_SOFIE_Linear_64::infer;
   } else {
      state.SkipWithError("Unknown model name");
      return;
   }

   float *input_ptr = input.data();
   for (auto _ : state) {
      // DoNotOptimize keeps the otherwise-discarded result from being
      // eliminated as dead code by the optimizer.
      benchmark::DoNotOptimize(infer_func(input_ptr));
   }
}
BENCHMARK_CAPTURE(BM_SOFIE_Inference, Linear_16, "Linear_16");
BENCHMARK_CAPTURE(BM_SOFIE_Inference, Linear_32, "Linear_32");
BENCHMARK_CAPTURE(BM_SOFIE_Inference, Linear_64, "Linear_64");

BENCHMARK_MAIN();
Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
// Author: Federico Sossai (fsossai), 2021

#include <benchmark/benchmark.h>

#include <iostream>
#include <thread>
#include <chrono>
#include <utility>
#include <vector>
#include <memory>
#include <functional>

#include "input_models/compiled/Linear_16_prof.hxx"
#include "input_models/compiled/Linear_32_prof.hxx"
#include "input_models/compiled/Linear_64_prof.hxx"

using namespace std;

/// Benchmarks inference of one SOFIE-compiled model built with RModelProfiler,
/// to measure the profiler's overhead against the plain SOFIEInference run.
/// \param model_name selects the compiled model:
///        "Linear_16", "Linear_32" or "Linear_64".
static void BM_SOFIE_Inference(benchmark::State& state, std::string model_name)
{
   vector<float> input;
   // Initialize to nullptr: calling an uninitialized function pointer on an
   // unrecognized model_name would be undefined behavior.
   vector<float> (*infer_func)(float*) = nullptr;

   if (model_name == "Linear_16") {
      input.resize(1600);
      infer_func = TMVA_SOFIE_Linear_16::infer;
   } else if (model_name == "Linear_32") {
      input.resize(3200);
      infer_func = TMVA_SOFIE_Linear_32::infer;
   } else if (model_name == "Linear_64") {
      input.resize(6400);
      infer_func = TMVA_SOFIE_Linear_64::infer;
   } else {
      state.SkipWithError("Unknown model name");
      return;
   }

   float *input_ptr = input.data();
   for (auto _ : state) {
      // DoNotOptimize keeps the otherwise-discarded result from being
      // eliminated as dead code by the optimizer.
      benchmark::DoNotOptimize(infer_func(input_ptr));
   }
   // NOTE(review): this prints the profiler result counts of ALL three models
   // after EVERY benchmark run, not just the model that was exercised —
   // presumably a quick sanity check; confirm whether per-model output was
   // intended.
   cout << TMVA_SOFIE_Linear_16::profiler_results.size() << endl;
   cout << TMVA_SOFIE_Linear_32::profiler_results.size() << endl;
   cout << TMVA_SOFIE_Linear_64::profiler_results.size() << endl;
}
BENCHMARK_CAPTURE(BM_SOFIE_Inference, Linear_16, "Linear_16");
BENCHMARK_CAPTURE(BM_SOFIE_Inference, Linear_32, "Linear_32");
BENCHMARK_CAPTURE(BM_SOFIE_Inference, Linear_64, "Linear_64");

BENCHMARK_MAIN();
-268 Bytes
Binary file not shown.
-275 Bytes
Binary file not shown.
-267 Bytes
Binary file not shown.
-265 Bytes
Binary file not shown.
-243 Bytes
Binary file not shown.

0 commit comments

Comments
 (0)