Skip to content

Commit 923c8e3

Browse files
authored
add benchmark for inference (#14571)
1 parent c52f65e commit 923c8e3

File tree

5 files changed

+143
-0
lines changed

5 files changed

+143
-0
lines changed

paddle/fluid/inference/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@ endif()
44
# analysis and tensorrt must be added before creating static library,
55
# otherwise, there would be undefined reference to them in static library.
66
add_subdirectory(analysis)
7+
add_subdirectory(utils)
78
if (TENSORRT_FOUND)
89
add_subdirectory(tensorrt)
910
endif()
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
# Benchmark helper library; depends on enforce for PADDLE_ENFORCE checks.
cc_library(benchmark SRCS benchmark.cc DEPS enforce)
# Unit test exercising the benchmark helper.
cc_test(test_benchmark SRCS benchmark_tester.cc DEPS benchmark)
Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
1+
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// http://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
#include "paddle/fluid/inference/utils/benchmark.h"
16+
#include <sstream>
17+
#include "paddle/fluid/platform/enforce.h"
18+
19+
namespace paddle {
20+
namespace inference {
21+
22+
// Renders the benchmark record as a human-readable, tab-separated table:
// one header row followed by one data row, each newline-terminated.
std::string Benchmark::SerializeToString() const {
  std::stringstream ss;
  ss << "-----------------------------------------------------\n";
  ss << "name\t";
  ss << "batch_size\t";
  ss << "num_threads\t";
  ss << "latency\t";
  ss << "qps";
  ss << '\n';

  ss << name_ << "\t";
  ss << batch_size_ << "\t";
  ss << num_threads_ << "\t";
  ss << latency_ << "\t";
  // latency_ is in milliseconds, so qps = 1000 / latency. Use floating-point
  // division (the original integer division truncated qps) and guard against
  // a zero/unset latency, which would be undefined behavior.
  ss << (latency_ > 0 ? 1000.0 / latency_ : 0.0);
  ss << '\n';
  return ss.str();
}
40+
// Appends the serialized record to the file at `path`; aborts via
// PADDLE_ENFORCE if the file cannot be opened.
void Benchmark::PersistToFile(const std::string &path) const {
  // Open in append mode so repeated runs accumulate records in one log.
  std::ofstream out(path, std::ios::app);
  PADDLE_ENFORCE(out.is_open(), "Can not open %s to add benchmark", path);
  out << SerializeToString();
  out.flush();
  out.close();
}
47+
48+
} // namespace inference
49+
} // namespace paddle
Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// http://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
#pragma once

#include <fstream>
#include <iostream>
#include <string>
17+
18+
namespace paddle {
19+
namespace inference {
20+
21+
/*
 * Helper class to record the performance of one inference run: name,
 * batch size, thread count and latency (milliseconds), with helpers to
 * serialize the record and append it to a log file.
 */
struct Benchmark {
  int batch_size() const { return batch_size_; }
  void SetBatchSize(int x) { batch_size_ = x; }

  int num_threads() const { return num_threads_; }
  void SetNumThreads(int x) { num_threads_ = x; }

  bool use_gpu() const { return use_gpu_; }
  void SetUseGpu() { use_gpu_ = true; }

  // Latency is expected in milliseconds (used to derive qps on serialization).
  int latency() const { return latency_; }
  void SetLatency(int x) { latency_ = x; }

  const std::string& name() const { return name_; }
  void SetName(const std::string& name) { name_ = name; }

  // Renders all fields as a tab-separated, human-readable table.
  std::string SerializeToString() const;
  // Appends SerializeToString() output to the file at `path`.
  void PersistToFile(const std::string& path) const;

 private:
  bool use_gpu_{false};
  int batch_size_{0};
  // Brace-initialized like the other members: it was the only member left
  // uninitialized, making any read before SetLatency() undefined behavior.
  int latency_{0};
  int num_threads_{1};
  std::string name_;
};
50+
51+
} // namespace inference
52+
} // namespace paddle
Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// http://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
#include "paddle/fluid/inference/utils/benchmark.h"
16+
#include <glog/logging.h>
17+
#include <gtest/gtest.h>
18+
19+
using namespace paddle::inference;
20+
TEST(Benchmark, basic) {
21+
Benchmark benchmark;
22+
benchmark.SetName("key0");
23+
benchmark.SetBatchSize(10);
24+
benchmark.SetUseGpu();
25+
benchmark.SetLatency(220);
26+
LOG(INFO) << "benchmark:\n" << benchmark.SerializeToString();
27+
}
28+
29+
// Verifies that persisting the same record repeatedly does not fail
// (the file is opened in append mode, so records accumulate).
TEST(Benchmark, PersistToFile) {
  Benchmark benchmark;
  benchmark.SetName("key0");
  benchmark.SetBatchSize(10);
  benchmark.SetUseGpu();
  benchmark.SetLatency(220);

  for (int i = 0; i < 3; ++i) {
    benchmark.PersistToFile("1.log");
  }
}

0 commit comments

Comments (0)