Skip to content

Commit 55f0d84

Browse files
authored
Fix Cpplint Issues in fluid/inference/tensorrt/ (#10318)
* Fix CPPLint issues in fluid/inference/tensorrt/ * Fix compile errors
1 parent 0bc44c1 commit 55f0d84

File tree

3 files changed

+19
-19
lines changed

3 files changed

+19
-19
lines changed

paddle/fluid/inference/tensorrt/engine.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ class TensorRTEngine : public EngineBase {
6565
// Initialize the inference network, so that TensorRT layers can add to this
6666
// network.
6767
void InitNetwork() {
68-
infer_builder_.reset(createInferBuilder(logger_));
68+
infer_builder_.reset(createInferBuilder(&logger_));
6969
infer_network_.reset(infer_builder_->createNetwork());
7070
}
7171
// After finishing adding ops, freeze this network and create the execution

paddle/fluid/inference/tensorrt/helper.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -46,13 +46,13 @@ const int kDataTypeSize[] = {
4646
// The following two APIs are implemented in TensorRT's header file, cannot load
4747
// from the dynamic library. So create our own implementation and directly
4848
// trigger the method from the dynamic library.
49-
static nvinfer1::IBuilder* createInferBuilder(nvinfer1::ILogger& logger) {
49+
static nvinfer1::IBuilder* createInferBuilder(nvinfer1::ILogger* logger) {
5050
return static_cast<nvinfer1::IBuilder*>(
51-
dy::createInferBuilder_INTERNAL(&logger, NV_TENSORRT_VERSION));
51+
dy::createInferBuilder_INTERNAL(logger, NV_TENSORRT_VERSION));
5252
}
53-
static nvinfer1::IRuntime* createInferRuntime(nvinfer1::ILogger& logger) {
53+
static nvinfer1::IRuntime* createInferRuntime(nvinfer1::ILogger* logger) {
5454
return static_cast<nvinfer1::IRuntime*>(
55-
dy::createInferRuntime_INTERNAL(&logger, NV_TENSORRT_VERSION));
55+
dy::createInferRuntime_INTERNAL(logger, NV_TENSORRT_VERSION));
5656
}
5757

5858
// A logger for creating a TensorRT infer builder.
@@ -80,7 +80,7 @@ class NaiveLogger : public nvinfer1::ILogger {
8080
return *x;
8181
}
8282

83-
virtual ~NaiveLogger() override {}
83+
~NaiveLogger() override {}
8484
};
8585

8686
} // namespace tensorrt

paddle/fluid/inference/tensorrt/test_tensorrt.cc

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1212
See the License for the specific language governing permissions and
1313
limitations under the License. */
1414

15+
#include <cuda.h>
16+
#include <cuda_runtime_api.h>
1517
#include <glog/logging.h>
1618
#include <gtest/gtest.h>
1719
#include "NvInfer.h"
18-
#include "cuda.h"
19-
#include "cuda_runtime_api.h"
2020
#include "paddle/fluid/platform/dynload/tensorrt.h"
2121

2222
namespace dy = paddle::platform::dynload;
@@ -43,7 +43,7 @@ class Logger : public nvinfer1::ILogger {
4343

4444
class ScopedWeights {
4545
public:
46-
ScopedWeights(float value) : value_(value) {
46+
explicit ScopedWeights(float value) : value_(value) {
4747
w.type = nvinfer1::DataType::kFLOAT;
4848
w.values = &value_;
4949
w.count = 1;
@@ -58,13 +58,13 @@ class ScopedWeights {
5858
// The following two APIs are implemented in TensorRT's header file, cannot load
5959
// from the dynamic library. So create our own implementation and directly
6060
// trigger the method from the dynamic library.
61-
nvinfer1::IBuilder* createInferBuilder(nvinfer1::ILogger& logger) {
61+
nvinfer1::IBuilder* createInferBuilder(nvinfer1::ILogger* logger) {
6262
return static_cast<nvinfer1::IBuilder*>(
63-
dy::createInferBuilder_INTERNAL(&logger, NV_TENSORRT_VERSION));
63+
dy::createInferBuilder_INTERNAL(logger, NV_TENSORRT_VERSION));
6464
}
65-
nvinfer1::IRuntime* createInferRuntime(nvinfer1::ILogger& logger) {
65+
nvinfer1::IRuntime* createInferRuntime(nvinfer1::ILogger* logger) {
6666
return static_cast<nvinfer1::IRuntime*>(
67-
dy::createInferRuntime_INTERNAL(&logger, NV_TENSORRT_VERSION));
67+
dy::createInferRuntime_INTERNAL(logger, NV_TENSORRT_VERSION));
6868
}
6969

7070
const char* kInputTensor = "input";
@@ -74,7 +74,7 @@ const char* kOutputTensor = "output";
7474
nvinfer1::IHostMemory* CreateNetwork() {
7575
Logger logger;
7676
// Create the engine.
77-
nvinfer1::IBuilder* builder = createInferBuilder(logger);
77+
nvinfer1::IBuilder* builder = createInferBuilder(&logger);
7878
ScopedWeights weights(2.);
7979
ScopedWeights bias(3.);
8080

@@ -103,9 +103,9 @@ nvinfer1::IHostMemory* CreateNetwork() {
103103
return model;
104104
}
105105

106-
void Execute(nvinfer1::IExecutionContext& context, const float* input,
106+
void Execute(nvinfer1::IExecutionContext* context, const float* input,
107107
float* output) {
108-
const nvinfer1::ICudaEngine& engine = context.getEngine();
108+
const nvinfer1::ICudaEngine& engine = context->getEngine();
109109
// Two binds, input and output
110110
ASSERT_EQ(engine.getNbBindings(), 2);
111111
const int input_index = engine.getBindingIndex(kInputTensor);
@@ -119,7 +119,7 @@ void Execute(nvinfer1::IExecutionContext& context, const float* input,
119119
// Copy the input to the GPU, execute the network, and copy the output back.
120120
ASSERT_EQ(0, cudaMemcpyAsync(buffers[input_index], input, sizeof(float),
121121
cudaMemcpyHostToDevice, stream));
122-
context.enqueue(1, buffers, stream, nullptr);
122+
context->enqueue(1, buffers, stream, nullptr);
123123
ASSERT_EQ(0, cudaMemcpyAsync(output, buffers[output_index], sizeof(float),
124124
cudaMemcpyDeviceToHost, stream));
125125
cudaStreamSynchronize(stream);
@@ -136,7 +136,7 @@ TEST(TensorrtTest, BasicFunction) {
136136

137137
// Use the model to create an engine and an execution context.
138138
Logger logger;
139-
nvinfer1::IRuntime* runtime = createInferRuntime(logger);
139+
nvinfer1::IRuntime* runtime = createInferRuntime(&logger);
140140
nvinfer1::ICudaEngine* engine =
141141
runtime->deserializeCudaEngine(model->data(), model->size(), nullptr);
142142
model->destroy();
@@ -145,7 +145,7 @@ TEST(TensorrtTest, BasicFunction) {
145145
// Execute the network.
146146
float input = 1234;
147147
float output;
148-
Execute(*context, &input, &output);
148+
Execute(context, &input, &output);
149149
EXPECT_EQ(output, input * 2 + 3);
150150

151151
// Destroy the engine.

0 commit comments

Comments
 (0)