
Commit 819ac3d

Modify style (#12465)
1 parent 046de2a commit 819ac3d

8 files changed: +22 -17 lines changed

paddle/fluid/inference/analysis/argument.h

Lines changed: 1 addition & 0 deletions
@@ -23,6 +23,7 @@
 
 #pragma once
 
+#include <string>
 #include "paddle/fluid/framework/program_desc.h"
 #include "paddle/fluid/inference/analysis/data_flow_graph.h"

paddle/fluid/inference/analysis/data_flow_graph.h

Lines changed: 1 addition & 1 deletion
@@ -176,7 +176,7 @@ struct GraphTraits<DataFlowGraph> {
 // sub-graph is the inputs nodes and output nodes that doesn't inside the
 // sub-graph.
 std::pair<std::vector<Node *>, std::vector<Node *>>
-ExtractInputAndOutputOfSubGraph(std::vector<Node *> &graph);
+ExtractInputAndOutputOfSubGraph(std::vector<Node *> &graph);  // NOLINT
 
 }  // namespace analysis
 }  // namespace inference
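
For context on the // NOLINT markers added throughout this commit: a C++ linter such as cpplint warns when a function takes a mutable (non-const) reference parameter (commonly reported as runtime/references), and a trailing // NOLINT suppresses lint warnings on that line without changing the signature. A minimal sketch of the pattern, using a hypothetical Append helper rather than any function from this diff:

#include <vector>

// The linter would normally flag values as a non-const reference parameter;
// the trailing NOLINT keeps that warning quiet while leaving the existing
// signature untouched.
void Append(std::vector<int>& values, int x) {  // NOLINT
  values.push_back(x);
}

The same annotation is applied below to ReorderCKtoKC and to the ConvMKLDNNHandler helpers, which keep their reference parameters as-is.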

paddle/fluid/inference/analysis/model_store_pass.cc

Lines changed: 3 additions & 1 deletion
@@ -12,11 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "paddle/fluid/inference/analysis/model_store_pass.h"
 #include <stdio.h>
 #include <stdlib.h>
+#include <string>
+
 #include "paddle/fluid/inference/analysis/analyzer.h"
 #include "paddle/fluid/inference/analysis/argument.h"
+#include "paddle/fluid/inference/analysis/model_store_pass.h"
 
 namespace paddle {
 namespace inference {

paddle/fluid/inference/analysis/model_store_pass.h

Lines changed: 2 additions & 0 deletions
@@ -17,6 +17,8 @@
  * model in the disk, and that model can be reloaded for prediction.
  */
 
+#pragma once
+#include <string>
 #include "paddle/fluid/inference/analysis/pass.h"
 
 namespace paddle {
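
Two small pieces of header hygiene sit behind this hunk: #pragma once keeps the header from being processed more than once in a translation unit, and a header whose declarations mention std::string includes <string> itself rather than relying on whatever its includers happen to pull in. A minimal sketch of the same pattern, with a hypothetical ModelPath declaration that is not part of this diff:

#pragma once  // repeated inclusions of this header become no-ops

#include <string>  // included directly because std::string appears in the interface below

// Hypothetical declaration, used only to illustrate why <string> is needed here.
std::string ModelPath(const std::string& model_dir);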

paddle/fluid/inference/api/demo_ci/vis_demo.cc

Lines changed: 1 addition & 1 deletion
@@ -20,8 +20,8 @@ limitations under the License. */
 #include <glog/logging.h>  // use glog instead of PADDLE_ENFORCE to avoid importing other paddle header files.
 #include <fstream>
 #include <iostream>
+#include "paddle/fluid/inference/demo_ci/utils.h"
 #include "paddle/fluid/platform/enforce.h"
-#include "utils.h"
 
 #ifdef PADDLE_WITH_CUDA
 DECLARE_double(fraction_of_gpu_memory_to_use);

paddle/fluid/inference/api/paddle_inference_api.h

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ class PaddleBuf {
   PaddleBuf(void* data, size_t length)
       : data_(data), length_(length), memory_owned_{false} {}
   // Own memory.
-  PaddleBuf(size_t length)
+  explicit PaddleBuf(size_t length)
       : data_(new char[length]), length_(length), memory_owned_(true) {}
   // Resize to `length` bytes.
   void Resize(size_t length);
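
Marking the single-argument constructor explicit stops a bare size_t from silently converting into a PaddleBuf; callers must now construct the buffer deliberately. A standalone sketch of the effect, using a simplified stand-in class rather than the real PaddleBuf:

#include <cstddef>

class Buf {
 public:
  explicit Buf(size_t length) : length_(length) {}  // explicit: no implicit size_t -> Buf

 private:
  size_t length_;
};

int main() {
  Buf a(1024);      // fine: direct construction
  // Buf b = 1024;  // would no longer compile: copy-initialization needs an implicit conversion
  return 0;
}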

paddle/fluid/inference/tensorrt/convert/fc_op.cc

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ void Reorder2(nvinfer1::DimsHW shape, const T* idata, nvinfer1::DimsHW istrides,
 }
 // indata c * k
 // Reorder the data layout from CK to KC.
-void ReorderCKtoKC(TensorRTEngine::Weight& iweights,
+void ReorderCKtoKC(TensorRTEngine::Weight& iweights,  // NOLINT
                    TensorRTEngine::Weight* oweights) {
   int c = iweights.dims[0];
   int k = iweights.dims[1];

paddle/fluid/operators/conv_mkldnn_op.cc

Lines changed: 12 additions & 12 deletions
@@ -55,7 +55,7 @@ class ConvMKLDNNHandler : public platform::MKLDNNHandler {
 
   std::shared_ptr<mkldnn::memory> AcquireSrcMemoryFromWeightsPrimitive(
       const std::shared_ptr<mkldnn::memory> user_memory_p,
-      std::vector<mkldnn::primitive>& pipeline) {
+      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
     auto src_pd = conv_bwd_weights_pd_->src_primitive_desc();
     auto user_pd = user_memory_p->get_primitive_desc();
     return this->AcquireMemory(src_pd, user_pd, user_memory_p,
@@ -64,7 +64,7 @@ class ConvMKLDNNHandler : public platform::MKLDNNHandler {
 
   std::shared_ptr<mkldnn::memory> AcquireDiffDstMemoryFromWeightsPrimitive(
       const std::shared_ptr<mkldnn::memory> user_memory_p,
-      std::vector<mkldnn::primitive>& pipeline) {
+      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
     auto diff_dst_pd = conv_bwd_weights_pd_->diff_dst_primitive_desc();
     auto user_pd = user_memory_p->get_primitive_desc();
     return this->AcquireMemory(diff_dst_pd, user_pd, user_memory_p,
@@ -80,7 +80,7 @@ class ConvMKLDNNHandler : public platform::MKLDNNHandler {
 
   std::shared_ptr<mkldnn::memory> AcquireDiffDstMemoryFromDataPrimitive(
       const std::shared_ptr<mkldnn::memory> user_memory_p,
-      std::vector<mkldnn::primitive>& pipeline) {
+      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
     auto diff_dst_pd = conv_bwd_data_pd_->diff_dst_primitive_desc();
     auto user_pd = user_memory_p->get_primitive_desc();
     return this->AcquireMemory(diff_dst_pd, user_pd, user_memory_p,
@@ -89,7 +89,7 @@ class ConvMKLDNNHandler : public platform::MKLDNNHandler {
 
   std::shared_ptr<mkldnn::memory> AcquireWeightsMemoryFromDataPrimitive(
       const std::shared_ptr<mkldnn::memory> user_weights_memory_p,
-      std::vector<mkldnn::primitive>& pipeline) {
+      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
     auto weights_pd = conv_bwd_data_pd_->weights_primitive_desc();
     auto user_pd = user_weights_memory_p->get_primitive_desc();
     return this->AcquireMemory(weights_pd, user_pd, user_weights_memory_p,
@@ -109,7 +109,7 @@ class ConvMKLDNNHandler : public platform::MKLDNNHandler {
 
   std::shared_ptr<mkldnn::memory> AcquireSrcMemoryFromPrimitive(
       const std::shared_ptr<mkldnn::memory> user_memory_p,
-      std::vector<mkldnn::primitive>& pipeline) {
+      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
     auto src_pd = conv_pd_->src_primitive_desc();
     auto user_pd = user_memory_p->get_primitive_desc();
     return this->AcquireMemory(src_pd, user_pd, user_memory_p, "@src_mem_p",
@@ -118,7 +118,7 @@ class ConvMKLDNNHandler : public platform::MKLDNNHandler {
 
   std::shared_ptr<mkldnn::memory> AcquireWeightsMemoryFromPrimitive(
       const std::shared_ptr<mkldnn::memory> user_weights_memory_p,
-      std::vector<mkldnn::primitive>& pipeline) {
+      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
     auto user_weights_pd = user_weights_memory_p->get_primitive_desc();
     auto weights_pd = conv_pd_->weights_primitive_desc();
     return this->AcquireMemory(weights_pd, user_weights_pd,
@@ -197,12 +197,12 @@ class ConvMKLDNNHandler : public platform::MKLDNNHandler {
 
   // Generate keys for storing/retriving primitives for this operator
   // TODO(jczaja): Make hashing function more optimial
-  static std::string GetHash(memory::dims& input_dims,
-                             memory::dims& weights_dims,
-                             std::vector<int>& strides,
-                             std::vector<int>& paddings,
-                             std::vector<int>& dilations, int groups,
-                             const std::string& suffix) {
+  static std::string GetHash(memory::dims& input_dims,     // NOLINT
+                             memory::dims& weights_dims,   // NOLINT
+                             std::vector<int>& strides,    // NOLINT
+                             std::vector<int>& paddings,   // NOLINT
+                             std::vector<int>& dilations,  // NOLINT
+                             int groups, const std::string& suffix) {
     return dims2str(input_dims) + dims2str(weights_dims) + dims2str(strides) +
            dims2str(paddings) + dims2str(dilations) + std::to_string(groups) +
            suffix;
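
The NOLINT route keeps the existing non-const reference signatures untouched; the style-guide-conforming alternative would be to pass the output parameter by pointer so the mutation is visible at the call site. A sketch of that alternative, with a hypothetical AddToPipeline helper that is not part of this diff:

#include <vector>

// Hypothetical helper: taking the pipeline by pointer (instead of by non-const
// reference) satisfies the non-const-reference lint check without any NOLINT.
void AddToPipeline(int primitive, std::vector<int>* pipeline) {
  pipeline->push_back(primitive);
}

int main() {
  std::vector<int> pipeline;
  AddToPipeline(42, &pipeline);  // the explicit & shows the argument will be modified
  return 0;
}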
