
Commit c5c7dc2

Fix CPPLint errors in multiclass_nms, nccl, nce, reduce and save_load_combine (#10032)
* Fix CPPLint errors in multiclass_nms, nccl, nce, reduce and save_load_combine
* Fix
1 parent 598035f commit c5c7dc2

5 files changed: +70 -68 lines changed

paddle/fluid/operators/multiclass_nms_op.cc

Lines changed: 10 additions & 10 deletions
@@ -173,8 +173,8 @@ class MultiClassNMSKernel : public framework::OpKernel<T> {
 
   void MultiClassNMS(const framework::ExecutionContext& ctx,
                      const Tensor& scores, const Tensor& bboxes,
-                     std::map<int, std::vector<int>>& indices,
-                     int& num_nmsed_out) const {
+                     std::map<int, std::vector<int>>* indices,
+                     int* num_nmsed_out) const {
     int64_t background_label = ctx.Attr<int>("background_label");
     int64_t nms_top_k = ctx.Attr<int>("nms_top_k");
     int64_t keep_top_k = ctx.Attr<int>("keep_top_k");
@@ -189,15 +189,15 @@ class MultiClassNMSKernel : public framework::OpKernel<T> {
       if (c == background_label) continue;
       Tensor score = scores.Slice(c, c + 1);
       NMSFast(bboxes, score, score_threshold, nms_threshold, nms_eta, nms_top_k,
-              &(indices[c]));
-      num_det += indices[c].size();
+              &((*indices)[c]));
+      num_det += (*indices)[c].size();
     }
 
-    num_nmsed_out = num_det;
+    *num_nmsed_out = num_det;
     const T* scores_data = scores.data<T>();
     if (keep_top_k > -1 && num_det > keep_top_k) {
       std::vector<std::pair<float, std::pair<int, int>>> score_index_pairs;
-      for (const auto& it : indices) {
+      for (const auto& it : *indices) {
         int label = it.first;
         const T* sdata = scores_data + label * predict_dim;
         const std::vector<int>& label_indices = it.second;
@@ -220,13 +220,13 @@ class MultiClassNMSKernel : public framework::OpKernel<T> {
         int idx = score_index_pairs[j].second.second;
         new_indices[label].push_back(idx);
       }
-      new_indices.swap(indices);
-      num_nmsed_out = keep_top_k;
+      new_indices.swap(*indices);
+      *num_nmsed_out = keep_top_k;
     }
   }
 
   void MultiClassOutput(const Tensor& scores, const Tensor& bboxes,
-                        std::map<int, std::vector<int>>& selected_indices,
+                        const std::map<int, std::vector<int>>& selected_indices,
                         Tensor* outs) const {
     int predict_dim = scores.dims()[1];
     auto* scores_data = scores.data<T>();
@@ -273,7 +273,7 @@ class MultiClassNMSKernel : public framework::OpKernel<T> {
 
       std::map<int, std::vector<int>> indices;
       int num_nmsed_out = 0;
-      MultiClassNMS(ctx, ins_score, ins_boxes, indices, num_nmsed_out);
+      MultiClassNMS(ctx, ins_score, ins_boxes, &indices, &num_nmsed_out);
       all_indices.push_back(indices);
      batch_starts.push_back(batch_starts.back() + num_nmsed_out);
     }
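The change above follows the cpplint runtime/references rule: arguments that a function mutates are taken by pointer instead of non-const reference, so the mutation shows up as an explicit & at every call site. A minimal standalone sketch of the same pattern (illustrative names only, not code from this repository):

#include <iostream>
#include <map>
#include <vector>

// Before: void Select(std::map<int, std::vector<int>>& indices, int& num_out);
// After: mutated arguments become pointers, so callers must write &indices, &num_out.
void Select(std::map<int, std::vector<int>>* indices, int* num_out) {
  (*indices)[0].push_back(7);                           // was indices[0].push_back(7);
  *num_out = static_cast<int>((*indices)[0].size());    // was num_out = indices[0].size();
}

int main() {
  std::map<int, std::vector<int>> indices;
  int num_out = 0;
  Select(&indices, &num_out);   // was Select(indices, num_out);
  std::cout << num_out << "\n"; // prints 1
  return 0;
}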

paddle/fluid/operators/nccl_op.cu.cc

Lines changed: 3 additions & 2 deletions
@@ -135,8 +135,9 @@ class NCCLBcastKernel : public framework::OpKernel<T> {
       auto* x = ctx.Input<LoDTensor>("X");
       VLOG(3) << "gpu : " << gpu_id << " invoke Bcast. send " << x->numel();
       PADDLE_ENFORCE(platform::dynload::ncclBcast(
-          (void*)x->data<T>(), x->numel(), NCCLTypeWrapper<T>::type, root,
-          comm->comms().at(idx), ctx.cuda_device_context().stream()));
+          reinterpret_cast<void*>(const_cast<T*>(x->data<T>())), x->numel(),
+          NCCLTypeWrapper<T>::type, root, comm->comms().at(idx),
+          ctx.cuda_device_context().stream()));
       VLOG(3) << "gpu : " << gpu_id << " finished Bcast.";
     } else {
       auto* out = ctx.Output<LoDTensor>("Out");
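The old code used a C-style (void*) cast on a const buffer, which removes const and changes the pointer type in one invisible step; cpplint's readability/casting check flags C-style casts. The replacement spells out both conversions. A hedged standalone sketch of the same pattern, where send_buffer is a made-up stand-in for a C API that takes a non-const void*:

#include <cstddef>

// Stand-in for a C API that takes a writable void* buffer.
void send_buffer(void* buf, std::size_t n) { (void)buf; (void)n; }

void Broadcast(const float* data, std::size_t n) {
  // Before: send_buffer((void*)data, n);  // one C-style cast hides the const removal
  // After: const_cast makes dropping const explicit; reinterpret_cast does the
  // pointer-type conversion.
  send_buffer(reinterpret_cast<void*>(const_cast<float*>(data)), n);
}

int main() {
  float values[4] = {1.f, 2.f, 3.f, 4.f};
  Broadcast(values, 4);
  return 0;
}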

paddle/fluid/operators/nce_op.h

Lines changed: 4 additions & 3 deletions
@@ -16,6 +16,7 @@ limitations under the License. */
 
 #include <math.h>
 #include <random>
+#include <vector>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "unsupported/Eigen/CXX11/Tensor"
@@ -108,7 +109,7 @@ class NCEKernel : public framework::OpKernel<T> {
     auto weight_mat = EigenMatrix<T>::From(*(context.Input<Tensor>("Weight")));
     for (int64_t i = 0; i < sample_labels->numel(); ++i) {
       Eigen::Tensor<T, 0, Eigen::RowMajor, Eigen::DenseIndex> result =
-          (input_mat.chip((int)(i / sample_labels->dims()[1]), 0) *
+          (input_mat.chip(static_cast<int>(i / sample_labels->dims()[1]), 0) *
            weight_mat.chip(sample_labels_data[i], 0))
               .sum();
       sample_out_data[i] += result(0);
@@ -190,7 +191,7 @@ class NCEGradKernel : public framework::OpKernel<T> {
     auto x_matrix = EigenMatrix<T>::From(*(context.Input<Tensor>("Input")));
     for (int64_t i = 0; i < sample_labels->numel(); ++i) {
       d_w_matrix.chip(sample_labels_data[i], 0) +=
-          x_matrix.chip((int)(i / sample_labels->dims()[1]), 0) *
+          x_matrix.chip(static_cast<int>(i / sample_labels->dims()[1]), 0) *
           sample_grad_data[i];
     }
   }
@@ -202,7 +203,7 @@ class NCEGradKernel : public framework::OpKernel<T> {
     auto d_x_matrix = EigenMatrix<T>::From(*d_x);
     auto w_matrix = EigenMatrix<T>::From(*(context.Input<Tensor>("Weight")));
     for (int64_t i = 0; i < sample_labels->numel(); ++i) {
-      d_x_matrix.chip((int)(i / sample_labels->dims()[1]), 0) +=
+      d_x_matrix.chip(static_cast<int>(i / sample_labels->dims()[1]), 0) +=
           w_matrix.chip(sample_labels_data[i], 0) * sample_grad_data[i];
     }
   }
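Apart from the added <vector> include, the only change in this file is replacing the C-style (int) cast on the index arithmetic with static_cast<int>, the form cpplint's readability/casting rule expects. A tiny standalone illustration with assumed values, not repository code:

#include <cstdint>
#include <iostream>

int main() {
  int64_t i = 37;
  int64_t width = 8;
  // Before: int row = (int)(i / width);
  int row = static_cast<int>(i / width);  // explicit, greppable narrowing conversion
  std::cout << row << "\n";               // prints 4
  return 0;
}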

paddle/fluid/operators/reduce_op.h

Lines changed: 26 additions & 26 deletions
@@ -35,77 +35,77 @@ using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
 
 struct SumFunctor {
   template <typename DeviceContext, typename X, typename Y, typename Dim>
-  void operator()(const DeviceContext& place, X& x, Y& y, const Dim& dim) {
-    y.device(place) = x.sum(dim);
+  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
+    y->device(place) = x->sum(dim);
   }
 };
 
 struct SumGradFunctor {
   template <typename DeviceContext, typename X, typename Y, typename DX,
             typename DY, typename Dim>
-  void operator()(const DeviceContext& place, X& x, Y& y, DX& dx, DY& dy,
+  void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy,
                   const Dim& dim, int size) {
-    dx.device(place) = dy.broadcast(dim);
+    dx->device(place) = dy->broadcast(dim);
   }
 };
 
 struct MeanFunctor {
   template <typename DeviceContext, typename X, typename Y, typename Dim>
-  void operator()(const DeviceContext& place, X& x, Y& y, const Dim& dim) {
-    y.device(place) = x.mean(dim);
+  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
+    y->device(place) = x->mean(dim);
   }
 };
 
 struct MeanGradFunctor {
   template <typename DeviceContext, typename X, typename Y, typename DX,
             typename DY, typename Dim>
-  void operator()(const DeviceContext& place, X& x, Y& y, DX& dx, DY& dy,
+  void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy,
                   const Dim& dim, int size) {
-    dx.device(place) = dy.broadcast(dim) / dx.constant(size);
+    dx->device(place) = dy->broadcast(dim) / dx->constant(size);
   }
 };
 
 struct MaxFunctor {
   template <typename DeviceContext, typename X, typename Y, typename Dim>
-  void operator()(const DeviceContext& place, X& x, Y& y, const Dim& dim) {
-    y.device(place) = x.maximum(dim);
+  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
+    y->device(place) = x->maximum(dim);
   }
 };
 
 struct MinFunctor {
   template <typename DeviceContext, typename X, typename Y, typename Dim>
-  void operator()(const DeviceContext& place, X& x, Y& y, const Dim& dim) {
-    y.device(place) = x.minimum(dim);
+  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
+    y->device(place) = x->minimum(dim);
   }
 };
 
 struct MaxOrMinGradFunctor {
   template <typename DeviceContext, typename X, typename Y, typename DX,
             typename DY, typename Dim>
-  void operator()(const DeviceContext& place, X& x, Y& y, DX& dx, DY& dy,
+  void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy,
                   const Dim& dim, int size) {
-    auto equals = x == y.broadcast(dim);
-    auto ones = dx.constant(1);
-    auto zeros = dx.constant(0);
+    auto equals = (*x) == y->broadcast(dim);
+    auto ones = dx->constant(1);
+    auto zeros = dx->constant(0);
     // If there are multiple minimum or maximum elements, the subgradient of
     // each is the set [0, 1], and we pass gradient to all of them here.
-    dx.device(place) = dy.broadcast(dim) * equals.select(ones, zeros);
+    dx->device(place) = dy->broadcast(dim) * equals.select(ones, zeros);
   }
 };
 
 struct ProdFunctor {
   template <typename DeviceContext, typename X, typename Y, typename Dim>
-  void operator()(const DeviceContext& place, X& x, Y& y, const Dim& dim) {
-    y.device(place) = x.prod(dim);
+  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
+    y->device(place) = x->prod(dim);
   }
 };
 
 struct ProdGradFunctor {
   template <typename DeviceContext, typename X, typename Y, typename DX,
             typename DY, typename Dim>
-  void operator()(const DeviceContext& place, X& x, Y& y, DX& dx, DY& dy,
+  void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy,
                   const Dim& dim, int size) {
-    dx.device(place) = dy.broadcast(dim) * y.broadcast(dim) * x.inverse();
+    dx->device(place) = dy->broadcast(dim) * y->broadcast(dim) * x->inverse();
   }
 };
 
@@ -125,7 +125,7 @@ class ReduceKernel : public framework::OpKernel<T> {
           *context.template device_context<DeviceContext>().eigen_device();
       auto reduce_dim = Eigen::array<int, 1>({{0}});
       Functor functor;
-      functor(place, x, out, reduce_dim);
+      functor(place, &x, &out, reduce_dim);
     } else {
       int rank = context.Input<Tensor>("X")->dims().size();
       switch (rank) {
@@ -178,10 +178,10 @@ class ReduceKernel : public framework::OpKernel<T> {
 
     if (D == 1) {
       auto out = EigenScalar<T>::From(*output);
-      functor(place, x, out, reduce_dim);
+      functor(place, &x, &out, reduce_dim);
     } else {
       auto out = EigenTensor<T, (D - 1)>::From(*output, dims);
-      functor(place, x, out, reduce_dim);
+      functor(place, &x, &out, reduce_dim);
     }
   }
 };
@@ -206,7 +206,7 @@ class ReduceGradKernel : public framework::OpKernel<T> {
       auto broadcast_dim =
          Eigen::array<int, 1>({{static_cast<int>(input0->numel())}});
       Functor functor;
-      functor(place, x, x_reduce, x_grad, x_reduce_grad, broadcast_dim,
+      functor(place, &x, &x_reduce, &x_grad, &x_reduce_grad, broadcast_dim,
               broadcast_dim[0]);
     } else {
       int rank = context.Input<Tensor>("X")->dims().size();
@@ -258,7 +258,7 @@ class ReduceGradKernel : public framework::OpKernel<T> {
     auto& place =
         *context.template device_context<DeviceContext>().eigen_device();
     Functor functor;
-    functor(place, x, x_reduce, x_grad, x_reduce_grad, broadcast_dim,
+    functor(place, &x, &x_reduce, &x_grad, &x_reduce_grad, broadcast_dim,
             broadcast_dim[dim]);
   }
 };
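Because the reduce functors are templates, switching their parameters from references to pointers only changes member access inside the body (. becomes ->) and the call sites (functor(place, &x, &out, ...)); template argument deduction still works. A simplified sketch of that shape, assuming a plain std::vector and a scalar output instead of Eigen expressions:

#include <iostream>
#include <numeric>
#include <vector>

// Simplified analogue of SumFunctor: X and Y are deduced from the call, and the
// mutated output is taken by pointer so the call site reads functor(&x, &out).
struct SumFunctor {
  template <typename X, typename Y>
  void operator()(const X* x, Y* y) {
    *y = std::accumulate(x->begin(), x->end(), Y{0});
  }
};

int main() {
  std::vector<double> x = {1.0, 2.0, 3.5};
  double out = 0.0;
  SumFunctor functor;
  functor(&x, &out);         // was functor(x, out) with reference parameters
  std::cout << out << "\n";  // prints 6.5
  return 0;
}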

paddle/fluid/operators/save_load_combine_op_test.cc

Lines changed: 27 additions & 27 deletions
@@ -23,17 +23,17 @@ USE_NO_KERNEL_OP(load_combine);
 
 int* CreateForSaveCombineOp(int x, int y, const std::vector<int>& lod_info,
                             std::string var_name,
-                            paddle::platform::CPUPlace& place,
-                            paddle::framework::Scope& scope,
-                            paddle::framework::LoD& expect_lod) {
-  auto var = scope.Var(var_name);
+                            const paddle::platform::CPUPlace& place,
+                            paddle::framework::Scope* scope,
+                            paddle::framework::LoD* expect_lod) {
+  auto var = scope->Var(var_name);
   auto tensor = var->GetMutable<paddle::framework::LoDTensor>();
   tensor->Resize({x, y});
-  expect_lod.resize(1);
+  expect_lod->resize(1);
   for (size_t i = 0; i < lod_info.size(); i++) {
-    expect_lod[0].push_back(lod_info[i]);
+    (*expect_lod)[0].push_back(lod_info[i]);
   }
-  tensor->set_lod(expect_lod);
+  tensor->set_lod(*expect_lod);
   int* expect = tensor->mutable_data<int>(place);
   for (int64_t i = 0; i < tensor->numel(); ++i) {
     expect[i] = static_cast<int>(i);
@@ -42,17 +42,17 @@ int* CreateForSaveCombineOp(int x, int y, const std::vector<int>& lod_info,
 }
 
 paddle::framework::LoDTensor* GeneratePlaceholderBeforeLoad(
-    const std::string out_var_name, paddle::framework::Scope& scope) {
-  auto load_var = scope.Var(out_var_name);
+    const std::string out_var_name, paddle::framework::Scope* scope) {
+  auto load_var = scope->Var(out_var_name);
   auto target = load_var->GetMutable<paddle::framework::LoDTensor>();
   return target;
 }
 
 int* GetValuesAfterLoadCombineOp(paddle::framework::LoDTensor* target,
-                                 paddle::framework::Scope& scope,
-                                 paddle::framework::LoD& actual_lod) {
+                                 const paddle::framework::Scope& scope,
+                                 paddle::framework::LoD* actual_lod) {
   int* actual = target->data<int>();
-  actual_lod = target->lod();
+  *actual_lod = target->lod();
   return actual;
 }
 
@@ -78,26 +78,26 @@ TEST(SaveLoadCombineOp, CPU) {
   std::vector<int> lod1 = {0, 1, 2, 3, 10};
   int numel1 = 100;
   paddle::framework::LoD expect_lod1;
-  int* expect1 = CreateForSaveCombineOp(10, 10, lod1, "test_var1", place, scope,
-                                        expect_lod1);
+  int* expect1 = CreateForSaveCombineOp(10, 10, lod1, "test_var1", place,
+                                        &scope, &expect_lod1);
 
   std::vector<int> lod2 = {0, 2, 5, 10};
   int numel2 = 200;
   paddle::framework::LoD expect_lod2;
-  int* expect2 = CreateForSaveCombineOp(10, 20, lod2, "test_var2", place, scope,
-                                        expect_lod2);
+  int* expect2 = CreateForSaveCombineOp(10, 20, lod2, "test_var2", place,
+                                        &scope, &expect_lod2);
 
   std::vector<int> lod3 = {0, 2, 3, 20};
   int numel3 = 4000;
   paddle::framework::LoD expect_lod3;
   int* expect3 = CreateForSaveCombineOp(20, 200, lod3, "test_var3", place,
-                                        scope, expect_lod3);
+                                        &scope, &expect_lod3);
 
   std::vector<int> lod4 = {0, 1, 20};
   int numel4 = 1000;
   paddle::framework::LoD expect_lod4;
-  int* expect4 = CreateForSaveCombineOp(20, 50, lod4, "test_var4", place, scope,
-                                        expect_lod4);
+  int* expect4 = CreateForSaveCombineOp(20, 50, lod4, "test_var4", place,
+                                        &scope, &expect_lod4);
 
   // Set attributes
   std::string filename = "check_tensor.ls";
@@ -111,10 +111,10 @@ TEST(SaveLoadCombineOp, CPU) {
   save_combine_op->Run(scope, place);
 
   // Set up output vars
-  auto target1 = GeneratePlaceholderBeforeLoad("out_var1", scope);
-  auto target2 = GeneratePlaceholderBeforeLoad("out_var2", scope);
-  auto target3 = GeneratePlaceholderBeforeLoad("out_var3", scope);
-  auto target4 = GeneratePlaceholderBeforeLoad("out_var4", scope);
+  auto target1 = GeneratePlaceholderBeforeLoad("out_var1", &scope);
+  auto target2 = GeneratePlaceholderBeforeLoad("out_var2", &scope);
+  auto target3 = GeneratePlaceholderBeforeLoad("out_var3", &scope);
+  auto target4 = GeneratePlaceholderBeforeLoad("out_var4", &scope);
 
   // Run the load_combine_op
   auto load_combine_op = paddle::framework::OpRegistry::CreateOp(
@@ -123,10 +123,10 @@ TEST(SaveLoadCombineOp, CPU) {
   load_combine_op->Run(scope, place);
 
   paddle::framework::LoD actual_lod1, actual_lod2, actual_lod3, actual_lod4;
-  int* actual1 = GetValuesAfterLoadCombineOp(target1, scope, actual_lod1);
-  int* actual2 = GetValuesAfterLoadCombineOp(target2, scope, actual_lod2);
-  int* actual3 = GetValuesAfterLoadCombineOp(target3, scope, actual_lod3);
-  int* actual4 = GetValuesAfterLoadCombineOp(target4, scope, actual_lod4);
+  int* actual1 = GetValuesAfterLoadCombineOp(target1, scope, &actual_lod1);
+  int* actual2 = GetValuesAfterLoadCombineOp(target2, scope, &actual_lod2);
+  int* actual3 = GetValuesAfterLoadCombineOp(target3, scope, &actual_lod3);
+  int* actual4 = GetValuesAfterLoadCombineOp(target4, scope, &actual_lod4);
 
   CheckValues(expect1, actual1, expect_lod1, actual_lod1, numel1);
   CheckValues(expect2, actual2, expect_lod2, actual_lod2, numel2);
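The test helpers apply the same convention with one extra distinction: arguments a helper only reads stay const references (the CPUPlace, and the Scope in the read-only load path), while arguments it fills in become pointers. A small hedged sketch of that split, with invented names rather than the test's real helpers:

#include <iostream>
#include <string>
#include <vector>

struct Config { std::string prefix; };

// Read-only input stays a const reference; the container the helper fills is a pointer.
void BuildNames(const Config& cfg, int count, std::vector<std::string>* names) {
  names->clear();
  for (int i = 0; i < count; ++i) {
    names->push_back(cfg.prefix + std::to_string(i));
  }
}

int main() {
  Config cfg{"var"};
  std::vector<std::string> names;
  BuildNames(cfg, 3, &names);         // mutation is visible at the call site
  std::cout << names.back() << "\n";  // prints var2
  return 0;
}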
