Skip to content

Commit e735359

Browse files
authored
Fix more CPPlint issues in fluid/operators/math (#10249)
* Fix CPPLint errors
* Fix CPPLint errors in sequence2batch
* Fix compilation
* Fix LSTM op and GRU op
* Fix LSTMP op
* Fix more cpplint errors in operators/math
* Address code review feedback
1 parent 6e0b47b commit e735359

17 files changed: +106 additions, −99 deletions

paddle/fluid/operators/gru_op.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ inline void ReorderInitState(const DeviceContext& ctx,
3434
framework::Tensor* dst, bool indexed_src) {
3535
math::CopyMatrixRowsFunctor<DeviceContext, T> row_shuffle;
3636
dst->mutable_data<T>(src.dims(), ctx.GetPlace());
37-
row_shuffle(ctx, src, index_lod, *dst, indexed_src);
37+
row_shuffle(ctx, src, index_lod, dst, indexed_src);
3838
}
3939

4040
template <typename DeviceContext, typename T>
@@ -61,7 +61,7 @@ class GRUKernel : public framework::OpKernel<T> {
6161
bool is_reverse = context.Attr<bool>("is_reverse");
6262
math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch;
6363
auto& dev_ctx = context.template device_context<DeviceContext>();
64-
to_batch(dev_ctx, *input, *batch_gate, true, is_reverse);
64+
to_batch(dev_ctx, *input, batch_gate, true, is_reverse);
6565

6666
if (bias) {
6767
math::RowwiseAdd<DeviceContext, T> add_bias;
@@ -113,7 +113,7 @@ class GRUKernel : public framework::OpKernel<T> {
113113

114114
math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
115115
batch_hidden->set_lod(batch_gate->lod());
116-
to_seq(dev_ctx, *batch_hidden, *hidden);
116+
to_seq(dev_ctx, *batch_hidden, hidden);
117117
}
118118

119119
void Compute(const framework::ExecutionContext& context) const override {
@@ -174,7 +174,7 @@ class GRUGradKernel : public framework::OpKernel<T> {
174174

175175
bool is_reverse = context.Attr<bool>("is_reverse");
176176
batch_hidden_grad.set_lod(batch_hidden->lod());
177-
to_batch(dev_ctx, *hidden_grad, batch_hidden_grad, false, is_reverse);
177+
to_batch(dev_ctx, *hidden_grad, &batch_hidden_grad, false, is_reverse);
178178

179179
math::GRUMetaValue<T> gru_value;
180180
gru_value.gate_weight = const_cast<T*>(weight_data);
@@ -236,7 +236,7 @@ class GRUGradKernel : public framework::OpKernel<T> {
236236
input_grad->mutable_data<T>(context.GetPlace());
237237
math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
238238
batch_gate_grad.set_lod(batch_gate->lod());
239-
to_seq(dev_ctx, batch_gate_grad, *input_grad);
239+
to_seq(dev_ctx, batch_gate_grad, input_grad);
240240
}
241241
if (bias_grad) {
242242
bias_grad->mutable_data<T>(context.GetPlace());

paddle/fluid/operators/lstm_op.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ inline void ReorderInitState(const DeviceContext& ctx,
3333
framework::Tensor* dst, bool indexed_src) {
3434
math::CopyMatrixRowsFunctor<DeviceContext, T> row_shuffle;
3535
dst->mutable_data<T>(src.dims(), ctx.GetPlace());
36-
row_shuffle(ctx, src, index_lod, *dst, indexed_src);
36+
row_shuffle(ctx, src, index_lod, dst, indexed_src);
3737
}
3838

3939
template <typename DeviceContext, typename T>
@@ -57,7 +57,7 @@ class LSTMKernel : public framework::OpKernel<T> {
5757
bool is_reverse = ctx.Attr<bool>("is_reverse");
5858
math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch;
5959
auto& device_ctx = ctx.template device_context<DeviceContext>();
60-
to_batch(device_ctx, *input, *batch_gate, true, is_reverse);
60+
to_batch(device_ctx, *input, batch_gate, true, is_reverse);
6161

6262
auto in_dims = input->dims();
6363
int frame_size = static_cast<int>(in_dims[1] / 4);
@@ -161,11 +161,11 @@ class LSTMKernel : public framework::OpKernel<T> {
161161
math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
162162
batch_hidden.set_lod(batch_gate->lod());
163163
// restore the output hidden in LoDTensor from the batch hidden
164-
to_seq(device_ctx, batch_hidden, *hidden_out);
164+
to_seq(device_ctx, batch_hidden, hidden_out);
165165

166166
batch_cell.set_lod(batch_gate->lod());
167167
// restore the output cell state in LoDTensor from the batch cell
168-
to_seq(device_ctx, batch_cell, *cell_out);
168+
to_seq(device_ctx, batch_cell, cell_out);
169169
}
170170
};
171171

@@ -257,7 +257,7 @@ class LSTMGradKernel : public framework::OpKernel<T> {
257257
const framework::DDim& dims, framework::LoDTensor& dst) {
258258
dst.mutable_data<T>(dims, ctx.GetPlace());
259259
dst.set_lod(batch_gate->lod());
260-
to_batch(ctx, src, dst, false);
260+
to_batch(ctx, src, &dst, false);
261261
};
262262

263263
LoDTensor batch_hidden, batch_hidden_g, batch_cell;
@@ -351,7 +351,7 @@ class LSTMGradKernel : public framework::OpKernel<T> {
351351
if (in_g) {
352352
/* backward data */
353353
in_g->mutable_data<T>(ctx.GetPlace());
354-
to_seq(device_ctx, batch_gate_g, *in_g);
354+
to_seq(device_ctx, batch_gate_g, in_g);
355355
}
356356
if (bias && bias_g) {
357357
/* backward bias */

paddle/fluid/operators/lstmp_op.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ inline void ReorderInitState(const DeviceContext& ctx,
4040
framework::Tensor* dst, bool indexed_src) {
4141
math::CopyMatrixRowsFunctor<DeviceContext, T> row_shuffle;
4242
dst->mutable_data<T>(src.dims(), ctx.GetPlace());
43-
row_shuffle(ctx, src, index, *dst, indexed_src);
43+
row_shuffle(ctx, src, index, dst, indexed_src);
4444
}
4545

4646
template <typename DeviceContext, typename T>
@@ -81,7 +81,7 @@ class LSTMPKernel : public framework::OpKernel<T> {
8181
bool is_reverse = ctx.Attr<bool>("is_reverse");
8282
math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch;
8383
auto& device_ctx = ctx.template device_context<DeviceContext>();
84-
to_batch(device_ctx, *input, *batch_gate, true, is_reverse);
84+
to_batch(device_ctx, *input, batch_gate, true, is_reverse);
8585

8686
auto in_dims = input->dims();
8787
int frame_size = static_cast<int>(in_dims[1] / 4);
@@ -208,11 +208,11 @@ class LSTMPKernel : public framework::OpKernel<T> {
208208
math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
209209
batch_proj.set_lod(batch_gate->lod());
210210
// restore the output hidden in LoDTensor from the batch hidden
211-
to_seq(device_ctx, batch_proj, *proj_out);
211+
to_seq(device_ctx, batch_proj, proj_out);
212212

213213
batch_cell.set_lod(batch_gate->lod());
214214
// restore the output cell state in LoDTensor from the batch cell
215-
to_seq(device_ctx, batch_cell, *cell_out);
215+
to_seq(device_ctx, batch_cell, cell_out);
216216
}
217217
};
218218

@@ -332,7 +332,7 @@ class LSTMPGradKernel : public framework::OpKernel<T> {
332332
const framework::DDim& dims, framework::LoDTensor& dst) {
333333
dst.mutable_data<T>(dims, ctx.GetPlace());
334334
dst.set_lod(batch_gate->lod());
335-
to_batch(ctx, src, dst, false);
335+
to_batch(ctx, src, &dst, false);
336336
};
337337

338338
LoDTensor batch_hidden_g, batch_proj, batch_proj_g, batch_cell;
@@ -471,7 +471,7 @@ class LSTMPGradKernel : public framework::OpKernel<T> {
471471
if (in_g) {
472472
/* backward data */
473473
in_g->mutable_data<T>(ctx.GetPlace());
474-
to_seq(device_ctx, batch_gate_g, *in_g);
474+
to_seq(device_ctx, batch_gate_g, in_g);
475475
}
476476
if (bias && bias_g) {
477477
/* backward bias */

paddle/fluid/operators/math/concat_test.cc

Lines changed: 38 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -17,17 +17,14 @@ limitations under the License. */
1717
#include <vector>
1818
#include "paddle/fluid/framework/tensor_util.h"
1919

20-
using namespace paddle::framework;
21-
using namespace paddle::platform;
22-
2320
template <typename DeviceContext, typename Place>
2421
void testConcat() {
25-
Tensor input_a_cpu;
26-
Tensor input_b_cpu;
27-
Tensor out_cpu;
28-
Tensor input_a;
29-
Tensor input_b;
30-
Tensor out;
22+
paddle::framework::Tensor input_a_cpu;
23+
paddle::framework::Tensor input_b_cpu;
24+
paddle::framework::Tensor out_cpu;
25+
paddle::framework::Tensor input_a;
26+
paddle::framework::Tensor input_b;
27+
paddle::framework::Tensor out;
3128

3229
DeviceContext* context = new DeviceContext(Place());
3330
// DeviceContext context(Place());
@@ -40,18 +37,18 @@ void testConcat() {
4037
* output:
4138
* out.shape: [5, 3, 4]
4239
*/
43-
auto dim_a = make_ddim({2, 3, 4});
44-
auto dim_b = make_ddim({3, 3, 4});
45-
auto dim_out = make_ddim({5, 3, 4});
40+
auto dim_a = paddle::framework::make_ddim({2, 3, 4});
41+
auto dim_b = paddle::framework::make_ddim({3, 3, 4});
42+
auto dim_out = paddle::framework::make_ddim({5, 3, 4});
4643

4744
input_a.mutable_data<int>(dim_a, Place());
4845
input_b.mutable_data<int>(dim_b, Place());
4946
out.mutable_data<int>(dim_out, Place());
5047

5148
if (paddle::platform::is_gpu_place(Place())) {
52-
input_a_cpu.mutable_data<int>(dim_a, CPUPlace());
53-
input_b_cpu.mutable_data<int>(dim_b, CPUPlace());
54-
out_cpu.mutable_data<int>(dim_out, CPUPlace());
49+
input_a_cpu.mutable_data<int>(dim_a, paddle::platform::CPUPlace());
50+
input_b_cpu.mutable_data<int>(dim_b, paddle::platform::CPUPlace());
51+
out_cpu.mutable_data<int>(dim_out, paddle::platform::CPUPlace());
5552
}
5653

5754
int* a_ptr;
@@ -72,11 +69,11 @@ void testConcat() {
7269
}
7370

7471
if (paddle::platform::is_gpu_place(Place())) {
75-
TensorCopySync(input_a_cpu, Place(), &input_a);
76-
TensorCopySync(input_b_cpu, Place(), &input_b);
72+
paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a);
73+
paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b);
7774
}
7875

79-
std::vector<Tensor> input;
76+
std::vector<paddle::framework::Tensor> input;
8077
input.push_back(input_a);
8178
input.push_back(input_b);
8279

@@ -89,7 +86,8 @@ void testConcat() {
8986

9087
int* out_ptr;
9188
if (paddle::platform::is_gpu_place(Place())) {
92-
TensorCopySync(out, CPUPlace(), &out_cpu);
89+
paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context,
90+
&out_cpu);
9391
out_ptr = out_cpu.data<int>();
9492
} else {
9593
out_ptr = out.data<int>();
@@ -115,9 +113,9 @@ void testConcat() {
115113
* output:
116114
* out.shape: [2, 7, 4]
117115
*/
118-
dim_a = make_ddim({2, 3, 4});
119-
dim_b = make_ddim({2, 4, 4});
120-
dim_out = make_ddim({2, 7, 4});
116+
dim_a = paddle::framework::make_ddim({2, 3, 4});
117+
dim_b = paddle::framework::make_ddim({2, 4, 4});
118+
dim_out = paddle::framework::make_ddim({2, 7, 4});
121119

122120
input_a.Resize(dim_a);
123121
input_b.Resize(dim_b);
@@ -144,8 +142,8 @@ void testConcat() {
144142
}
145143

146144
if (paddle::platform::is_gpu_place(Place())) {
147-
TensorCopySync(input_a_cpu, Place(), &input_a);
148-
TensorCopySync(input_b_cpu, Place(), &input_b);
145+
paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a);
146+
paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b);
149147
}
150148

151149
input.clear();
@@ -159,7 +157,8 @@ void testConcat() {
159157
PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
160158

161159
if (paddle::platform::is_gpu_place(Place())) {
162-
TensorCopySync(out, CPUPlace(), &out_cpu);
160+
paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context,
161+
&out_cpu);
163162
out_ptr = out_cpu.data<int>();
164163
} else {
165164
out_ptr = out.data<int>();
@@ -187,9 +186,9 @@ void testConcat() {
187186
* output:
188187
* out.shape: [2, 3, 9]
189188
*/
190-
dim_a = make_ddim({2, 3, 4});
191-
dim_b = make_ddim({2, 3, 5});
192-
dim_out = make_ddim({2, 3, 9});
189+
dim_a = paddle::framework::make_ddim({2, 3, 4});
190+
dim_b = paddle::framework::make_ddim({2, 3, 5});
191+
dim_out = paddle::framework::make_ddim({2, 3, 9});
193192

194193
input_a.Resize(dim_a);
195194
input_b.Resize(dim_b);
@@ -216,8 +215,8 @@ void testConcat() {
216215
}
217216

218217
if (paddle::platform::is_gpu_place(Place())) {
219-
TensorCopySync(input_a_cpu, Place(), &input_a);
220-
TensorCopySync(input_b_cpu, Place(), &input_b);
218+
paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a);
219+
paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b);
221220
}
222221

223222
input.clear();
@@ -231,7 +230,8 @@ void testConcat() {
231230
PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
232231

233232
if (paddle::platform::is_gpu_place(Place())) {
234-
TensorCopySync(out, CPUPlace(), &out_cpu);
233+
paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context,
234+
&out_cpu);
235235
out_ptr = out_cpu.data<int>();
236236
} else {
237237
out_ptr = out.data<int>();
@@ -261,9 +261,9 @@ void testConcat() {
261261
* output:
262262
* out.shape: [2, 6, 4]
263263
*/
264-
dim_a = make_ddim({2, 3, 4});
265-
dim_b = make_ddim({2, 3, 4});
266-
dim_out = make_ddim({2, 6, 4});
264+
dim_a = paddle::framework::make_ddim({2, 3, 4});
265+
dim_b = paddle::framework::make_ddim({2, 3, 4});
266+
dim_out = paddle::framework::make_ddim({2, 6, 4});
267267

268268
input_a.Resize(dim_a);
269269
input_b.Resize(dim_b);
@@ -290,8 +290,8 @@ void testConcat() {
290290
}
291291

292292
if (paddle::platform::is_gpu_place(Place())) {
293-
TensorCopySync(input_a_cpu, Place(), &input_a);
294-
TensorCopySync(input_b_cpu, Place(), &input_b);
293+
paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a);
294+
paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b);
295295
}
296296

297297
input.clear();
@@ -305,7 +305,8 @@ void testConcat() {
305305
PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
306306

307307
if (paddle::platform::is_gpu_place(Place())) {
308-
TensorCopySync(out, CPUPlace(), &out_cpu);
308+
paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context,
309+
&out_cpu);
309310
out_ptr = out_cpu.data<int>();
310311
} else {
311312
out_ptr = out.data<int>();

paddle/fluid/operators/math/cross_entropy.cu

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -108,7 +108,9 @@ class CrossEntropyFunctor<platform::CUDADeviceContext, T> {
108108

109109
if (softLabel) {
110110
const T* label_data = labels->data<T>();
111-
int block = class_num > 512 ? 512 : pow(2, int(std::log2(class_num)));
111+
int block = class_num > 512
112+
? 512
113+
: pow(2, static_cast<int>(std::log2(class_num)));
112114

113115
SoftCrossEntropyKernel<T><<<
114116
batch_size, block, block * sizeof(T),

paddle/fluid/operators/math/detail/lstm_gpu_kernel.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,13 +13,13 @@ See the License for the specific language governing permissions and
1313
limitations under the License. */
1414

1515
#pragma once
16+
#include <type_traits>
17+
1618
#include "paddle/fluid/operators/math/detail/activation_functions.h"
1719
#include "paddle/fluid/operators/math/lstm_compute.h"
1820
#include "paddle/fluid/platform/cuda_helper.h"
1921
#include "paddle/fluid/platform/device_context.h"
2022

21-
#include <type_traits>
22-
2323
namespace paddle {
2424
namespace operators {
2525
namespace math {

0 commit comments

Comments (0)