Skip to content

Commit 31dc019

Browse files
committed
fix ContextProjectFunctor parameter order
1 parent e25bfc7 commit 31dc019

File tree

3 files changed

+33
-32
lines changed

3 files changed

+33
-32
lines changed

paddle/operators/math/context_project.h

Lines changed: 19 additions & 17 deletions
Original file line number · Diff line number · Diff line change
@@ -88,9 +88,10 @@ template <typename Place, typename T>
8888
class ContextProjectFunctor {
8989
public:
9090
void operator()(const platform::DeviceContext& context, const LoDTensor& in,
91-
const Tensor& padding_data, Tensor& col,
92-
bool padding_trainable, int context_start, int context_length,
93-
int context_stride, int up_pad, int down_pad) {
91+
const Tensor& padding_data, bool padding_trainable,
92+
const int context_start, const int context_length,
93+
const int context_stride, const int up_pad,
94+
const int down_pad, Tensor* col) {
9495
auto lod_level_0 = in.lod()[0];
9596

9697
math::Im2ColFunctor<math::ColFormat::kOCF, Place, float> im2col_ocf;
@@ -109,8 +110,8 @@ class ContextProjectFunctor {
109110
: static_cast<int>(lod_level_0[i]);
110111
input_row_end = static_cast<int>(lod_level_0[i + 1]);
111112

112-
Tensor out_t = col.Slice(static_cast<int>(lod_level_0[i]),
113-
static_cast<int>(lod_level_0[i + 1]));
113+
Tensor out_t = col->Slice(static_cast<int>(lod_level_0[i]),
114+
static_cast<int>(lod_level_0[i + 1]));
114115

115116
sequence_height = static_cast<int>(out_t.dims()[0]);
116117

@@ -133,8 +134,8 @@ class ContextProjectFunctor {
133134
}
134135
if (padding_trainable) {
135136
for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) {
136-
Tensor out_t = col.Slice(static_cast<int>(lod_level_0[i]),
137-
static_cast<int>(lod_level_0[i + 1]));
137+
Tensor out_t = col->Slice(static_cast<int>(lod_level_0[i]),
138+
static_cast<int>(lod_level_0[i + 1]));
138139

139140
sequence_height = static_cast<int>(out_t.dims()[0]);
140141

@@ -197,10 +198,11 @@ class ContextProjectFunctor {
197198
template <typename Place, typename T>
198199
class ContextProjectGradFunctor {
199200
public:
200-
void operator()(const platform::DeviceContext& context, LoDTensor& in,
201-
Tensor& padding_data, Tensor& col, bool padding_trainable,
202-
int context_start, int context_length, int context_stride,
203-
int up_pad, int down_pad, bool input_grad, bool pad_grad) {
201+
void operator()(const platform::DeviceContext& context, const LoDTensor& in,
202+
bool padding_trainable, const int context_start,
203+
const int context_length, const int context_stride,
204+
const int up_pad, const int down_pad, bool pad_grad,
205+
bool input_grad, Tensor* padding_data, Tensor* col) {
204206
auto lod_level_0 = in.lod()[0];
205207

206208
math::Col2ImFunctor<math::ColFormat::kOCF, Place, float> col2im_ocf;
@@ -220,8 +222,8 @@ class ContextProjectGradFunctor {
220222
: static_cast<int>(lod_level_0[i]);
221223
input_row_end = static_cast<int>(lod_level_0[i + 1]);
222224

223-
Tensor out_t = col.Slice(static_cast<int>(lod_level_0[i]),
224-
static_cast<int>(lod_level_0[i + 1]));
225+
Tensor out_t = col->Slice(static_cast<int>(lod_level_0[i]),
226+
static_cast<int>(lod_level_0[i + 1]));
225227

226228
sequence_height = static_cast<int>(out_t.dims()[0]);
227229

@@ -247,8 +249,8 @@ class ContextProjectGradFunctor {
247249
if (pad_grad) {
248250
if (padding_trainable) {
249251
for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) {
250-
Tensor out_t = col.Slice(static_cast<int>(lod_level_0[i]),
251-
static_cast<int>(lod_level_0[i + 1]));
252+
Tensor out_t = col->Slice(static_cast<int>(lod_level_0[i]),
253+
static_cast<int>(lod_level_0[i + 1]));
252254

253255
sequence_height = static_cast<int>(out_t.dims()[0]);
254256
out_t.Resize({sequence_height * context_length, sequence_width});
@@ -262,7 +264,7 @@ class ContextProjectGradFunctor {
262264
k + context_length < up_pad ? context_length : up_pad - k;
263265
Tensor out_t_sub = out_t.Slice(k * context_length,
264266
k * context_length + padding_size);
265-
Tensor w_sub = padding_data.Slice(k, k + padding_size);
267+
Tensor w_sub = padding_data->Slice(k, k + padding_size);
266268
auto out_t_sub_e = EigenMatrix<T>::From(out_t_sub);
267269
auto w_sub_e = EigenMatrix<T>::From(w_sub);
268270
w_sub_e.device(*context.GetEigenDevice<Place>()) =
@@ -295,7 +297,7 @@ class ContextProjectGradFunctor {
295297
Tensor out_t_sub = out_t.Slice(
296298
(down_pad_begin_row + t) * context_length - padding_size,
297299
(down_pad_begin_row + t) * context_length);
298-
Tensor w_sub = padding_data.Slice(
300+
Tensor w_sub = padding_data->Slice(
299301
up_pad + padding_idx, up_pad + padding_idx + padding_size);
300302
auto out_t_sub_e = EigenMatrix<T>::From(out_t_sub);
301303
auto w_sub_e = EigenMatrix<T>::From(w_sub);

paddle/operators/math/vol2col.cu

Lines changed: 3 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -174,10 +174,9 @@ __global__ void col2vol(int num_kernels, const T* data_col, int depth,
174174
int data_col_index =
175175
(((((c * filter_depth + d_off) * filter_height + h_off) *
176176
filter_width +
177-
w_off) *
178-
output_detph +
179-
d_col) *
180-
output_height +
177+
w_off)));
178+
data_col_index =
179+
((data_col_index * output_detph + d_col) * output_height +
181180
h_col) *
182181
output_width +
183182
w_col;

paddle/operators/sequence_conv_op.h

Lines changed: 11 additions & 11 deletions
Original file line number · Diff line number · Diff line change
@@ -62,9 +62,9 @@ class SequenceConvKernel : public framework::OpKernel<T> {
6262

6363
math::ContextProjectFunctor<Place, T> seq_project_functor;
6464

65-
seq_project_functor(context.device_context(), *in, *padding_data, col,
65+
seq_project_functor(context.device_context(), *in, *padding_data,
6666
padding_trainable, context_start, context_length,
67-
context_stride, up_pad, down_pad);
67+
context_stride, up_pad, down_pad, &col);
6868

6969
math::matmul<Place, T>(context.device_context(), col, false, filter, false,
7070
static_cast<T>(1.0), out, static_cast<T>(0.0));
@@ -117,10 +117,10 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
117117
in_g->set_lod(in->lod());
118118
set_zero(context.device_context(), in_g, static_cast<T>(0));
119119

120-
seq_project_grad_functor(context.device_context(), *in_g, *padding_data_g,
121-
col, padding_trainable, context_start,
122-
context_length, context_stride, up_pad, down_pad,
123-
true, false);
120+
seq_project_grad_functor(context.device_context(), *in_g,
121+
padding_trainable, context_start, context_length,
122+
context_stride, up_pad, down_pad, false, true,
123+
padding_data_g, &col);
124124
}
125125

126126
if (padding_trainable && padding_data_g) {
@@ -129,9 +129,9 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
129129

130130
LoDTensor* input = const_cast<LoDTensor*>(in);
131131
seq_project_grad_functor(context.device_context(), *input,
132-
*padding_data_g, col, padding_trainable,
133-
context_start, context_length, context_stride,
134-
up_pad, down_pad, false, true);
132+
padding_trainable, context_start, context_length,
133+
context_stride, up_pad, down_pad, true, false,
134+
padding_data_g, &col);
135135
}
136136

137137
if (filter_g) {
@@ -146,9 +146,9 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
146146
padding_data = context.Input<Tensor>("PaddingData");
147147
}
148148

149-
seq_project_functor(context.device_context(), *in, *padding_data, col,
149+
seq_project_functor(context.device_context(), *in, *padding_data,
150150
padding_trainable, context_start, context_length,
151-
context_stride, up_pad, down_pad);
151+
context_stride, up_pad, down_pad, &col);
152152

153153
math::matmul<Place, T>(context.device_context(), col, true, out_grad,
154154
false, T(1.0), &filter_grad, T(1.0));

0 commit comments

Comments (0)