Skip to content

Commit c5d7107

Browse files
refine var name
1 parent 0a6262d commit c5d7107

File tree

1 file changed

+22
-22
lines changed

1 file changed

+22
-22
lines changed

paddle/operators/bilinear_tensor_product_op.h

Lines changed: 22 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -43,25 +43,25 @@ class BilinearTensorProductKernel : public framework::OpKernel<T> {
4343

4444
auto batch_size = x->dims()[0];
4545
auto weight_dims = weight->dims();
46-
int Out_dim = weight_dims[0];
47-
int X_dim = weight_dims[1];
48-
int Y_dim = weight_dims[2];
46+
int out_dim = weight_dims[0];
47+
auto x_dim = weight_dims[1];
48+
auto y_dim = weight_dims[2];
4949
auto place = ctx.GetEigenDevice<Place>();
5050

5151
// Create the intermediate variable to calculate the result of
5252
// Input(X) multiplied by Input(Weight_i), the formula is:
5353
// left_mul = X Weight_i.
5454
Tensor left_mul;
55-
left_mul.mutable_data<T>(framework::make_ddim({batch_size, Y_dim}),
55+
left_mul.mutable_data<T>(framework::make_ddim({batch_size, y_dim}),
5656
ctx.GetPlace());
5757
auto left_mul_mat = EigenMatrix<T>::From(left_mul);
5858

59-
for (int i = 0; i < Out_dim; ++i) {
59+
for (int i = 0; i < out_dim; ++i) {
6060
auto output_col_vec = output_mat.chip(i, 1);
6161
Tensor weight_mat =
62-
weight->Slice(i, i + 1).Resize(framework::make_ddim({X_dim, Y_dim}));
62+
weight->Slice(i, i + 1).Resize(framework::make_ddim({x_dim, y_dim}));
6363
math::gemm<Place, T>(ctx.device_context(), CblasNoTrans, CblasNoTrans,
64-
batch_size, Y_dim, X_dim, 1, x->data<T>(),
64+
batch_size, y_dim, x_dim, 1, x->data<T>(),
6565
weight_mat.data<T>(), 0, left_mul.data<T>());
6666
output_col_vec.device(place) =
6767
(left_mul_mat * y_mat).sum(Eigen::DSizes<int, 1>(1));
@@ -89,9 +89,9 @@ class BilinearTensorProductGradKernel : public framework::OpKernel<T> {
8989

9090
auto batch_size = x->dims()[0];
9191
auto weight_dims = weight->dims();
92-
int Out_dim = weight_dims[0];
93-
int X_dim = weight_dims[1];
94-
int Y_dim = weight_dims[2];
92+
int out_dim = weight_dims[0];
93+
auto x_dim = weight_dims[1];
94+
auto y_dim = weight_dims[2];
9595

9696
auto x_mat = EigenMatrix<T>::From(*x);
9797
auto y_mat = EigenMatrix<T>::From(*y);
@@ -100,13 +100,13 @@ class BilinearTensorProductGradKernel : public framework::OpKernel<T> {
100100

101101
// Create the intermediate variable to calculate the Output(Y@Grad).
102102
Tensor x_scale;
103-
x_scale.mutable_data<T>(framework::make_ddim({batch_size, X_dim}),
103+
x_scale.mutable_data<T>(framework::make_ddim({batch_size, x_dim}),
104104
ctx.GetPlace());
105105
auto x_scale_mat = EigenMatrix<T>::From(x_scale);
106106

107107
// Create the intermediate variable to calculate the Output(X@Grad).
108108
Tensor y_scale;
109-
y_scale.mutable_data<T>(framework::make_ddim({batch_size, Y_dim}),
109+
y_scale.mutable_data<T>(framework::make_ddim({batch_size, y_dim}),
110110
ctx.GetPlace());
111111
auto y_scale_mat = EigenMatrix<T>::From(y_scale);
112112

@@ -126,19 +126,19 @@ class BilinearTensorProductGradKernel : public framework::OpKernel<T> {
126126

127127
// Calculate the Output(X@Grad) and Output(Y@Grad).
128128
if (d_x || d_y) {
129-
Eigen::DSizes<int, 2> bcast_for_x(1, Y_dim);
130-
Eigen::DSizes<int, 2> bcast_for_y(1, X_dim);
131-
for (int i = 0; i < Out_dim; ++i) {
129+
Eigen::DSizes<int, 2> bcast_for_x(1, y_dim);
130+
Eigen::DSizes<int, 2> bcast_for_y(1, x_dim);
131+
for (int i = 0; i < out_dim; ++i) {
132132
Tensor weight_i = weight->Slice(i, i + 1).Resize(
133-
framework::make_ddim({X_dim, Y_dim}));
133+
framework::make_ddim({x_dim, y_dim}));
134134
auto output_vec = d_out_mat.chip(i, 1);
135135
if (d_x) {
136136
y_scale_mat.device(place) =
137137
output_vec.reshape(Eigen::DSizes<int, 2>(batch_size, 1))
138138
.broadcast(bcast_for_x) *
139139
y_mat;
140140
math::gemm<Place, T>(ctx.device_context(), CblasNoTrans, CblasTrans,
141-
batch_size, X_dim, Y_dim, 1, y_scale.data<T>(),
141+
batch_size, x_dim, y_dim, 1, y_scale.data<T>(),
142142
weight_i.data<T>(), 1, d_x->data<T>());
143143
}
144144
if (d_y) {
@@ -147,7 +147,7 @@ class BilinearTensorProductGradKernel : public framework::OpKernel<T> {
147147
.broadcast(bcast_for_y) *
148148
x_mat;
149149
math::gemm<Place, T>(ctx.device_context(), CblasNoTrans, CblasNoTrans,
150-
batch_size, Y_dim, X_dim, 1, x_scale.data<T>(),
150+
batch_size, y_dim, x_dim, 1, x_scale.data<T>(),
151151
weight_i.data<T>(), 1, d_y->data<T>());
152152
}
153153
}
@@ -156,17 +156,17 @@ class BilinearTensorProductGradKernel : public framework::OpKernel<T> {
156156
// Calculate the gradient of Input(Weight).
157157
if (d_weight) {
158158
d_weight->mutable_data<T>(ctx.GetPlace());
159-
Eigen::DSizes<int, 2> bcast_for_weight(1, X_dim);
160-
for (int i = 0; i < Out_dim; ++i) {
159+
Eigen::DSizes<int, 2> bcast_for_weight(1, x_dim);
160+
for (int i = 0; i < out_dim; ++i) {
161161
Tensor d_weight_i = d_weight->Slice(i, i + 1).Resize(
162-
framework::make_ddim({X_dim, Y_dim}));
162+
framework::make_ddim({x_dim, y_dim}));
163163
auto output_vec = d_out_mat.chip(i, 1);
164164
x_scale_mat.device(place) =
165165
output_vec.reshape(Eigen::DSizes<int, 2>(batch_size, 1))
166166
.broadcast(bcast_for_weight) *
167167
x_mat;
168168
math::gemm<Place, T>(ctx.device_context(), CblasTrans, CblasNoTrans,
169-
X_dim, Y_dim, batch_size, 1, x_scale.data<T>(),
169+
x_dim, y_dim, batch_size, 1, x_scale.data<T>(),
170170
y->data<T>(), 0, d_weight_i.data<T>());
171171
}
172172
}

0 commit comments

Comments
 (0)