@@ -34,9 +34,16 @@ class PadOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(x_dim.size() * 2, int64_t(paddings.size()),
                       "Size of paddings should be equal to 2 * dimension size "
                       "of input tensor.");
+    for (size_t i = 0; i < paddings.size(); ++i) {
+      PADDLE_ENFORCE_GE(paddings[i], 0, "paddings should >= 0.");
+    }
     std::vector<int64_t> out_dims(x_dim.size());
     for (int i = 0; i < x_dim.size(); ++i) {
-      out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1];
+      if ((!ctx->IsRuntime()) && (x_dim[i] == -1)) {
+        out_dims[i] = -1;
+      } else {
+        out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1];
+      }
     }
     ctx->SetOutputDim("Out", framework::make_ddim(out_dims));
     if (out_dims[0] == x_dim[0]) {
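To see what the new compile-time branch does in isolation, here is a minimal standalone sketch; the helper `InferPadOutDims`, its plain-vector signature, and the `main()` driver are hypothetical and only mirror the hunk's logic, they are not part of the patch:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Sketch of the forward shape inference: a dimension that is still
// unknown (-1) at compile time stays -1; known dims grow by the
// paddings on both sides.
std::vector<int64_t> InferPadOutDims(const std::vector<int64_t>& x_dim,
                                     const std::vector<int>& paddings,
                                     bool is_runtime) {
  assert(x_dim.size() * 2 == paddings.size());
  for (int p : paddings) assert(p >= 0);  // mirrors PADDLE_ENFORCE_GE
  std::vector<int64_t> out_dims(x_dim.size());
  for (size_t i = 0; i < x_dim.size(); ++i) {
    if (!is_runtime && x_dim[i] == -1) {
      out_dims[i] = -1;  // unknown dim stays unknown until runtime
    } else {
      out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1];
    }
  }
  return out_dims;
}

int main() {
  // Compile time: the unknown batch dim passes through as -1,
  // the known dim 3 padded by (1, 2) becomes 6.
  auto dims = InferPadOutDims({-1, 3}, {0, 0, 1, 2}, /*is_runtime=*/false);
  assert(dims[0] == -1 && dims[1] == 6);
  return 0;
}
```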
@@ -100,18 +107,14 @@ class PadOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    auto dout_dims = ctx->GetInputDim(framework::GradVarName("Out"));
-    auto& paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
-    for (int i = 0; i < dout_dims.size(); ++i) {
-      dout_dims[i] -= (paddings[i * 2] + paddings[i * 2 + 1]);
-    }
-
     auto x_grad_name = framework::GradVarName("X");
     if (ctx->HasOutput(x_grad_name)) {
       auto dout_dims = ctx->GetInputDim(framework::GradVarName("Out"));
       auto& paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
       for (int i = 0; i < dout_dims.size(); ++i) {
-        dout_dims[i] -= (paddings[i * 2] + paddings[i * 2 + 1]);
+        if (ctx->IsRuntime() || (dout_dims[i] != -1)) {
+          dout_dims[i] -= (paddings[i * 2] + paddings[i * 2 + 1]);
+        }
       }
       ctx->SetOutputDim(x_grad_name, dout_dims);
     }
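The gradient hunk applies the same guard in reverse: dOut's dims shrink back by the paddings, but only where the dim is known. A hypothetical standalone sketch of that logic (again, `InferPadGradDims` and its signature are illustrative, not the patch's API):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Sketch of the gradient shape inference: recover dX's dims by
// subtracting the paddings from dOut's dims, skipping dims that are
// still unknown (-1) at compile time.
std::vector<int64_t> InferPadGradDims(const std::vector<int64_t>& dout_dims,
                                      const std::vector<int>& paddings,
                                      bool is_runtime) {
  std::vector<int64_t> x_grad_dims = dout_dims;
  for (size_t i = 0; i < x_grad_dims.size(); ++i) {
    if (is_runtime || x_grad_dims[i] != -1) {
      x_grad_dims[i] -= (paddings[i * 2] + paddings[i * 2 + 1]);
    }
  }
  return x_grad_dims;
}

int main() {
  // Compile time: the unknown leading dim stays -1; the padded dim 6
  // with paddings (1, 2) recovers the original size 3.
  auto dims = InferPadGradDims({-1, 6}, {0, 0, 1, 2}, /*is_runtime=*/false);
  assert(dims[0] == -1 && dims[1] == 3);
  return 0;
}
```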