Skip to content

Commit 58c9903

Browse files
authored
Add conj pixel shuffle yaml (#41499) (#41616)
* add conj flip yaml * add flip conj pixel shuffle
1 parent 63f573a commit 58c9903

File tree

10 files changed

+91
-44
lines changed

10 files changed

+91
-44
lines changed

paddle/fluid/operators/pixel_shuffle_op.cc

Lines changed: 5 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -82,42 +82,6 @@ class PixelShuffleGradMaker : public framework::SingleGradOpMaker<T> {
8282
class PixelShuffleGradOp : public framework::OperatorWithKernel {
8383
public:
8484
using framework::OperatorWithKernel::OperatorWithKernel;
85-
86-
void InferShape(framework::InferShapeContext* ctx) const override {
87-
PADDLE_ENFORCE_EQ(
88-
ctx->HasInput(framework::GradVarName("Out")), true,
89-
platform::errors::NotFound("Input(Out@Grad) should not be null"));
90-
PADDLE_ENFORCE_EQ(
91-
ctx->HasOutput(framework::GradVarName("X")), true,
92-
platform::errors::NotFound("Output(X@Grad) should not be null"));
93-
94-
auto do_dims = ctx->GetInputDim(framework::GradVarName("Out"));
95-
PADDLE_ENFORCE_EQ(do_dims.size(), 4,
96-
platform::errors::InvalidArgument(
97-
"Input should be a 4-D tensor of format [N, C, H, W] "
98-
"or [N, H, W, C], but got %u.",
99-
do_dims.size()));
100-
101-
auto upscale_factor = ctx->Attrs().Get<int>("upscale_factor");
102-
103-
const std::string data_format =
104-
ctx->Attrs().Get<std::string>("data_format");
105-
const bool channel_last = (data_format == "NHWC");
106-
107-
auto dx_dims = do_dims;
108-
dx_dims[0] = do_dims[0];
109-
110-
if (!channel_last) {
111-
dx_dims[1] = do_dims[1] * (upscale_factor * upscale_factor);
112-
dx_dims[2] = do_dims[2] / upscale_factor;
113-
dx_dims[3] = do_dims[3] / upscale_factor;
114-
} else {
115-
dx_dims[1] = do_dims[1] / upscale_factor;
116-
dx_dims[2] = do_dims[2] / upscale_factor;
117-
dx_dims[3] = do_dims[3] * (upscale_factor * upscale_factor);
118-
}
119-
ctx->SetOutputDim(framework::GradVarName("X"), dx_dims);
120-
}
12185
};
12286

12387
} // namespace operators
@@ -132,7 +96,11 @@ REGISTER_OPERATOR(pixel_shuffle, ops::PixelShuffleOp, ops::PixelShuffleOpMaker,
13296
ops::PixelShuffleGradMaker<paddle::imperative::OpBase>,
13397
PixelShuffleInferShapeFunctor);
13498

135-
REGISTER_OPERATOR(pixel_shuffle_grad, ops::PixelShuffleGradOp);
99+
DECLARE_INFER_SHAPE_FUNCTOR(pixel_shuffle_grad,
100+
PixelShuffleGradInferShapeFunctor,
101+
PD_INFER_META(phi::PixelShuffleGradInferMeta));
102+
REGISTER_OPERATOR(pixel_shuffle_grad, ops::PixelShuffleGradOp,
103+
PixelShuffleGradInferShapeFunctor);
136104

137105
REGISTER_OP_VERSION(pixel_shuffle)
138106
.AddCheckpoint(

paddle/phi/infermeta/unary.cc

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1280,6 +1280,36 @@ void PixelShuffleInferMeta(const MetaTensor& x,
12801280
out->set_dims(output_dims);
12811281
}
12821282

1283+
void PixelShuffleGradInferMeta(const MetaTensor& out_grad,
1284+
int upscale_factor,
1285+
const std::string& data_format,
1286+
MetaTensor* x_grad) {
1287+
auto do_dims = out_grad.dims();
1288+
PADDLE_ENFORCE_EQ(do_dims.size(),
1289+
4,
1290+
phi::errors::InvalidArgument(
1291+
"Input should be a 4-D tensor of format [N, C, H, W] "
1292+
"or [N, H, W, C], but got %u.",
1293+
do_dims.size()));
1294+
1295+
const bool channel_last = (data_format == "NHWC");
1296+
1297+
auto dx_dims = do_dims;
1298+
dx_dims[0] = do_dims[0];
1299+
1300+
if (!channel_last) {
1301+
dx_dims[1] = do_dims[1] * (upscale_factor * upscale_factor);
1302+
dx_dims[2] = do_dims[2] / upscale_factor;
1303+
dx_dims[3] = do_dims[3] / upscale_factor;
1304+
} else {
1305+
dx_dims[1] = do_dims[1] / upscale_factor;
1306+
dx_dims[2] = do_dims[2] / upscale_factor;
1307+
dx_dims[3] = do_dims[3] * (upscale_factor * upscale_factor);
1308+
}
1309+
x_grad->set_dims(dx_dims);
1310+
x_grad->set_dtype(out_grad.dtype());
1311+
}
1312+
12831313
void PNormInferMeta(const MetaTensor& x,
12841314
float porder,
12851315
int axis,

paddle/phi/infermeta/unary.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -195,6 +195,11 @@ void PixelShuffleInferMeta(const MetaTensor& x,
195195
const std::string& data_format,
196196
MetaTensor* out);
197197

198+
void PixelShuffleGradInferMeta(const MetaTensor& out_grad,
199+
int upscale_factor,
200+
const std::string& data_format,
201+
MetaTensor* x_grad);
202+
198203
void PNormInferMeta(const MetaTensor& x,
199204
float porder,
200205
int axis,

python/paddle/fluid/tests/unittests/test_conj_op.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@
3232
class TestConjOp(OpTest):
3333
def setUp(self):
3434
self.op_type = "conj"
35+
self.python_api = paddle.tensor.conj
3536
self.init_dtype_type()
3637
self.init_input_output()
3738
self.init_grad_input_output()
@@ -53,14 +54,15 @@ def init_grad_input_output(self):
5354
self.grad_in = np.conj(self.grad_out)
5455

5556
def test_check_output(self):
56-
self.check_output()
57+
self.check_output(check_eager=True)
5758

5859
def test_check_grad_normal(self):
5960
self.check_grad(
6061
['X'],
6162
'Out',
6263
user_defined_grads=[self.grad_in],
63-
user_defined_grad_outputs=[self.grad_out])
64+
user_defined_grad_outputs=[self.grad_out],
65+
check_eager=True)
6466

6567

6668
class TestComplexConjOp(unittest.TestCase):

python/paddle/fluid/tests/unittests/test_flip.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,7 @@ def test_dygraph(self):
6767
class TestFlipOp(OpTest):
6868
def setUp(self):
6969
self.op_type = 'flip'
70+
self.python_api = paddle.tensor.flip
7071
self.init_test_case()
7172
self.inputs = {'X': np.random.random(self.in_shape).astype('float64')}
7273
self.init_attrs()
@@ -76,10 +77,10 @@ def init_attrs(self):
7677
self.attrs = {"axis": self.axis}
7778

7879
def test_check_output(self):
79-
self.check_output()
80+
self.check_output(check_eager=True)
8081

8182
def test_check_grad(self):
82-
self.check_grad(["X"], "Out")
83+
self.check_grad(["X"], "Out", check_eager=True)
8384

8485
def init_test_case(self):
8586
self.in_shape = (6, 4, 2, 3)
@@ -131,4 +132,5 @@ def init_test_case(self):
131132

132133

133134
if __name__ == "__main__":
135+
paddle.enable_static()
134136
unittest.main()

python/paddle/fluid/tests/unittests/test_pixel_shuffle.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,7 @@ def pixel_shuffle_np(x, up_factor, data_format="NCHW"):
5252
class TestPixelShuffleOp(OpTest):
5353
def setUp(self):
5454
self.op_type = "pixel_shuffle"
55+
self.python_api = paddle.nn.functional.pixel_shuffle
5556
self.init_data_format()
5657
n, c, h, w = 2, 9, 4, 4
5758

@@ -73,10 +74,10 @@ def init_data_format(self):
7374
self.format = "NCHW"
7475

7576
def test_check_output(self):
76-
self.check_output()
77+
self.check_output(check_eager=True)
7778

7879
def test_check_grad(self):
79-
self.check_grad(['X'], 'Out')
80+
self.check_grad(['X'], 'Out', check_eager=True)
8081

8182

8283
class TestChannelLast(TestPixelShuffleOp):
@@ -220,4 +221,5 @@ def error_data_format_layer():
220221

221222

222223
if __name__ == '__main__':
224+
paddle.enable_static()
223225
unittest.main()

python/paddle/tensor/manipulation.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -458,6 +458,10 @@ def flip(x, axis, name=None):
458458
"""
459459
if isinstance(axis, int):
460460
axis = [axis]
461+
462+
if in_dygraph_mode():
463+
return _C_ops.final_state_flip(x, axis)
464+
461465
if paddle.in_dynamic_mode():
462466
return _C_ops.flip(x, "axis", axis)
463467

python/paddle/tensor/math.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3349,6 +3349,9 @@ def conj(x, name=None):
33493349
# [(4-4j), (5-5j), (6-6j)]])
33503350
33513351
"""
3352+
if in_dygraph_mode():
3353+
return _C_ops.final_state_conj(x)
3354+
33523355
if paddle.in_dynamic_mode():
33533356
return _C_ops.conj(x)
33543357

python/paddle/utils/code_gen/api.yaml

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -345,6 +345,7 @@
345345
func : UnchangedInferMeta
346346
kernel :
347347
func : conj
348+
backward : conj_grad
348349

349350
- api : conv2d
350351
args : (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
@@ -628,6 +629,7 @@
628629
func : FlipInferMeta
629630
kernel :
630631
func : flip
632+
backward : flip_grad
631633

632634
- api : floor
633635
args : (Tensor x)
@@ -1382,7 +1384,7 @@
13821384
func : PixelShuffleInferMeta
13831385
kernel :
13841386
func : pixel_shuffle
1385-
# backward : pixel_shuffle_grad
1387+
backward : pixel_shuffle_grad
13861388

13871389
# poisson // no need grad
13881390
- api : poisson

python/paddle/utils/code_gen/backward.yaml

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -208,6 +208,16 @@
208208
output : Tensor[](x_grad)
209209
invoke : concat_grad_impl(x, out_grad, axis)
210210

211+
- backward_api : conj_grad
212+
forward : conj (Tensor x) -> Tensor(out)
213+
args : (Tensor out_grad)
214+
output : Tensor(x_grad)
215+
infer_meta :
216+
func : UnchangedInferMeta
217+
param: [out_grad]
218+
kernel :
219+
func : conj
220+
211221
- backward_api : conv2d_grad
212222
forward : conv2d (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) -> Tensor(out)
213223
args : (Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
@@ -436,6 +446,16 @@
436446
backend: out_grad
437447
layout: out_grad
438448

449+
- backward_api : flip_grad
450+
forward : flip (Tensor x, int[] axis) -> Tensor(out)
451+
args : (Tensor out_grad, int[] axis)
452+
output : Tensor(x_grad)
453+
infer_meta :
454+
func : UnchangedInferMeta
455+
param: [out_grad]
456+
kernel :
457+
func : flip
458+
439459
- backward_api : floor_grad
440460
forward : floor(Tensor x) -> Tensor(out)
441461
args : (Tensor out_grad)
@@ -990,6 +1010,15 @@
9901010
kernel :
9911011
func : pad3d_grad
9921012

1013+
- backward_api : pixel_shuffle_grad
1014+
forward : pixel_shuffle (Tensor x, int upscale_factor, str data_format) -> Tensor(out)
1015+
args : (Tensor out_grad, int upscale_factor, str data_format)
1016+
output : Tensor(x_grad)
1017+
infer_meta :
1018+
func : PixelShuffleGradInferMeta
1019+
kernel :
1020+
func : pixel_shuffle_grad
1021+
9931022
- backward_api : pool2d_grad
9941023
forward : pool2d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
9951024
args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)

0 commit comments

Comments
 (0)