@@ -1403,7 +1403,7 @@ def conv2d(input,
1403
1403
W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
1404
1404
1405
1405
Args:
1406
- input (Variable ): The input is 4-D Tensor with shape [N, C, H, W], the data type
1406
+ input (Tensor): The input is a 4-D Tensor with shape [N, C, H, W], the data type
1407
1407
of input is float16 or float32 or float64.
1408
1408
num_filters(int): The number of filter. It is as same as the output
1409
1409
image channel.
@@ -1456,9 +1456,9 @@ def conv2d(input,
1456
1456
`[batch_size, input_channels, input_height, input_width]`.
1457
1457
1458
1458
Returns:
1459
- A Variable holding Tensor representing the conv2d, whose data type is the
1460
- same with input. If act is None, the tensor variable storing the convolution
1461
- result, and if act is not None, the tensor variable storing convolution
1459
+ A Tensor representing the conv2d, whose data type is the
1460
+ same as the input. If act is None, the tensor storing the convolution
1461
+ result, and if act is not None, the tensor storing convolution
1462
1462
and non-linearity activation result.
1463
1463
1464
1464
Raises:
@@ -1477,12 +1477,12 @@ def conv2d(input,
1477
1477
Examples:
1478
1478
.. code-block:: python
1479
1479
1480
- import paddle.fluid as fluid
1481
1480
import paddle
1482
1481
paddle.enable_static()
1483
1482
1484
- data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
1485
- conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
1483
+ data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
1484
+ conv2d = paddle.static.nn.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
1485
+ print(conv2d.shape) # [-1, 2, 30, 30]
1486
1486
"""
1487
1487
1488
1488
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
@@ -3805,7 +3805,7 @@ def conv2d_transpose(input,
3805
3805
conv2d_transpose can compute the kernel size automatically.
3806
3806
3807
3807
Args:
3808
- input(Variable ): 4-D Tensor with [N, C, H, W] or [N, H, W, C] format,
3808
+ input(Tensor): 4-D Tensor with [N, C, H, W] or [N, H, W, C] format,
3809
3809
its data type is float32 or float64.
3810
3810
num_filters(int): The number of the filter. It is as same as the output
3811
3811
image channel.
@@ -3823,15 +3823,14 @@ def conv2d_transpose(input,
3823
3823
stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
3824
3824
If stride is a tuple, it must contain two integers, (stride_height, stride_width).
3825
3825
Otherwise, stride_height = stride_width = stride. Default: stride = 1.
3826
- padding(int|list|str|tuple, optional): The padding size. The padding argument effectively adds
3827
- `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a
3828
- string, either 'VALID' or 'SAME' supported, which is the padding algorithm.
3829
- If `padding` is a tuple or list, it could be in three forms:
3830
- `[pad_height, pad_width]` or
3831
- `[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and
3832
- when `data_format` is `'NCHW'`,
3833
- `padding` can be in the form `[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
3834
- when `data_format` is `'NHWC'`, `padding` can be in the form
3826
+ padding(str|int|list|tuple, optional): The padding size. It means the number of zero-paddings
3827
+ on both sides for each dimension. If `padding` is a string, either 'VALID' or
3828
+ 'SAME', which is the padding algorithm. If `padding` is a tuple or list,
3829
+ it could be in three forms: `[pad_height, pad_width]` or
3830
+ `[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
3831
+ and when `data_format` is `"NCHW"`, `padding` can be in the form
3832
+ `[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
3833
+ When `data_format` is `"NHWC"`, `padding` can be in the form
3835
3834
`[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
3836
3835
Default: padding = 0.
3837
3836
dilation(int|tuple, optional): The dilation size. It means the spacing between the kernel points.
@@ -3869,11 +3868,11 @@ def conv2d_transpose(input,
3869
3868
`[batch_size, input_channels, input_height, input_width]`.
3870
3869
3871
3870
Returns:
3872
- A Variable holding Tensor representing the conv2d_transpose, whose
3871
+ A Tensor representing the conv2d_transpose, whose
3873
3872
data type is the same as the input and the shape is (num_batches, channels, out_h,
3874
- out_w) or (num_batches, out_h, out_w, channels). If act is None, the tensor variable
3873
+ out_w) or (num_batches, out_h, out_w, channels). If act is None, the tensor
3875
3874
storing the transposed convolution result, and if act is not None, the
3876
- tensor variable storing transposed convolution and non-linearity activation
3875
+ tensor storing transposed convolution and non-linearity activation
3877
3876
result.
3878
3877
3879
3878
Raises:
@@ -3892,11 +3891,12 @@ def conv2d_transpose(input,
3892
3891
Examples:
3893
3892
.. code-block:: python
3894
3893
3895
- import paddle.fluid as fluid
3896
3894
import paddle
3897
3895
paddle.enable_static()
3898
- data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
3899
- conv2d_transpose = fluid.layers.conv2d_transpose(input=data, num_filters=2, filter_size=3)
3896
+
3897
+ data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
3898
+ conv2d_transpose = paddle.static.nn.conv2d_transpose(input=data, num_filters=2, filter_size=3)
3899
+ print(conv2d_transpose.shape) # [-1, 2, 34, 34]
3900
3900
"""
3901
3901
assert param_attr is not False, "param_attr should not be False in conv2d_transpose."
3902
3902
if data_format not in ['NCHW', 'NHWC']:
0 commit comments