@@ -1117,12 +1117,14 @@ def conv2d(input,
1117
1117
filter_size ,
1118
1118
stride = 1 ,
1119
1119
padding = 0 ,
1120
+ dilation = 1 ,
1120
1121
groups = None ,
1121
1122
param_attr = None ,
1122
1123
bias_attr = None ,
1123
1124
use_cudnn = True ,
1124
1125
use_mkldnn = False ,
1125
- act = None ):
1126
+ act = None ,
1127
+ name = None ):
1126
1128
"""
1127
1129
**Convlution2D Layer**
1128
1130
@@ -1183,6 +1185,9 @@ def conv2d(input,
1183
1185
padding(int|tuple): The padding size. If padding is a tuple, it must
1184
1186
contain two integers, (padding_H, padding_W). Otherwise, the
1185
1187
padding_H = padding_W = padding. Default: padding = 0.
1188
+ dilation(int|tuple): The dilation size. If dilation is a tuple, it must
1189
+ contain two integers, (dilation_H, dilation_W). Otherwise, the
1190
+ dilation_H = dilation_W = dilation. Default: dilation = 1.
1186
1191
groups(int): The groups number of the Conv2d Layer. According to grouped
1187
1192
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
1188
1193
the first half of the filters is only connected to the first half
@@ -1193,6 +1198,8 @@ def conv2d(input,
1193
1198
use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn
1194
1199
library is installed. Default: True
1195
1200
act(str): Activation type. Default: None
1201
+ name(str|None): A name for this layer(optional). If set None, the layer
1202
+ will be named automatically.
1196
1203
1197
1204
Returns:
1198
1205
Variable: The tensor variable storing the convolution and \
@@ -1233,6 +1240,7 @@ def conv2d(input,
1233
1240
filter_size = utils .convert_to_list (filter_size , 2 , 'filter_size' )
1234
1241
stride = utils .convert_to_list (stride , 2 , 'stride' )
1235
1242
padding = utils .convert_to_list (padding , 2 , 'padding' )
1243
+ dilation = utils .convert_to_list (dilation , 2 , 'dilation' )
1236
1244
1237
1245
if not isinstance (use_cudnn , bool ):
1238
1246
raise ValueError ("use_cudnn should be True or False" )
@@ -1262,6 +1270,7 @@ def _get_default_param_initializer():
1262
1270
attrs = {
1263
1271
'strides' : stride ,
1264
1272
'paddings' : padding ,
1273
+ 'dilations' : dilation ,
1265
1274
'groups' : groups ,
1266
1275
'use_cudnn' : use_cudnn ,
1267
1276
'use_mkldnn' : use_mkldnn
@@ -1670,7 +1679,9 @@ def conv2d_transpose(input,
1670
1679
stride = 1 ,
1671
1680
dilation = 1 ,
1672
1681
param_attr = None ,
1682
+ bias_attr = None ,
1673
1683
use_cudnn = True ,
1684
+ act = None ,
1674
1685
name = None ):
1675
1686
"""
1676
1687
**Convlution2D transpose layer**
@@ -1739,8 +1750,10 @@ def conv2d_transpose(input,
1739
1750
dilation_H = dilation_W = dilation. Default: dilation = 1.
1740
1751
param_attr(ParamAttr): The parameters to the Conv2d_transpose Layer.
1741
1752
Default: None
1753
+ bias_attr(ParamAttr): Bias parameter for the Conv2d_transpose layer. Default: None
1742
1754
use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn
1743
1755
library is installed. Default: True
1756
+ act(str): Activation type. Default: None
1744
1757
name(str|None): A name for this layer(optional). If set None, the layer
1745
1758
will be named automatically.
1746
1759
@@ -1793,19 +1806,21 @@ def conv2d_transpose(input,
1793
1806
img_filter = helper .create_parameter (
1794
1807
dtype = input .dtype , shape = filter_shape , attr = helper .param_attr )
1795
1808
1796
- out = helper .create_tmp_variable (dtype = input .dtype )
1809
+ pre_bias = helper .create_tmp_variable (dtype = input .dtype )
1797
1810
helper .append_op (
1798
1811
type = 'conv2d_transpose' ,
1799
1812
inputs = {'Input' : [input ],
1800
1813
'Filter' : [img_filter ]},
1801
- outputs = {'Output' : out },
1814
+ outputs = {'Output' : pre_bias },
1802
1815
attrs = {
1803
1816
'strides' : stride ,
1804
1817
'paddings' : padding ,
1805
1818
'dilations' : dilation ,
1806
1819
'use_cudnn' : use_cudnn
1807
1820
})
1808
1821
1822
+ pre_act = helper .append_bias_op (pre_bias , dim_start = 1 , dim_end = 2 )
1823
+ out = helper .append_activation (pre_act )
1809
1824
return out
1810
1825
1811
1826
0 commit comments