@@ -1540,6 +1540,68 @@ def __init__(
         # self.all_drop = dict(layer.all_drop)
         # self.all_layers.extend( [self.outputs] )
 
+## Initializers for Convolutional Layers
+def deconv2d_bilinear_upsampling_initializer(shape):
1545+ """Returns initializer that can be passed to DeConv2dLayer to initalize the
1546+ weights to correspond to channel wise bilinear upsampling.
1547+ Used in some segmantic segmentation approches such as [FCN](https://arxiv.org/abs/1605.06211)
1548+
1549+ Parameters
1550+ ----------
1551+ shape : list of shape
1552+ shape of the filters, [height, width, output_channels, in_channels], must match that passed to DeConv2dLayer
1553+
1554+ Returns
1555+ ----------
1556+ tf.constant_initializer
1557+ with weights set to correspond to per channel bilinear upsampling when passed as W_int in DeConv2dLayer
1558+
1559+ Examples
1560+ --------
1561+ >>> rescale_factor = 2 #upsampling by a factor of 2, ie e.g 100->200
1562+ >>> filter_size = (2 * rescale_factor - rescale_factor % 2) #Corresponding bilinear filter size
1563+ >>> num_in_channels = 3
1564+ >>> num_out_channels = 3
1565+ >>> deconv_filter_shape = [filter_size, filter_size, num_out_channels, num_in_channels]
1566+ >>> x = tf.placeholder(tf.float32, [1, imsize, imsize, num_channels])
1567+ >>> network = tl.layers.InputLayer(x, name='input_layer')
1568+ >>> bilinear_init = deconv2d_bilinear_upsampling_initializer(shape=filter_shape)
1569+ >>> network = tl.layers.DeConv2dLayer(network,
1570+ shape = filter_shape,
1571+ output_shape = [1, imsize*rescale_factor, imsize*rescale_factor, num_out_channels],
1572+ strides=[1, rescale_factor, rescale_factor, 1],
1573+ W_init=bilinear_init,
1574+ padding='SAME',
1575+ act=tf.identity, name='g/h1/decon2d')
1576+ """
+    if shape[0] != shape[1]:
+        raise Exception('deconv2d_bilinear_upsampling_initializer only supports symmetrical filter sizes')
+    if shape[3] < shape[2]:
+        raise Exception('deconv2d_bilinear_upsampling_initializer behaviour is not defined for num_in_channels < num_out_channels')
+
+    filter_size = shape[0]
+    num_out_channels = shape[2]
+    num_in_channels = shape[3]
+
+    # Create bilinear filter kernel as numpy array
+    bilinear_kernel = np.zeros([filter_size, filter_size], dtype=np.float32)
+    scale_factor = (filter_size + 1) // 2
+    if filter_size % 2 == 1:
+        center = scale_factor - 1
+    else:
+        center = scale_factor - 0.5
+    for x in range(filter_size):
+        for y in range(filter_size):
+            bilinear_kernel[x, y] = (1 - abs(x - center) / scale_factor) * \
+                                    (1 - abs(y - center) / scale_factor)
+    weights = np.zeros((filter_size, filter_size, num_out_channels, num_in_channels))
+    for i in range(num_out_channels):
+        weights[:, :, i, i] = bilinear_kernel
+
+    # Assign numpy array to constant_initializer and pass to get_variable
+    bilinear_weights_init = tf.constant_initializer(value=weights, dtype=tf.float32)
+    return bilinear_weights_init
+
 ## Convolutional layer (Simplified)
 def Conv2d(net, n_filter=32, filter_size=(3, 3), strides=(1, 1), act=None,
         padding='SAME', W_init=tf.truncated_normal_initializer(stddev=0.02), b_init=tf.constant_initializer(value=0.0),
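For quick sanity checking outside of TensorFlow, the bilinear kernel built by the nested loop above can be reproduced with plain NumPy as the outer product of two 1-D triangle (hat) filters. The sketch below is illustrative only and is not part of the patch; the helper name `bilinear_kernel_2d` is an assumption for the example, while the `rescale_factor -> filter_size` formula follows the docstring.

import numpy as np

def bilinear_kernel_2d(filter_size):
    # Same construction as in deconv2d_bilinear_upsampling_initializer,
    # expressed as the outer product of two 1-D triangle (hat) filters.
    scale_factor = (filter_size + 1) // 2
    if filter_size % 2 == 1:
        center = scale_factor - 1
    else:
        center = scale_factor - 0.5
    positions = np.arange(filter_size, dtype=np.float32)
    hat = 1 - np.abs(positions - center) / scale_factor
    return np.outer(hat, hat)

if __name__ == '__main__':
    rescale_factor = 2
    filter_size = 2 * rescale_factor - rescale_factor % 2   # -> 4
    print(bilinear_kernel_2d(filter_size))
    # [[0.0625 0.1875 0.1875 0.0625]
    #  [0.1875 0.5625 0.5625 0.1875]
    #  [0.1875 0.5625 0.5625 0.1875]
    #  [0.0625 0.1875 0.1875 0.0625]]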