Commit c226cb3

update deconv2d function-->class, support dilation (#496)
* update deconv2d function-->class, support dilation
* fix review comment
* fix review comment
* fix yapf
* fix review comment
1 parent af8f935 commit c226cb3

File tree

4 files changed: 75 additions & 86 deletions

* README.md
* docs/modules/layers.rst
* example/tutorial_mnist_simple.py
* tensorlayer/layers/convolution.py

README.md

Lines changed: 1 addition & 1 deletion
```diff
@@ -12,7 +12,7 @@
   <img src="img/tl_transparent_logo.png" width="50%" height="30%"/>
 </div>
 </a>
-
+
 [![Codacy Badge](https://api.codacy.com/project/badge/Grade/ca2a29ddcf7445588beff50bee5406d9)](https://app.codacy.com/app/tensorlayer/tensorlayer?utm_source=github.com&utm_medium=referral&utm_content=tensorlayer/tensorlayer&utm_campaign=badger)
 [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/tensorlayer/Lobby#?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
 [![Build Status](https://travis-ci.org/tensorlayer/tensorlayer.svg?branch=master)](https://travis-ci.org/tensorlayer/tensorlayer)
```

docs/modules/layers.rst

Lines changed: 1 addition & 1 deletion
```diff
@@ -492,7 +492,7 @@ APIs may better for you.
 
 2D Deconvolution
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. autofunction:: DeConv2d
+.. autoclass:: DeConv2d
 
 3D Deconvolution
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
```

example/tutorial_mnist_simple.py

Lines changed: 1 addition & 2 deletions
```diff
@@ -2,14 +2,13 @@
 # -*- coding: utf-8 -*-
 
 import tensorflow as tf
-
 import tensorlayer as tl
 
 sess = tf.InteractiveSession()
 
 # prepare data
 X_train, y_train, X_val, y_val, X_test, y_test = \
-    tl.files.load_mnist_dataset(shape=(-1,784))
+    tl.files.load_mnist_dataset(shape=(-1, 784))
 # define placeholder
 x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
 y_ = tf.placeholder(tf.int64, shape=[None], name='y_')
```

tensorlayer/layers/convolution.py

Lines changed: 72 additions & 82 deletions
```diff
@@ -1415,6 +1415,7 @@ def __init__(
             strides=(1, 1),
             act=tf.identity,
             padding='SAME',
+            dilation_rate=(1, 1),
             W_init=tf.truncated_normal_initializer(stddev=0.02),
             b_init=tf.constant_initializer(value=0.0),
             W_init_args=None,
@@ -1472,7 +1473,7 @@ def __init__(
                 strides=strides,
                 padding=padding,
                 data_format='channels_last',
-                dilation_rate=(1, 1),
+                dilation_rate=dilation_rate,
                 activation=act,
                 use_bias=(False if b_init is None else True),
                 kernel_initializer=W_init,  #None,
```
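
For orientation, here is a minimal usage sketch of the `dilation_rate` argument introduced by the two hunks above. It assumes these hunks belong to TensorLayer's `Conv2d` class (the hunk headers only show `def __init__`) and a TF >= 1.3 / TL 1.8.x graph-mode setup; the input shape, the `(2, 2)` rate, and the layer names are illustrative, not taken from the commit.

```python
# Hypothetical sketch: exercising the new `dilation_rate` argument.
# Assumes the hunks above modify tl.layers.Conv2d (TF >= 1.3, TL 1.8.x).
import tensorflow as tf
import tensorlayer as tl

x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name='x')
net = tl.layers.InputLayer(x, name='input')
# dilation_rate is forwarded to tf.layers (see the second hunk);
# (2, 2) is an illustrative value, not taken from the commit.
net = tl.layers.Conv2d(
    net, n_filter=32, filter_size=(3, 3), strides=(1, 1),
    dilation_rate=(2, 2), act=tf.nn.relu, padding='SAME', name='conv2d_dilated')
```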
```diff
@@ -1520,20 +1521,7 @@ def __init__(
             self.all_params.append(W)
 
 
-@deprecated_alias(layer='prev_layer', n_out_channel='n_filter', end_support_version=1.9)  # TODO remove this line for the 1.9 release
-def deconv2d(prev_layer,
-             n_filter,
-             filter_size=(3, 3),
-             out_size=(30, 30),
-             strides=(2, 2),
-             padding='SAME',
-             batch_size=None,
-             act=tf.identity,
-             W_init=tf.truncated_normal_initializer(stddev=0.02),
-             b_init=tf.constant_initializer(value=0.0),
-             W_init_args=None,
-             b_init_args=None,
-             name='decnn2d'):
+class DeConv2d(Layer):
     """Simplified version of :class:`DeConv2dLayer`.
 
     Parameters
@@ -1550,8 +1538,8 @@ def deconv2d(prev_layer,
         The stride step (height, width).
     padding : str
         The padding algorithm type: "SAME" or "VALID".
-    batch_size : int
-        Require if TF version < 1.3, int or None.
+    batch_size : int or None
+        Require if TF < 1.3, int or None.
         If None, try to find the `batch_size` from the first dim of net.outputs (you should define the `batch_size` in the input placeholder).
     act : activation function
         The activation function of this layer.
@@ -1560,79 +1548,81 @@ def deconv2d(prev_layer,
     b_init : initializer or None
         The initializer for the bias vector. If None, skip biases.
     W_init_args : dictionary
-        The arguments for the weight matrix initializer.
+        The arguments for the weight matrix initializer (For TF < 1.3).
     b_init_args : dictionary
-        The arguments for the bias vector initializer.
+        The arguments for the bias vector initializer (For TF < 1.3).
     name : str
         A unique layer name.
 
-    Returns
-    -------
-    :class:`Layer`
-        A :class:`DeConv2dLayer` object.
-
     """
 
-    logging.info("DeConv2d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__))
+    @deprecated_alias(layer='prev_layer', n_out_channel='n_filter', end_support_version=1.9)  # TODO remove this line for the 1.9 release
+    def __init__(
+            self,
+            prev_layer,
+            n_filter=32,
+            filter_size=(3, 3),
+            out_size=(30, 30),  # remove
+            strides=(2, 2),
+            padding='SAME',
+            batch_size=None,  # remove
+            act=tf.identity,
+            W_init=tf.truncated_normal_initializer(stddev=0.02),
+            b_init=tf.constant_initializer(value=0.0),
+            W_init_args=None,  # remove
+            b_init_args=None,  # remove
+            name='decnn2d'):
+        super(DeConv2d, self).__init__(prev_layer=prev_layer, name=name)
+        logging.info("DeConv2d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__))
 
-    if W_init_args is None:
-        W_init_args = {}
-    if b_init_args is None:
-        b_init_args = {}
-    if act is None:
-        act = tf.identity
-
-    if len(strides) != 2:
-        raise ValueError("len(strides) should be 2, DeConv2d and DeConv2dLayer are different.")
-
-    if tf.__version__ > '1.3':
-        inputs = prev_layer.outputs
-        scope_name = tf.get_variable_scope().name
-        # if scope_name:
-        # whole_name = scope_name + '/' + name
-        # else:
-        # whole_name = name
-        net_new = Layer(prev_layer=None, name=name)
-        # with tf.name_scope(name):
-        with tf.variable_scope(name) as vs:
-            net_new.outputs = tf.contrib.layers.conv2d_transpose(
-                inputs=inputs,
-                num_outputs=n_filter,
+        if W_init_args is None:
+            W_init_args = {}
+        if b_init_args is None:
+            b_init_args = {}
+        if act is None:
+            act = tf.identity
+
+        if len(strides) != 2:
+            raise ValueError("len(strides) should be 2, DeConv2d and DeConv2dLayer are different.")
+
+        if tf.__version__ > '1.3':
+            self.inputs = prev_layer.outputs
+            # scope_name = tf.get_variable_scope().name
+            conv2d_transpose = tf.layers.Conv2DTranspose(
+                filters=n_filter,
                 kernel_size=filter_size,
-                stride=strides,
+                strides=strides,
                 padding=padding,
-                activation_fn=act,
-                weights_initializer=W_init,
-                biases_initializer=b_init,
-                scope=name)
-            new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)
-            net_new.all_layers = list(prev_layer.all_layers)
-            net_new.all_params = list(prev_layer.all_params)
-            net_new.all_drop = dict(prev_layer.all_drop)
-            net_new.all_layers.extend([net_new.outputs])
-            net_new.all_params.extend(new_variables)
-        return net_new
-    else:
-        if batch_size is None:
-            # batch_size = tf.shape(net.outputs)[0]
-            fixed_batch_size = prev_layer.outputs.get_shape().with_rank_at_least(1)[0]
-            if fixed_batch_size.value:
-                batch_size = fixed_batch_size.value
-            else:
-                from tensorflow.python.ops import array_ops
-                batch_size = array_ops.shape(prev_layer.outputs)[0]
-        return DeConv2dLayer(
-            prev_layer=prev_layer,
-            act=act,
-            shape=(filter_size[0], filter_size[1], n_filter, int(prev_layer.outputs.get_shape()[-1])),
-            output_shape=(batch_size, int(out_size[0]), int(out_size[1]), n_filter),
-            strides=(1, strides[0], strides[1], 1),
-            padding=padding,
-            W_init=W_init,
-            b_init=b_init,
-            W_init_args=W_init_args,
-            b_init_args=b_init_args,
-            name=name)
+                activation=act,
+                kernel_initializer=W_init,
+                bias_initializer=b_init,
+                name=name)
+            self.outputs = conv2d_transpose(self.inputs)
+            new_variables = conv2d_transpose.weights  # new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)
+            self.all_layers.append(self.outputs)
+            self.all_params.extend(new_variables)
+        else:
+            raise RuntimeError("please update TF > 1.3 or downgrade TL < 1.8.4")
+            # if batch_size is None:
+            # # batch_size = tf.shape(net.outputs)[0]
+            # fixed_batch_size = prev_layer.outputs.get_shape().with_rank_at_least(1)[0]
+            # if fixed_batch_size.value:
+            # batch_size = fixed_batch_size.value
+            # else:
+            # from tensorflow.python.ops import array_ops
+            # batch_size = array_ops.shape(prev_layer.outputs)[0]
+            # return DeConv2dLayer(
+            # prev_layer=prev_layer,
+            # act=act,
+            # shape=(filter_size[0], filter_size[1], n_filter, int(prev_layer.outputs.get_shape()[-1])),
+            # output_shape=(batch_size, int(out_size[0]), int(out_size[1]), n_filter),
+            # strides=(1, strides[0], strides[1], 1),
+            # padding=padding,
+            # W_init=W_init,
+            # b_init=b_init,
+            # W_init_args=W_init_args,
+            # b_init_args=b_init_args,
+            # name=name)
 
 
 class DeConv3d(Layer):
```
```diff
@@ -2022,4 +2012,4 @@ def __init__(
 AtrousConv1dLayer = atrous_conv1d
 Conv1d = conv1d
 # Conv2d = conv2d
-DeConv2d = deconv2d
+# DeConv2d = deconv2d
```
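
A hedged sketch of the class-based API after this commit. `DeConv2d` is now a `Layer` subclass wrapping `tf.layers.Conv2DTranspose`, so instantiating the class looks the same as calling the old `deconv2d` function and existing call sites keep working on TF >= 1.3; on older TF the new code raises `RuntimeError` instead of falling back to `DeConv2dLayer`. Shapes and names below are illustrative, not taken from the commit.

```python
# Hypothetical sketch of the new class-based DeConv2d (TF >= 1.3, TL 1.8.x assumed).
import tensorflow as tf
import tensorlayer as tl

x = tf.placeholder(tf.float32, shape=[None, 14, 14, 64], name='x')
net = tl.layers.InputLayer(x, name='input')
# Instantiating the class reads the same as the old deconv2d(...) call.
net = tl.layers.DeConv2d(
    net, n_filter=32, filter_size=(3, 3), strides=(2, 2),
    padding='SAME', act=tf.nn.relu, name='deconv2d')
print(net.outputs)  # spatial size upsampled by the stride, e.g. (?, 28, 28, 32)
```

Note that `out_size`, `batch_size`, `W_init_args`, and `b_init_args` are kept only for signature compatibility (marked `# remove` in the diff) and are effectively unused on the TF >= 1.3 path.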
