from tensorflow.python.util.deprecation import deprecated

__all__ = [
-    'ramp',
    'leaky_relu',
+    'leaky_relu6',
+    'leaky_twice_relu6',
+    'lrelu',
+    'lrelu6',
+    'ltrelu6',
+    'ramp',
    'swish',
    'sign',
+    'htanh',
+    'hard_tanh',
    'pixel_wise_softmax',
-    'linear',
-    'lrelu',
]

@@ -39,10 +44,16 @@ def ramp(x, v_min=0, v_max=1, name=None):
    return tf.clip_by_value(x, clip_value_min=v_min, clip_value_max=v_max, name=name)


-def leaky_relu(x, alpha=0.1, name="lrelu"):
-    """LeakyReLU, Shortcut is ``lrelu``.
+@deprecated("2018-09-30", "This API is deprecated. Please use `tf.nn.leaky_relu` instead.")
+def leaky_relu(x, alpha=0.2, name="leaky_relu"):
+    """leaky_relu can be used through its shortcut: :func:`tl.act.lrelu`.
+
+    This function is a modified version of ReLU, introducing a nonzero gradient for negative input. Introduced by the paper:
+    `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__

-    Modified version of ReLU, introducing a nonzero gradient for negative input.
+    The function returns the following results:
+    - When x < 0: ``f(x) = alpha * x``.
+    - When x >= 0: ``f(x) = x``.

    Parameters
    ----------
@@ -55,6 +66,7 @@ def leaky_relu(x, alpha=0.1, name="lrelu"):

    Examples
    --------
+    >>> import tensorlayer as tl
    >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.lrelu(x, 0.2), name='dense')
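+
+    For illustration, a small numeric example of the piecewise behaviour defined above (the
+    values in the comment follow from that definition; in graph mode, evaluate the tensor in
+    a session to inspect them):
+
+    >>> import tensorflow as tf
+    >>> y = tl.act.leaky_relu(tf.constant([-2.0, 3.0]), alpha=0.2)
+    >>> # f(-2.0) = 0.2 * -2.0 = -0.4 and f(3.0) = 3.0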

    Returns
@@ -64,16 +76,122 @@ def leaky_relu(x, alpha=0.1, name="lrelu"):

    References
    ----------
-    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models, Maas et al. (2013)`
-      http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf
+    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__

    """
-    # with tf.name_scope(name) as scope:
-    # x = tf.nn.relu(x)
-    # m_x = tf.nn.relu(-x)
-    # x -= alpha * m_x
-    x = tf.maximum(x, alpha * x, name=name)
-    return x
+
+    if not (0 < alpha <= 1):
+        raise ValueError("`alpha` value must be in (0, 1]")
+
+    with tf.name_scope(name, "leaky_relu") as name_scope:
+        x = tf.convert_to_tensor(x, name="features")
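+        # For 0 < alpha <= 1, max(x, alpha * x) equals x when x >= 0 and alpha * x when x < 0,
+        # which is exactly the leaky ReLU defined in the docstring above.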
+        return tf.maximum(x, alpha * x, name=name_scope)
+
+
+def leaky_relu6(x, alpha=0.2, name="leaky_relu6"):
+    """:func:`leaky_relu6` can be used through its shortcut: :func:`tl.act.lrelu6`.
+
+    This activation function is a modified version of :func:`leaky_relu`, introduced by the following paper:
+    `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
+
+    This activation function also follows the behaviour of the activation function :func:`tf.nn.relu6`, introduced by the following paper:
+    `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__
+
+    The function returns the following results:
+    - When x < 0: ``f(x) = alpha * x``.
+    - When x in [0, 6]: ``f(x) = x``.
+    - When x > 6: ``f(x) = 6``.
+
+    Parameters
+    ----------
+    x : Tensor
+        Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
+    alpha : float
+        Slope for x < 0: ``f(x) = alpha * x``. Must be in the range (0, 1].
+    name : str
+        The function name (optional).
+
+    Examples
+    --------
+    >>> import tensorlayer as tl
+    >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.leaky_relu6(x, 0.2), name='dense')
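+
+    For illustration, a small numeric example of the piecewise behaviour defined above (the
+    values in the comment follow from that definition; in graph mode, evaluate the tensor in
+    a session to inspect them):
+
+    >>> import tensorflow as tf
+    >>> y = tl.act.leaky_relu6(tf.constant([-2.0, 3.0, 8.0]), alpha=0.2)
+    >>> # f(-2.0) = -0.4, f(3.0) = 3.0, f(8.0) = 6.0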
+
+    Returns
+    -------
+    Tensor
+        A ``Tensor`` with the same type as ``x``.
+
+    References
+    ----------
+    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
+    - `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__
+    """
+
+    if not (0 < alpha <= 1):
+        raise ValueError("`alpha` value must be in (0, 1]")
+
+    with tf.name_scope(name, "leaky_relu6") as name_scope:
+        x = tf.convert_to_tensor(x, name="features")
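+        # min(max(x, alpha * x), 6) applies the leaky slope below zero and caps the output at 6.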
+        return tf.minimum(tf.maximum(x, alpha * x), 6, name=name_scope)
+
+
+def leaky_twice_relu6(x, alpha_low=0.2, alpha_high=0.2, name="leaky_twice_relu6"):
+    """:func:`leaky_twice_relu6` can be used through its shortcut: :func:`tl.act.ltrelu6`.
+
+    This activation function is a modified version of :func:`leaky_relu`, introduced by the following paper:
+    `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
+
+    This activation function also follows the behaviour of the activation function :func:`tf.nn.relu6`, introduced by the following paper:
+    `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__
+
+    This function pushes the logic further by adding `leaky` behaviour both below zero and above six.
+
+    The function returns the following results:
+    - When x < 0: ``f(x) = alpha_low * x``.
+    - When x in [0, 6]: ``f(x) = x``.
+    - When x > 6: ``f(x) = 6 + (alpha_high * (x - 6))``.
+
+    Parameters
+    ----------
+    x : Tensor
+        Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
+    alpha_low : float
+        Slope for x < 0: ``f(x) = alpha_low * x``.
+    alpha_high : float
+        Slope for x > 6: ``f(x) = 6 + (alpha_high * (x - 6))``.
+    name : str
+        The function name (optional).
+
+    Examples
+    --------
+    >>> import tensorlayer as tl
+    >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.leaky_twice_relu6(x, 0.2, 0.2), name='dense')
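+
+    For illustration, a small numeric example of the piecewise behaviour defined above (the
+    values in the comment follow from that definition; in graph mode, evaluate the tensor in
+    a session to inspect them):
+
+    >>> import tensorflow as tf
+    >>> y = tl.act.leaky_twice_relu6(tf.constant([-2.0, 3.0, 8.0]), alpha_low=0.2, alpha_high=0.2)
+    >>> # f(-2.0) = -0.4, f(3.0) = 3.0, f(8.0) = 6 + 0.2 * (8 - 6) = 6.4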
+
+    Returns
+    -------
+    Tensor
+        A ``Tensor`` with the same type as ``x``.
+
+    References
+    ----------
+    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
+    - `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__
+
+    """
+
+    if not (0 < alpha_high <= 1):
+        raise ValueError("`alpha_high` value must be in (0, 1]")
+
+    if not (0 < alpha_low <= 1):
+        raise ValueError("`alpha_low` value must be in (0, 1]")
+
+    with tf.name_scope(name, "leaky_twice_relu6") as name_scope:
+        x = tf.convert_to_tensor(x, name="features")
+
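+        # x_is_above_0 follows x up to 6 and then grows as 6 + alpha_high * (x - 6);
+        # x_is_below_0 is alpha_low * x for negative x and 0 otherwise;
+        # taking their maximum yields the piecewise function given in the docstring.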
+        x_is_above_0 = tf.minimum(x, 6 * (1 - alpha_high) + alpha_high * x)
+        x_is_below_0 = tf.minimum(alpha_low * x, 0)
+
+        return tf.maximum(x_is_above_0, x_is_below_0, name=name_scope)


def swish(x, name='swish'):
@@ -219,4 +337,6 @@ def pixel_wise_softmax(x, name='pixel_wise_softmax'):

# Alias
lrelu = leaky_relu
+lrelu6 = leaky_relu6
+ltrelu6 = leaky_twice_relu6
htanh = hard_tanh