
Commit 1b5dd6e

update for TF 1.0
1 parent 6613a4b commit 1b5dd6e

3 files changed: +61 -27 lines changed


tensorlayer/cost.py

Lines changed: 14 additions & 7 deletions
@@ -78,8 +78,7 @@ def mean_squared_error(output, target):
         A distribution with shape: [batch_size, n_feature].
     """
     with tf.name_scope("mean_squared_error_loss"):
-        mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target),
-                                           reduction_indices = 1))
+        mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), 1))
         return mse


@@ -238,9 +237,14 @@ def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details=
     targets = tf.reshape(target_seqs, [-1])   # to one vector
     weights = tf.to_float(tf.reshape(input_mask, [-1]))   # to one vector like targets
     losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, targets)
-    loss = tf.div(tf.reduce_sum(tf.mul(losses, weights)),   # loss from mask. reduce_sum before element-wise mul with mask !!
-                  tf.reduce_sum(weights),
-                  name="seq_loss_with_mask")
+    try: ## TF1.0
+        loss = tf.divide(tf.reduce_sum(tf.multiply(losses, weights)),   # loss from mask. reduce_sum before element-wise mul with mask !!
+                         tf.reduce_sum(weights),
+                         name="seq_loss_with_mask")
+    except: ## TF0.12
+        loss = tf.div(tf.reduce_sum(tf.mul(losses, weights)),   # loss from mask. reduce_sum before element-wise mul with mask !!
+                      tf.reduce_sum(weights),
+                      name="seq_loss_with_mask")
     if return_details:
         return loss, losses, weights, targets
     else:
@@ -258,8 +262,11 @@ def cosine_similarity(v1, v2):
     -----------
     a tensor of [batch_size, ]
     """
-    return tf.reduce_sum(tf.mul(v1, v2), reduction_indices=1) / (tf.sqrt(tf.reduce_sum(tf.mul(v1, v1), reduction_indices=1)) * tf.sqrt(tf.reduce_sum(tf.mul(v2, v2), reduction_indices=1)))
-
+    try: ## TF1.0
+        cost = tf.reduce_sum(tf.multiply(v1, v2), 1) / (tf.sqrt(tf.reduce_sum(tf.multiply(v1, v1), 1)) * tf.sqrt(tf.reduce_sum(tf.multiply(v2, v2), 1)))
+    except: ## TF0.12
+        cost = tf.reduce_sum(tf.mul(v1, v2), reduction_indices=1) / (tf.sqrt(tf.reduce_sum(tf.mul(v1, v1), reduction_indices=1)) * tf.sqrt(tf.reduce_sum(tf.mul(v2, v2), reduction_indices=1)))
+    return cost


 ## Regularization Functions
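All three changes in this file use the same compatibility shim: call the op by its TF 1.0 name first and fall back to the TF 0.12 name if that attribute is missing. A minimal sketch of the pattern outside the library (the elementwise_mul helper is hypothetical, not part of this commit):

import tensorflow as tf

def elementwise_mul(a, b):
    # tf.multiply exists from TF 1.0; on TF 0.12 the attribute lookup fails and we fall back to tf.mul
    try:                       ## TF1.0
        return tf.multiply(a, b)
    except AttributeError:     ## TF0.12
        return tf.mul(a, b)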

tensorlayer/layers.py

Lines changed: 43 additions & 19 deletions
@@ -700,9 +700,9 @@ def __init__(
         print(" learning_rate: %f" % learning_rate)

         # Mean-squre-error i.e. quadratic-cost
-        mse = tf.reduce_sum(tf.squared_difference(y, x_recon), reduction_indices = 1)
+        mse = tf.reduce_sum(tf.squared_difference(y, x_recon), 1)
         mse = tf.reduce_mean(mse)   # in theano: mse = ((y - x) ** 2 ).sum(axis=1).mean()
-        # mse = tf.reduce_mean(tf.reduce_sum(tf.square(tf.sub(y, x_recon)), reduction_indices = 1))
+        # mse = tf.reduce_mean(tf.reduce_sum(tf.square(tf.sub(y, x_recon)), 1))
         # mse = tf.reduce_mean(tf.squared_difference(y, x_recon))   # <haodong>: Error
         # mse = tf.sqrt(tf.reduce_mean(tf.square(y - x_recon)))     # <haodong>: Error
         # Cross-entropy
@@ -719,13 +719,16 @@ def __init__(
         # L1 of activation outputs
         activation_out = self.all_layers[-2]
         L1_a = 0.001 * tf.reduce_mean(activation_out)   # <haodong>: theano: T.mean( self.a[i] )   # some neuron are broken, white and black
-        # L1_a = 0.001 * tf.reduce_mean( tf.reduce_sum(activation_out, reduction_indices=0) )         # <haodong>: some neuron are broken, white and black
-        # L1_a = 0.001 * 100 * tf.reduce_mean( tf.reduce_sum(activation_out, reduction_indices=1) )   # <haodong>: some neuron are broken, white and black
+        # L1_a = 0.001 * tf.reduce_mean( tf.reduce_sum(activation_out, 0) )         # <haodong>: some neuron are broken, white and black
+        # L1_a = 0.001 * 100 * tf.reduce_mean( tf.reduce_sum(activation_out, 1) )   # <haodong>: some neuron are broken, white and black
         # KL Divergence
         beta = 4
         rho = 0.15
-        p_hat = tf.reduce_mean(activation_out, reduction_indices = 0)   # theano: p_hat = T.mean( self.a[i], axis=0 )
-        KLD = beta * tf.reduce_sum( rho * tf.log(tf.div(rho, p_hat)) + (1- rho) * tf.log((1- rho)/ (tf.sub(float(1), p_hat))) )
+        p_hat = tf.reduce_mean(activation_out, 0)   # theano: p_hat = T.mean( self.a[i], axis=0 )
+        try: ## TF1.0
+            KLD = beta * tf.reduce_sum( rho * tf.log(tf.divide(rho, p_hat)) + (1- rho) * tf.log((1- rho)/ (tf.subtract(float(1), p_hat))) )
+        except: ## TF0.12
+            KLD = beta * tf.reduce_sum( rho * tf.log(tf.div(rho, p_hat)) + (1- rho) * tf.log((1- rho)/ (tf.sub(float(1), p_hat))) )
         # KLD = beta * tf.reduce_sum( rho * tf.log(rho/ p_hat) + (1- rho) * tf.log((1- rho)/(1- p_hat)) )
         # theano: L1_a = l1_a[i] * T.sum( rho[i] * T.log(rho[i]/ p_hat) + (1- rho[i]) * T.log((1- rho[i])/(1- p_hat)) )
         # Total cost
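For reference, the KL sparsity term updated above pushes the mean activation p_hat of each hidden unit toward the target sparsity rho. A standalone sketch of the TF 1.0 branch (the placeholder shape is illustrative, not from the commit):

import tensorflow as tf

beta = 4
rho = 0.15
activation_out = tf.placeholder(tf.float32, [None, 196])   # hidden activations, illustrative shape
p_hat = tf.reduce_mean(activation_out, 0)                   # mean activation of each unit over the batch
KLD = beta * tf.reduce_sum(rho * tf.log(tf.divide(rho, p_hat))
                           + (1 - rho) * tf.log((1 - rho) / tf.subtract(1.0, p_hat)))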
@@ -3223,7 +3226,10 @@ def __init__(
         self.bw_initial_state = bw_initial_state
         # exit()
         # Feedforward to MultiRNNCell
-        list_rnn_inputs = tf.unpack(self.inputs, axis=1)
+        try: ## TF1.0
+            list_rnn_inputs = tf.unstack(self.inputs, axis=1)
+        except: ## TF0.12
+            list_rnn_inputs = tf.unpack(self.inputs, axis=1)
         outputs, fw_state, bw_state = tf.nn.bidirectional_rnn(
             cell_fw=self.fw_cell,
             cell_bw=self.bw_cell,
@@ -3340,8 +3346,12 @@ def retrieve_seq_length_op(data):
     - Borrow from `TFlearn <https://github.com/tflearn/tflearn/blob/master/tflearn/layers/recurrent.py>`_.
     """
     with tf.name_scope('GetLength'):
-        used = tf.sign(tf.reduce_max(tf.abs(data), reduction_indices=2))
-        length = tf.reduce_sum(used, reduction_indices=1)
+        ## TF 1.0 change reduction_indices to axis
+        used = tf.sign(tf.reduce_max(tf.abs(data), 2))
+        length = tf.reduce_sum(used, 1)
+        ## TF < 1.0
+        # used = tf.sign(tf.reduce_max(tf.abs(data), reduction_indices=2))
+        # length = tf.reduce_sum(used, reduction_indices=1)
         length = tf.cast(length, tf.int32)
     return length
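retrieve_seq_length_op recovers the true length of each zero-padded sequence: tf.sign marks every timestep that has at least one non-zero feature, and summing those marks over the time axis gives the length. A small TF 1.0 sketch with a toy batch (shapes and values are illustrative):

import numpy as np
import tensorflow as tf

data = tf.placeholder(tf.float32, [None, None, 3])      # [batch_size, n_step, n_feature]
used = tf.sign(tf.reduce_max(tf.abs(data), 2))           # 1.0 where a step has any non-zero feature
length = tf.cast(tf.reduce_sum(used, 1), tf.int32)       # number of non-padded steps per example

batch = np.zeros([2, 4, 3], dtype=np.float32)
batch[0, :2] = 1.0                                        # first example: 2 real steps, 2 padding steps
batch[1, :3] = 1.0                                        # second example: 3 real steps, 1 padding step
with tf.Session() as sess:
    print(sess.run(length, feed_dict={data: batch}))      # -> [2 3]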

@@ -3535,8 +3545,12 @@ def __init__(

         # Computes sequence_length
         if sequence_length is None:
-            sequence_length = retrieve_seq_length_op(
-                        self.inputs if isinstance(self.inputs, tf.Tensor) else tf.pack(self.inputs))
+            try: ## TF1.0
+                sequence_length = retrieve_seq_length_op(
+                            self.inputs if isinstance(self.inputs, tf.Tensor) else tf.stack(self.inputs))
+            except: ## TF0.12
+                sequence_length = retrieve_seq_length_op(
+                            self.inputs if isinstance(self.inputs, tf.Tensor) else tf.pack(self.inputs))

         # Main - Computes outputs and last_states
         with tf.variable_scope(name, initializer=initializer) as vs:
@@ -3554,7 +3568,7 @@ def __init__(
         # Manage the outputs
         if return_last:
             # [batch_size, n_hidden]
-            # outputs = tf.transpose(tf.pack(outputs), [1, 0, 2])
+            # outputs = tf.transpose(tf.pack(outputs), [1, 0, 2])   # TF1.0 tf.pack --> tf.stack
             self.outputs = advanced_indexing_op(outputs, sequence_length)
         else:
             # [batch_size, n_step(max), n_hidden]
@@ -3742,8 +3756,12 @@ def __init__(
         self.bw_initial_state = bw_initial_state
         # Computes sequence_length
         if sequence_length is None:
-            sequence_length = retrieve_seq_length_op(
-                        self.inputs if isinstance(self.inputs, tf.Tensor) else tf.pack(self.inputs))
+            try: ## TF1.0
+                sequence_length = retrieve_seq_length_op(
+                            self.inputs if isinstance(self.inputs, tf.Tensor) else tf.stack(self.inputs))
+            except: ## TF0.12
+                sequence_length = retrieve_seq_length_op(
+                            self.inputs if isinstance(self.inputs, tf.Tensor) else tf.pack(self.inputs))

         outputs, (states_fw, states_bw) = tf.nn.bidirectional_dynamic_rnn(
             cell_fw=self.fw_cell,
@@ -4230,7 +4248,7 @@ class ElementwiseLayer(Layer):
     layer : a list of :class:`Layer` instances
         The `Layer` class feeding into this layer.
     combine_fn : a TensorFlow elemwise-merge function
-        e.g. AND is ``tf.minimum`` ; OR is ``tf.maximum`` ; ADD is ``tf.add`` ; MUL is ``tf.mul`` and so on.
+        e.g. AND is ``tf.minimum`` ; OR is ``tf.maximum`` ; ADD is ``tf.add`` ; MUL is ``tf.multiply`` and so on.
         See `TensorFlow Math API <https://www.tensorflow.org/versions/master/api_docs/python/math_ops.html#math>`_ .
     name : a string or None
         An optional name to attach to this layer.
@@ -4302,7 +4320,7 @@ def __init__(

         print(" tensorlayer:Instantiate ExpandDimsLayer %s" % self.name)
         with tf.variable_scope(name) as vs:
-            try: # TF12
+            try: # TF12 TF1.0
                 self.outputs = tf.expand_dims(self.inputs, axis=axis)
             except: # TF11
                 self.outputs = tf.expand_dims(self.inputs, dim=axis)
@@ -4451,7 +4469,10 @@ def __init__(
         # with tf.name_scope(name) as scope:
         with tf.variable_scope(name) as vs:
             alphas = tf.get_variable(name='alphas', shape=w_shape, initializer=a_init, **a_init_args )
-            self.outputs = tf.nn.relu(self.inputs) + tf.mul(alphas, (self.inputs - tf.abs(self.inputs))) * 0.5
+            try: ## TF 1.0
+                self.outputs = tf.nn.relu(self.inputs) + tf.multiply(alphas, (self.inputs - tf.abs(self.inputs))) * 0.5
+            except: ## TF 0.12
+                self.outputs = tf.nn.relu(self.inputs) + tf.mul(alphas, (self.inputs - tf.abs(self.inputs))) * 0.5

         self.all_layers = list(layer.all_layers)
         self.all_params = list(layer.all_params)
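The PReLU output assembled above is tf.nn.relu(x) + alphas * (x - |x|) * 0.5, which equals x for positive inputs and alphas * x for negative ones, since (x - |x|) / 2 is min(0, x). A minimal TF 1.0 sketch of that identity (the fixed alpha value and shape are illustrative; the layer itself learns alphas as a variable):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 5])
alphas = tf.constant(0.2, shape=[5])                      # per-channel slopes; a tf.get_variable in the layer
outputs = tf.nn.relu(x) + tf.multiply(alphas, (x - tf.abs(x))) * 0.5   # == max(0, x) + alphas * min(0, x)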
@@ -4512,7 +4533,7 @@ class MultiplexerLayer(Layer):

     References
     ------------
-    - See ``tf.pack()`` and ``tf.gather()`` at `TensorFlow - Slicing and Joining <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#slicing-and-joining>`_
+    - See ``tf.pack() for TF0.12 or tf.stack() for TF1.0`` and ``tf.gather()`` at `TensorFlow - Slicing and Joining <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#slicing-and-joining>`_
     """
     def __init__(self,
                  layer = [],
@@ -4523,7 +4544,10 @@ def __init__(self,
         self.inputs = []
         for l in layer:
             self.inputs.append(l.outputs)
-        all_inputs = tf.pack(self.inputs, name=name)   # pack means concat a list of tensor in a new dim  # 1.2
+        try: ## TF1.0
+            all_inputs = tf.stack(self.inputs, name=name)   # pack means concat a list of tensor in a new dim  # 1.2
+        except:
+            all_inputs = tf.pack(self.inputs, name=name)   # pack means concat a list of tensor in a new dim  # 1.2

         print(" tensorlayer:Instantiate MultiplexerLayer %s: n_inputs: %d" % (self.name, self.n_inputs))
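MultiplexerLayer stacks the outputs of its input layers along a new leading dimension and then picks one of them with tf.gather, as the docstring reference above suggests; tf.pack was renamed to tf.stack in TF 1.0. A minimal sketch of the stack-and-select step (the placeholders and shapes are illustrative, not taken from the layer):

import tensorflow as tf

net_0 = tf.placeholder(tf.float32, [None, 10])
net_1 = tf.placeholder(tf.float32, [None, 10])
sel = tf.placeholder(tf.int32, [])                        # index of the branch to pass through
all_inputs = tf.stack([net_0, net_1], name='mux_input')   # [n_inputs, batch_size, 10]; tf.pack on TF 0.12
outputs = tf.gather(all_inputs, sel)                      # [batch_size, 10], the selected branch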

tensorlayer/rein.py

Lines changed: 4 additions & 1 deletion
@@ -64,5 +64,8 @@ def cross_entropy_reward_loss(logits, actions, rewards):
    >>> train_op = tf.train.RMSPropOptimizer(learning_rate, decay_rate).minimize(loss)
    """
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, actions)
-   loss = tf.reduce_sum(tf.mul(cross_entropy, rewards))   # element-wise mul
+   try: ## TF1.0
+       loss = tf.reduce_sum(tf.multiply(cross_entropy, rewards))
+   except: ## TF0.12
+       loss = tf.reduce_sum(tf.mul(cross_entropy, rewards))   # element-wise mul
    return loss
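cross_entropy_reward_loss weights the cross-entropy of each sampled action by its (discounted) reward, the usual policy-gradient surrogate loss. A minimal usage sketch following the docstring above (the linear policy and optimizer settings are illustrative):

import tensorflow as tf
import tensorlayer as tl

states = tf.placeholder(tf.float32, [None, 4])
actions = tf.placeholder(tf.int32, [None])                # sampled actions
rewards = tf.placeholder(tf.float32, [None])              # discounted rewards
W = tf.Variable(tf.zeros([4, 2]))                         # toy linear policy
logits = tf.matmul(states, W)
loss = tl.rein.cross_entropy_reward_loss(logits, actions, rewards)
train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(loss)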
