
Commit 6bc1338

fix unused arguments.
1 parent 1788549 commit 6bc1338

13 files changed: +139 -388 lines changed


docs/modules/visualize.rst

Lines changed: 5 additions & 1 deletion

@@ -17,7 +17,7 @@ to visualize the model, activations etc. Here we provide more functions for data
   frame
   images2d
   tsne_embedding
-
+  draw_weights

 Save and read images
 ----------------------
@@ -65,3 +65,7 @@ Images by matplotlib
 Visualize embeddings
 --------------------
 .. autofunction:: tsne_embedding
+
+Visualize weights
+--------------------
+.. autofunction:: draw_weights
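The newly documented function is the renamed tl.vis.W. A minimal usage sketch, assuming a random stand-in weight matrix (784 inputs x 100 units, i.e. MNIST-sized tiles); the keyword arguments mirror the calls changed elsewhere in this commit, while the matrix itself and the name 'w1_demo' are illustrative:

import numpy as np
import tensorlayer as tl

# Stand-in weight matrix: each of the 100 columns becomes one 28x28 tile.
W = np.random.normal(0, 0.1, size=(784, 100))

# `second` is the display time in seconds; `saveable=True` writes the figure
# to disk under `name`; `fig_idx` selects the matplotlib figure window.
tl.vis.draw_weights(W, second=10, saveable=True, shape=[28, 28], name='w1_demo', fig_idx=2012)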

example/tutorial_mnist.py

Lines changed: 2 additions & 2 deletions

@@ -144,7 +144,7 @@ def main_test_layers(model='relu'):
             print(" val acc: %f" % (val_acc / n_batch))
             try:
                 # You can visualize the weight of 1st hidden layer as follow.
-                tl.vis.W(network.all_params[0].eval(), second=10, saveable=True, shape=[28, 28], name='w1_' + str(epoch + 1), fig_idx=2012)
+                tl.vis.draw_weights(network.all_params[0].eval(), second=10, saveable=True, shape=[28, 28], name='w1_' + str(epoch + 1), fig_idx=2012)
                 # You can also save the weight of 1st hidden layer to .npz file.
                 # tl.files.save_npz([network.all_params[0]] , name='w1'+str(epoch+1)+'.npz')
             except:
@@ -370,7 +370,7 @@ def main_test_stacked_denoise_AE(model='relu'):
             print(" val acc: %f" % (val_acc / n_batch))
             try:
                 # visualize the 1st hidden layer during fine-tune
-                tl.vis.W(network.all_params[0].eval(), second=10, saveable=True, shape=[28, 28], name='w1_' + str(epoch + 1), fig_idx=2012)
+                tl.vis.draw_weights(network.all_params[0].eval(), second=10, saveable=True, shape=[28, 28], name='w1_' + str(epoch + 1), fig_idx=2012)
             except:
                 print("You should change vis.W(), if you want to save the feature images for different dataset")

tensorlayer/cost.py

Lines changed: 5 additions & 16 deletions

@@ -513,7 +513,7 @@ def li_regularizer(scale, scope=None):
         logging.info('Scale of 0 disables regularizer.')
         return lambda _, name=None: None

-    def li(weights, name=None):
+    def li(weights):
         """Applies li regularization to weights."""
         with tf.name_scope('li_regularizer') as scope:
             my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
@@ -526,7 +526,7 @@ def li(weights, name=None):
     return li


-def lo_regularizer(scale, scope=None):
+def lo_regularizer(scale):
     """Lo regularization removes the neurons of current layer. The `o` represents `outputs`
     Returns a function that can be used to apply group lo regularization to weights.
     The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
@@ -535,8 +535,6 @@ def lo_regularizer(scale, scope=None):
     ----------
     scale : float
         A scalar multiplier `Tensor`. 0.0 disables the regularizer.
-    scope: str
-        An optional scope name for this function.

     Returns
     -------
@@ -576,7 +574,7 @@ def lo(weights, name='lo_regularizer'):
     return lo


-def maxnorm_regularizer(scale=1.0, scope=None):
+def maxnorm_regularizer(scale=1.0):
     """Max-norm regularization returns a function that can be used to apply max-norm regularization to weights.

     More about max-norm, see `wiki-max norm <https://en.wikipedia.org/wiki/Matrix_norm#Max_norm>`_.
@@ -586,8 +584,6 @@ def maxnorm_regularizer(scale=1.0, scope=None):
     ----------
     scale : float
         A scalar multiplier `Tensor`. 0.0 disables the regularizer.
-    scope: str
-        An optional scope name for this function.

     Returns
     ---------
@@ -627,7 +623,7 @@ def mn(weights, name='max_regularizer'):
     return mn


-def maxnorm_o_regularizer(scale, scope):
+def maxnorm_o_regularizer(scale):
     """Max-norm output regularization removes the neurons of current layer.
     Returns a function that can be used to apply max-norm regularization to each column of weight matrix.
     The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
@@ -636,8 +632,6 @@ def maxnorm_o_regularizer(scale, scope):
     ----------
     scale : float
         A scalar multiplier `Tensor`. 0.0 disables the regularizer.
-    scope: str
-        An optional scope name for this function.

     Returns
     ---------
@@ -677,7 +671,7 @@ def mn_o(weights, name='maxnorm_o_regularizer'):
     return mn_o


-def maxnorm_i_regularizer(scale, scope=None):
+def maxnorm_i_regularizer(scale):
     """Max-norm input regularization removes the neurons of previous layer.
     Returns a function that can be used to apply max-norm regularization to each row of weight matrix.
     The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
@@ -686,8 +680,6 @@ def maxnorm_i_regularizer(scale, scope=None):
     ----------
     scale : float
         A scalar multiplier `Tensor`. 0.0 disables the regularizer.
-    scope: str
-        An optional scope name for this function.

     Returns
     ---------
@@ -725,6 +717,3 @@ def mn_i(weights, name='maxnorm_i_regularizer'):
         return standard_ops_fn(my_scale, standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 1)), name=scope)

     return mn_i
-
-
-#
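After this commit the regularizer factories take only `scale`. A minimal sketch of the factory pattern under TF 1.x-era TensorLayer; the 784x800 variable shape is an illustrative assumption:

import tensorflow as tf
import tensorlayer as tl

W = tf.Variable(tf.truncated_normal([784, 800], stddev=0.02), name='W')

# Each factory returns a closure that maps a weight tensor to a scalar penalty;
# the removed `scope` argument is no longer accepted by the factories.
max_norm = tl.cost.maxnorm_regularizer(scale=1.0)
penalty = max_norm(W)  # scalar Tensor to add to the training loss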

tensorlayer/db.py

Lines changed: 4 additions & 4 deletions

@@ -494,13 +494,13 @@ def __init__(self, db, model):
         self.db = db
         self.model = model

-    def on_train_begin(self, logs={}):
+    def on_train_begin(self):
         print("start")

-    def on_train_end(self, logs={}):
+    def on_train_end(self):
         print("end")

-    def on_epoch_begin(self, epoch, logs={}):
+    def on_epoch_begin(self, epoch):
         self.epoch = epoch
         self.et = time.time()
         return
@@ -525,7 +525,7 @@ def on_batch_begin(self, batch, logs={}):
         self.losses = []
         self.batch = batch

-    def on_batch_end(self, batch, logs={}):
+    def on_batch_end(self, logs={}):
         self.t2 = time.time() - self.t
         logs['acc'] = np.asscalar(logs['acc'])
         #logs['loss']=np.asscalar(logs['loss'])
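These are Keras-style training hooks with their unused parameters removed. A standalone sketch of the new call shapes; the class name `DemoCallback` and the driver calls are illustrative assumptions, not code from this commit:

import time

class DemoCallback(object):
    def __init__(self, db, model):
        self.db = db
        self.model = model

    def on_train_begin(self):          # unused `logs` argument removed
        print("start")

    def on_epoch_begin(self, epoch):   # `logs` removed, `epoch` kept
        self.epoch = epoch
        self.et = time.time()

    def on_batch_end(self, logs={}):   # `batch` removed, `logs` kept
        print(logs)

cb = DemoCallback(db=None, model=None)
cb.on_train_begin()
cb.on_epoch_begin(0)
cb.on_batch_end({'acc': 0.98})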

tensorlayer/files.py

Lines changed: 2 additions & 4 deletions

@@ -117,7 +117,7 @@ def load_mnist_labels(path, filename):
     return X_train, y_train, X_val, y_val, X_test, y_test


-def load_cifar10_dataset(shape=(-1, 32, 32, 3), path='data', plotable=False, second=3):
+def load_cifar10_dataset(shape=(-1, 32, 32, 3), path='data', plotable=False):
     """Load CIFAR-10 dataset.

     It consists of 60000 32x32 colour images in 10 classes, with
@@ -137,8 +137,6 @@ def load_cifar10_dataset(shape=(-1, 32, 32, 3), path='data', plotable=False, sec
         The path that the data is downloaded to, defaults is ``data/cifar10/``.
     plotable : boolean
         Whether to plot some image examples, False as default.
-    second : int
-        If ``plotable`` is True, it is the display time.

     Examples
     --------
@@ -1791,4 +1789,4 @@ def npz_to_W_pdf(path=None, regx='w1pre_[0-9]+\.(npz)'):
     for f in file_list:
         W = load_npz(path, f)[0]
         logging.info("%s --> %s" % (f, f.split('.')[0] + '.pdf'))
-        visualize.W(W, second=10, saveable=True, name=f.split('.')[0], fig_idx=2012)
+        visualize.draw_weights(W, second=10, saveable=True, name=f.split('.')[0], fig_idx=2012)
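Callers simply drop the removed keyword. A minimal sketch, assuming the loader's documented four-array return for CIFAR-10:

import tensorlayer as tl

# Downloads to data/cifar10/ on first use; passing `second=...` would now raise a TypeError.
X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(
    shape=(-1, 32, 32, 3), path='data', plotable=False)
print(X_train.shape)  # (50000, 32, 32, 3)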

tensorlayer/iterate.py

Lines changed: 0 additions & 153 deletions

@@ -269,156 +269,3 @@ def ptb_iterator(raw_data, batch_size, num_steps):
         x = data[:, i * num_steps:(i + 1) * num_steps]
         y = data[:, i * num_steps + 1:(i + 1) * num_steps + 1]
         yield (x, y)
-
-
-# def minibatches_for_sequence2D(inputs, targets, batch_size, sequence_length, stride=1):
-#     """
-#     Input a group of example in 2D numpy.array and their labels.
-#     Return the examples and labels by the given batchsize, sequence_length.
-#     Use for RNN.
-#
-#     Parameters
-#     ----------
-#     inputs : numpy.array
-#         (X) The input features, every row is a example.
-#     targets : numpy.array
-#         (y) The labels of inputs, every row is a example.
-#     batchsize : int
-#         The batch size must be a multiple of sequence_length: int(batch_size % sequence_length) == 0
-#     sequence_length : int
-#         The sequence length
-#     stride : int
-#         The stride step
-#
-#     Examples
-#     --------
-#     >>> sequence_length = 2
-#     >>> batch_size = 4
-#     >>> stride = 1
-#     >>> X_train = np.asarray([[1,2,3],[4,5,6],[7,8,9],[10,11,12],[13,14,15],[16,17,18],[19,20,21],[22,23,24]])
-#     >>> y_train = np.asarray(['0','1','2','3','4','5','6','7'])
-#     >>> print('X_train = %s' % X_train)
-#     >>> print('y_train = %s' % y_train)
-#     >>> for batch in minibatches_for_sequence2D(X_train, y_train, batch_size=batch_size, sequence_length=sequence_length, stride=stride):
-#     >>>     inputs, targets = batch
-#     >>>     print(inputs)
-#     >>>     print(targets)
-#     ... [[ 1.  2.  3.]
-#     ...  [ 4.  5.  6.]
-#     ...  [ 4.  5.  6.]
-#     ...  [ 7.  8.  9.]]
-#     ... [1 2]
-#     ... [[ 4.  5.  6.]
-#     ...  [ 7.  8.  9.]
-#     ...  [ 7.  8.  9.]
-#     ...  [ 10. 11. 12.]]
-#     ... [2 3]
-#     ... ...
-#     ... [[ 16. 17. 18.]
-#     ...  [ 19. 20. 21.]
-#     ...  [ 19. 20. 21.]
-#     ...  [ 22. 23. 24.]]
-#     ... [6 7]
-#     """
-#     print('len(targets)=%d batch_size=%d sequence_length=%d stride=%d' % (len(targets), batch_size, sequence_length, stride))
-#     assert len(inputs) == len(targets), '1 feature vector have 1 target vector/value'  #* sequence_length
-#     # assert int(batch_size % sequence_length) == 0, 'batch_size % sequence_length must == 0\
-#     #     batch_size is number of examples rather than number of targets'
-#
-#     # print(inputs.shape, len(inputs), len(inputs[0]))
-#
-#     n_targets = int(batch_size/sequence_length)
-#     # n_targets = int(np.ceil(batch_size/sequence_length))
-#     X = np.empty(shape=(0,len(inputs[0])), dtype=np.float32)
-#     y = np.zeros(shape=(1, n_targets), dtype=np.int32)
-#
-#     for idx in range(sequence_length, len(inputs), stride):  # go through all example during 1 epoch
-#         for n in range(n_targets):  # for num of target
-#             X = np.concatenate((X, inputs[idx-sequence_length+n:idx+n]))
-#             y[0][n] = targets[idx-1+n]
-#             # y = np.vstack((y, targets[idx-1+n]))
-#         yield X, y[0]
-#         X = np.empty(shape=(0,len(inputs[0])))
-#         # y = np.empty(shape=(1,0))
-#
-#
-# def minibatches_for_sequence4D(inputs, targets, batch_size, sequence_length, stride=1):  #
-#     """
-#     Input a group of example in 4D numpy.array and their labels.
-#     Return the examples and labels by the given batchsize, sequence_length.
-#     Use for RNN.
-#
-#     Parameters
-#     ----------
-#     inputs : numpy.array
-#         (X) The input features, every row is a example.
-#     targets : numpy.array
-#         (y) The labels of inputs, every row is a example.
-#     batchsize : int
-#         The batch size must be a multiple of sequence_length: int(batch_size % sequence_length) == 0
-#     sequence_length : int
-#         The sequence length
-#     stride : int
-#         The stride step
-#
-#     Examples
-#     --------
-#     >>> sequence_length = 2
-#     >>> batch_size = 2
-#     >>> stride = 1
-#     >>> X_train = np.asarray([[1,2,3],[4,5,6],[7,8,9],[10,11,12],[13,14,15],[16,17,18],[19,20,21],[22,23,24]])
-#     >>> y_train = np.asarray(['0','1','2','3','4','5','6','7'])
-#     >>> X_train = np.expand_dims(X_train, axis=1)
-#     >>> X_train = np.expand_dims(X_train, axis=3)
-#     >>> for batch in minibatches_for_sequence4D(X_train, y_train, batch_size=batch_size, sequence_length=sequence_length, stride=stride):
-#     >>>     inputs, targets = batch
-#     >>>     print(inputs)
-#     >>>     print(targets)
-#     ... [[[[ 1.]
-#     ...    [ 2.]
-#     ...    [ 3.]]]
-#     ...  [[[ 4.]
-#     ...    [ 5.]
-#     ...    [ 6.]]]]
-#     ... [1]
-#     ... [[[[ 4.]
-#     ...    [ 5.]
-#     ...    [ 6.]]]
-#     ...  [[[ 7.]
-#     ...    [ 8.]
-#     ...    [ 9.]]]]
-#     ... [2]
-#     ... ...
-#     ... [[[[ 19.]
-#     ...    [ 20.]
-#     ...    [ 21.]]]
-#     ...  [[[ 22.]
-#     ...    [ 23.]
-#     ...    [ 24.]]]]
-#     ... [7]
-#     """
-#     print('len(targets)=%d batch_size=%d sequence_length=%d stride=%d' % (len(targets), batch_size, sequence_length, stride))
-#     assert len(inputs) == len(targets), '1 feature vector have 1 target vector/value'  #* sequence_length
-#     # assert int(batch_size % sequence_length) == 0, 'in LSTM, batch_size % sequence_length must == 0\
-#     #     batch_size is number of X_train rather than number of targets'
-#     assert stride >= 1, 'stride must be >=1, at least move 1 step for each iternation'
-#
-#     n_example, n_channels, width, height = inputs.shape
-#     print('n_example=%d n_channels=%d width=%d height=%d' % (n_example, n_channels, width, height))
-#
-#     n_targets = int(np.ceil(batch_size/sequence_length))  # actually batch_size/sequence_length + 1
-#     print(n_targets)
-#     X = np.zeros(shape=(batch_size, n_channels, width, height), dtype=np.float32)
-#     # X = np.zeros(shape=(n_targets, sequence_length, n_channels, width, height), dtype=np.float32)
-#     y = np.zeros(shape=(1,n_targets), dtype=np.int32)
-#     # y = np.empty(shape=(0,1), dtype=np.float32)
-#     # time.sleep(2)
-#     for idx in range(sequence_length, n_example-n_targets+2, stride):  # go through all example during 1 epoch
-#         for n in range(n_targets):  # for num of target
-#             # print(idx+n, inputs[idx-sequence_length+n : idx+n].shape)
-#             X[n*sequence_length : (n+1)*sequence_length] = inputs[idx+n-sequence_length : idx+n]
-#             # X[n] = inputs[idx-sequence_length+n:idx+n]
-#             y[0][n] = targets[idx+n-1]
-#             # y = np.vstack((y, targets[idx-1+n]))
-#         # y = targets[idx: idx+n_targets]
-#         yield X, y[0]
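Only dead commented-out code is deleted here; the live `ptb_iterator` above it is untouched. A minimal sketch of its contract, with toy data as an illustrative assumption:

import tensorlayer as tl

raw_data = list(range(20))  # toy token-id sequence
# Yields (x, y) where y is x shifted one step ahead, as in the context lines above.
for x, y in tl.iterate.ptb_iterator(raw_data, batch_size=2, num_steps=3):
    print(x.shape, y.shape)  # (2, 3) (2, 3), three batches in total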

tensorlayer/layers/convolution.py

Lines changed: 0 additions & 2 deletions

@@ -46,7 +46,6 @@ def __init__(
         stride=1,
         dilation_rate=1,
         padding='SAME',
-        # use_cudnn_on_gpu=None,
         data_format='NWC',
         W_init=tf.truncated_normal_initializer(stddev=0.02),
         b_init=tf.constant_initializer(value=0.0),
@@ -1165,7 +1164,6 @@ def conv1d(
         dilation_rate=1,
         act=tf.identity,
         padding='SAME',
-        # use_cudnn_on_gpu=None,
         data_format="NWC",
         W_init=tf.truncated_normal_initializer(stddev=0.02),
         b_init=tf.constant_initializer(value=0.0),

tensorlayer/layers/core.py

Lines changed: 2 additions & 1 deletion

@@ -1063,7 +1063,8 @@ def pretrain(self, sess, x, X_train, X_val, denoise_name=None, n_epoch=100, batc
             logging.info(" val loss: %f" % (val_loss / n_batch))
             if save:
                 try:
-                    visualize.W(self.train_params[0].eval(), second=10, saveable=True, shape=[28, 28], name=save_name + str(epoch + 1), fig_idx=2012)
+                    visualize.draw_weights(
+                        self.train_params[0].eval(), second=10, saveable=True, shape=[28, 28], name=save_name + str(epoch + 1), fig_idx=2012)
                     files.save_npz([self.all_params[0]], name=save_name + str(epoch + 1) + '.npz')
                 except:
                     raise Exception(

0 commit comments

Comments
 (0)