
Commit 590855b: Update core.py (1 parent: c9e31e2)

1 file changed: tensorlayer/models/core.py (24 additions, 51 deletions)
@@ -74,55 +74,53 @@ class Model(object):
     >>> from tensorlayer.layers import Input, Dense, Dropout
     >>> from tensorlayer.models import Model
 
-    - Define static model
+    Define static model
     >>> class CustomModel(Model):
     >>>     def __init__(self):
     >>>         super(CustomModel, self).__init__()
     >>>         self.dense1 = Dense(n_units=800, act=tf.nn.relu, in_channels=784)
     >>>         self.dropout1 = Dropout(keep=0.8)
     >>>         self.dense2 = Dense(n_units=10, in_channels=800)
-
     >>>     def forward(self, x):
     >>>         z = self.dense1(x)
     >>>         z = self.dropout1(z)
     >>>         z = self.dense2(z)
     >>>         return z
     >>> M_dynamic = CustomModel()
 
-    - Define static model
+    Define static model
 
     >>> ni = Input([None, 784])
     >>> nn = Dense(n_units=800, act=tf.nn.relu)(ni)
     >>> nn = Dropout(keep=0.8)(nn)
     >>> nn = Dense(n_units=10, act=tf.nn.relu)(nn)
     >>> M_static = Model(inputs=ni, outputs=nn, name="mlp")
 
-    - Get network information
+    Get network information
     >>> print(M_static)
-    Model(
-    (_inputlayer): Input(shape=[None, 784], name='_inputlayer')
-    (dense): Dense(n_units=800, relu, in_channels='784', name='dense')
-    (dropout): Dropout(keep=0.8, name='dropout')
-    (dense_1): Dense(n_units=10, relu, in_channels='800', name='dense_1')
-    )
-
-    - Forwarding through this network
+    ... Model(
+    ... (_inputlayer): Input(shape=[None, 784], name='_inputlayer')
+    ... (dense): Dense(n_units=800, relu, in_channels='784', name='dense')
+    ... (dropout): Dropout(keep=0.8, name='dropout')
+    ... (dense_1): Dense(n_units=10, relu, in_channels='800', name='dense_1')
+    ... )
+
+    Forwarding through this network
     >>> data = np.random.normal(size=[16, 784]).astype(np.float32)
     >>> outputs_d = M_dynamic(data)
     >>> outputs_s = M_static(data)
 
-    - Save and load weights
+    Save and load weights
     >>> M_static.save_weights('./model_weights.h5')
     >>> M_static.load_weights('./model_weights.h5')
 
-    - Save and load the model
+    Save and load the model
     >>> M_static.save('./model.h5')
     >>> M = Model.load('./model.h5')
 
-    - Convert model to layer
+    Convert model to layer
     >>> M_layer = M_static.as_layer()
-
-    -----
+
     """
 
     @property
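Taken together, the cleaned-up docstring corresponds to a script along these lines (a minimal runnable sketch, assuming TensorLayer 2.x on TensorFlow 2; the 784/800/10 shapes come from the MNIST-style example above):

    import numpy as np
    import tensorflow as tf
    from tensorlayer.layers import Input, Dense, Dropout
    from tensorlayer.models import Model

    # Dynamic model: subclass Model and write forward() by hand.
    class CustomModel(Model):
        def __init__(self):
            super(CustomModel, self).__init__()
            self.dense1 = Dense(n_units=800, act=tf.nn.relu, in_channels=784)
            self.dropout1 = Dropout(keep=0.8)
            self.dense2 = Dense(n_units=10, in_channels=800)

        def forward(self, x):
            z = self.dense1(x)
            z = self.dropout1(z)
            return self.dense2(z)

    M_dynamic = CustomModel()

    # Static model: wire layers into a graph, then wrap it in Model.
    ni = Input([None, 784])
    nn = Dense(n_units=800, act=tf.nn.relu)(ni)
    nn = Dropout(keep=0.8)(nn)
    nn = Dense(n_units=10, act=tf.nn.relu)(nn)
    M_static = Model(inputs=ni, outputs=nn, name="mlp")

    # A mode must be chosen before forwarding (see _check_mode below),
    # either via train()/eval() or per call with is_train.
    data = np.random.normal(size=[16, 784]).astype(np.float32)
    outputs_d = M_dynamic(data, is_train=False)
    outputs_s = M_static(data, is_train=False)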
@@ -253,9 +251,7 @@ def __call__(self, inputs, is_train=None, **kwargs):
            If 'is_train' == False, this network is set as evaluation mode
        kwargs :
            For other keyword-only arguments.
-        Returns
-        -------
-
+
        """
 
        self._check_mode(is_train)
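As the parameter text above says, the mode can also be supplied per call. A short sketch of the two per-call variants, reusing M_static and data from the first sketch:

    # Per-call mode selection via the is_train keyword documented above.
    out_train = M_static(data, is_train=True)   # training mode for this call
    out_eval = M_static(data, is_train=False)   # evaluation mode for this call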
@@ -426,10 +422,6 @@ def train(self):
        >>> import tensorlayer as tl
        >>> net = tl.models.vgg16()
        >>> net.train()
-        # do training
-
-        Returns
-        -------
 
        """
        if self.is_train !=True:
@@ -447,9 +439,6 @@ def eval(self):
        >>> net.eval()
        # do evaluation
 
-        Returns
-        -------
-
        """
        if self.is_train != False:
            self.is_train = False
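train() and eval() set the mode persistently instead of per call; a minimal sketch of the toggle the two docstrings describe (assuming the pretrained vgg16 model used in the examples):

    import tensorlayer as tl

    net = tl.models.vgg16()
    net.train()   # training mode: layers such as Dropout are active
    # ... do training ...
    net.eval()    # evaluation mode: deterministic forward passes
    # ... do evaluation ...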
@@ -480,9 +469,6 @@ def as_layer(self):
        >>> nn = Dense(n_units=10, act=tf.nn.relu)(nn)
        >>> M_full = Model(inputs=ni, outputs=nn, name="mlp")
 
-        Returns
-        -------
-
        """
        if self._outputs is None:
            raise AttributeError("Dynamic network cannot be converted to Layer.")
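The as_layer() example is only partially visible in this hunk; the idea is that a static model can be embedded as a single layer in a larger one. A hedged reconstruction, reusing the imports from the first sketch (M_hidden and the second input are illustrative names, not from the diff):

    # Build a small static model and reuse it as one layer.
    ni = Input([None, 784])
    nn = Dense(n_units=800, act=tf.nn.relu)(ni)
    M_hidden = Model(inputs=ni, outputs=nn).as_layer()

    # Wire it into a larger static graph; a dynamic model would raise
    # AttributeError here, as the context lines above show.
    ni2 = Input([None, 784])
    nn2 = M_hidden(ni2)
    nn2 = Dense(n_units=10, act=tf.nn.relu)(nn2)
    M_full = Model(inputs=ni2, outputs=nn2, name="mlp")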
@@ -500,9 +486,6 @@ def _check_mode(self, is_train):
        is_train : boolean
            Network's mode. True means training mode while False means evaluation mode.
 
-        Returns
-        -------
-
        """
        # contradiction test
        if is_train is None and self.is_train is None:
@@ -530,9 +513,6 @@ def _set_mode_for_layers(self, is_train):
        is_train : boolean
            Network's mode. True means training mode while False means evaluation mode.
 
-        Returns
-        -------
-
        """
        for layer in self.all_layers:
            if isinstance(layer, Model):
@@ -680,16 +660,16 @@ def release_memory(self):
        --------
        >>> import tensorlayer as tl
        >>> vgg = tl.models.vgg16()
-        # training preparation
-        # ...
-        # back propagation
+        ... # training preparation
+        ... # ...
+        ... # back propagation
        >>> with tf.GradientTape() as tape:
        >>>     _logits = vgg(x_batch)
-        ## compute loss and update model
+        >>>     ## compute loss and update model
        >>>     _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss')
-        ## release unnecessary objects (layer.inputs, layer.outputs)
-        ## this function should be called with great caution
-        ## within the scope of tf.GradientTape(), using this function should be fine
+        >>>     ## release unnecessary objects (layer.inputs, layer.outputs)
+        >>>     ## this function should be called with great caution
+        >>>     ## within the scope of tf.GradientTape(), using this function should be fine
        >>> vgg.release_memory()
 
        '''
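The corrected example reads as one training step; a fuller sketch (x_batch and y_batch are assumed inputs; the 224x224 RGB images and 1000 classes are standard VGG16 shapes, not stated in the diff):

    import tensorflow as tf
    import tensorlayer as tl

    vgg = tl.models.vgg16()
    vgg.train()
    x_batch = tf.random.normal([8, 224, 224, 3])                   # assumed input shape
    y_batch = tf.random.uniform([8], maxval=1000, dtype=tf.int64)  # assumed labels

    with tf.GradientTape() as tape:
        _logits = vgg(x_batch)
        _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss')
        # Release cached layer.inputs / layer.outputs; per the docstring this
        # is fine inside the tape scope but should be used with caution.
        vgg.release_memory()
    grads = tape.gradient(_loss, vgg.weights)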
@@ -702,7 +682,6 @@ def save(self, filepath, save_weights=True):
        This function save can save both the architecture of neural networks and weights (optional).
        WARNING: If the model contains Lambda / ElementwiseLambda layer, please check the documentation of Lambda / ElementwiseLambda layer and find out the cases that have / have not been supported by Model.save().
 
-
        Parameters
        ----------
        filepath : str
@@ -715,6 +694,7 @@ def save(self, filepath, save_weights=True):
        >>> net = tl.models.vgg16()
        >>> net.save('./model.h5', save_weights=True)
        >>> new_net = Model.load('./model.h5', load_weights=True)
+
        """
        # TODO: support saving LambdaLayer that includes parametric self defined function with outside variables
        if self.outputs is None:
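For reference, the documented round trip in one place (a sketch following the example above; save() requires a static model, which is what the self.outputs check below enforces):

    import tensorlayer as tl
    from tensorlayer.models import Model

    net = tl.models.vgg16()
    net.save('./model.h5', save_weights=True)               # architecture + weights
    new_net = Model.load('./model.h5', load_weights=True)   # no re-declaration needed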
@@ -731,7 +711,6 @@ def load(filepath, load_weights=True):
        When a model is loaded by this function load, there is no need to reimplement or declare the architecture of the model explicitly in code.
        WARNING: If the model contains Lambda / ElementwiseLambda layer, please check the documentation of Lambda / ElementwiseLambda layer and find out the cases that have / have not been supported by Model.load().
 
-
        Parameters
        ----------
        filepath : str
@@ -783,9 +762,6 @@ def save_weights(self, filepath, format=None):
        >>> net.save_weights('./model.npz')
        >>> net.save_weights('./model.npz', format='npz_dict')
 
-        Returns
-        -------
-
        """
        if self.weights is None or len(self.weights) == 0:
            logging.warning("Model contains no weights or layers haven't been built, nothing will be saved")
@@ -859,9 +835,6 @@ def load_weights(self, filepath, format=None, in_order=True, skip=False):
        2) 'skip' is useful when 'format' is 'hdf5' or 'npz_dict'. If 'skip' is True,
        'in_order' argument will be ignored.
 
-        Returns
-        -------
-
        """
        if not os.path.exists(filepath):
            raise FileNotFoundError("file {} doesn't exist.".format(filepath))
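A weights-only sketch combining the save_weights and load_weights notes above (hedged: the hunk only states that skip applies to 'hdf5' and 'npz_dict' and overrides in_order; skipping weights by unmatched name is an assumption drawn from the parameter name):

    net = tl.models.vgg16()
    net.save_weights('./model.npz')                      # format inferred from suffix
    net.save_weights('./model.npz', format='npz_dict')   # name-keyed variant
    # With skip=True, in_order is ignored for the name-keyed formats.
    net.load_weights('./model.npz', format='npz_dict', skip=True)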
