@@ -36,7 +36,7 @@ class LRScheduler(object):
 
 Examples
 --------
-With TensorLayer
+With TensorLayerX
 
 >>> # Here is an example of a simple ``StepDecay`` implementation.
 >>> import tensorlayerx as tlx
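
For reference, the pattern the ``StepDecay`` example above follows: a minimal sketch of a custom scheduler, assuming the paddle-style base-class contract that TensorLayerX mirrors (``learning_rate`` is stored as ``self.base_lr``, ``self.last_epoch`` tracks progress, and subclasses override ``get_lr()``):

    import tensorlayerx as tlx

    class StepDecay(tlx.optimizers.lr.LRScheduler):
        def __init__(self, learning_rate, step_size, gamma=0.1, last_epoch=-1, verbose=False):
            self.step_size = step_size   # decay interval, in epochs
            self.gamma = gamma           # multiplicative decay factor
            super(StepDecay, self).__init__(learning_rate, last_epoch, verbose)

        def get_lr(self):
            # gamma is applied once per completed step_size interval
            i = self.last_epoch // self.step_size
            return self.base_lr * self.gamma ** i
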
@@ -118,7 +118,7 @@ class StepDecay(LRScheduler):
 
 Examples
 --------
-With TensorLayer
+With TensorLayerX
 
 >>> import tensorlayerx as tlx
 >>> scheduler = tlx.optimizers.lr.StepDecay(learning_rate=0.1, step_size=10, gamma=0.1, last_epoch=-1, verbose=False)
@@ -180,10 +180,10 @@ class CosineAnnealingDecay(LRScheduler):
 
 Examples
 --------
-With TensorLayer
+With TensorLayerX
 
 >>> import tensorlayerx as tlx
->>> scheduler = tlx.optimizers.lr.CosineAnnealingDecay(learning_rate=0.1, step=10, gamma=0.1, last_epoch=-1, verbose=False)
+>>> scheduler = tlx.optimizers.lr.CosineAnnealingDecay(learning_rate=0.1, T_max=10, eta_min=0, last_epoch=-1, verbose=False)
 >>> sgd = tlx.optimizers.SGD(learning_rate=scheduler, momentum=0.2)
 >>> for epoch in range(100):
 >>>     for step in range(100):
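
The corrected call uses ``CosineAnnealingDecay``'s actual parameters, ``T_max`` (half-period of the cosine, in epochs) and ``eta_min`` (the floor the rate decays to), in place of ``StepDecay``'s ``step``/``gamma``. As a sanity check, a library-independent sketch of the standard SGDR rule these parameters drive:

    import math

    def cosine_annealing(base_lr, T_max, eta_min, epoch):
        # eta_t = eta_min + (base_lr - eta_min) * (1 + cos(pi * epoch / T_max)) / 2
        return eta_min + (base_lr - eta_min) * (1 + math.cos(math.pi * epoch / T_max)) / 2

    # learning_rate=0.1, T_max=10, eta_min=0: 0.1 at epoch 0, 0.05 at epoch 5, 0.0 at epoch 10
    print([round(cosine_annealing(0.1, 10, 0.0, e), 4) for e in (0, 5, 10)])
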
@@ -248,7 +248,7 @@ class NoamDecay(LRScheduler):
 
 Examples
 --------
-With TensorLayer
+With TensorLayerX
 
 >>> import tensorlayerx as tlx
 >>> scheduler = tlx.optimizers.lr.NoamDecay(d_model=0.01, warmup_steps=100, verbose=True)
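
``NoamDecay`` is the warmup-then-decay schedule from "Attention Is All You Need". Assuming TensorLayerX follows the standard definition, the rate for step >= 1 is:

    def noam_lr(d_model, warmup_steps, step, base_lr=1.0):
        # lr = base_lr * d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)
        # rises linearly for step < warmup_steps, then decays as step^-0.5
        return base_lr * d_model ** -0.5 * min(step ** -0.5, step * warmup_steps ** -1.5)
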
@@ -308,7 +308,7 @@ class PiecewiseDecay(LRScheduler):
 
 Examples
 --------
-With TensorLayer
+With TensorLayerX
 
 >>> import tensorlayerx as tlx
 >>> scheduler = tlx.optimizers.lr.PiecewiseDecay(boundaries=[100, 200], values=[0.1, 0.5, 0.1], verbose=True)
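
In the ``PiecewiseDecay`` call above, ``values`` carries one more entry than ``boundaries``: each value holds on one interval. A reference sketch of the conventional lookup, assumed here to match the library's semantics:

    def piecewise_lr(boundaries, values, epoch):
        # values[i] applies while epoch < boundaries[i]; values[-1] applies afterwards
        for boundary, value in zip(boundaries, values):
            if epoch < boundary:
                return value
        return values[-1]

    # boundaries=[100, 200], values=[0.1, 0.5, 0.1]:
    # epoch < 100 -> 0.1, 100 <= epoch < 200 -> 0.5, epoch >= 200 -> 0.1
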
@@ -359,7 +359,7 @@ class NaturalExpDecay(LRScheduler):
 
 Examples
 --------
-With TensorLayer
+With TensorLayerX
 
 >>> import tensorlayerx as tlx
 >>> scheduler = tlx.optimizers.lr.NaturalExpDecay(learning_rate=0.1, gamma=0.1, verbose=True)
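
``NaturalExpDecay`` scales the base rate by a natural exponential of the epoch count; under the usual definition:

    import math

    def natural_exp_lr(base_lr, gamma, epoch):
        # lr = base_lr * e^(-gamma * epoch)
        return base_lr * math.exp(-gamma * epoch)
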
@@ -406,7 +406,7 @@ class InverseTimeDecay(LRScheduler):
 
 Examples
 --------
-With TensorLayer
+With TensorLayerX
 
 >>> import tensorlayerx as tlx
 >>> scheduler = tlx.optimizers.lr.InverseTimeDecay(learning_rate=0.1, gamma=0.1, verbose=True)
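
``InverseTimeDecay`` divides the base rate by a term that grows linearly with the epoch; a sketch under the usual definition:

    def inverse_time_lr(base_lr, gamma, epoch):
        # lr = base_lr / (1 + gamma * epoch)
        return base_lr / (1 + gamma * epoch)
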
@@ -471,7 +471,7 @@ class PolynomialDecay(LRScheduler):
 
 Examples
 --------
-With TensorLayer
+With TensorLayerX
 
 >>> import tensorlayerx as tlx
 >>> scheduler = tlx.optimizers.lr.PolynomialDecay(learning_rate=0.1, decay_steps=50, verbose=True)
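
``PolynomialDecay`` interpolates from the base rate down to an end rate over ``decay_steps`` epochs. A sketch of the common form, where ``end_lr=0.0`` and ``power=1.0`` are assumed defaults (linear decay when power is 1):

    def polynomial_lr(base_lr, decay_steps, epoch, end_lr=0.0, power=1.0):
        # lr = (base_lr - end_lr) * (1 - epoch/decay_steps)^power + end_lr
        epoch = min(epoch, decay_steps)  # hold at end_lr once decay_steps is reached
        return (base_lr - end_lr) * (1 - epoch / decay_steps) ** power + end_lr
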
@@ -549,7 +549,7 @@ class LinearWarmup(LRScheduler):
 
 Examples
 --------
-With TensorLayer
+With TensorLayerX
 
 >>> import tensorlayerx as tlx
 >>> scheduler = tlx.optimizers.lr.LinearWarmup(learning_rate=0.1, warmup_steps=20, start_lr=0.0, end_lr=0.5, verbose=True)
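
``LinearWarmup`` ramps the rate from ``start_lr`` to ``end_lr`` over ``warmup_steps``, after which the wrapped ``learning_rate`` (which may itself be another scheduler) is assumed to take over:

    def linear_warmup_lr(learning_rate, warmup_steps, start_lr, end_lr, epoch):
        if epoch < warmup_steps:
            # linear interpolation from start_lr to end_lr during warmup
            return start_lr + (end_lr - start_lr) * epoch / warmup_steps
        return learning_rate  # afterwards, the wrapped rate applies
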
@@ -617,7 +617,7 @@ class ExponentialDecay(LRScheduler):
 
 Examples
 --------
-With TensorLayer
+With TensorLayerX
 
 >>> import tensorlayerx as tlx
 >>> scheduler = tlx.optimizers.lr.ExponentialDecay(learning_rate=0.1, gamma=0.9, verbose=True)
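
``ExponentialDecay`` multiplies the rate by ``gamma`` once per epoch, so with ``gamma=0.9`` the rate after n epochs is 0.1 * 0.9**n (assuming the usual definition):

    def exponential_lr(base_lr, gamma, epoch):
        # lr = base_lr * gamma^epoch
        return base_lr * gamma ** epoch
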
@@ -675,7 +675,7 @@ class MultiStepDecay(LRScheduler):
 
 Examples
 --------
-With TensorLayer
+With TensorLayerX
 
 >>> import tensorlayerx as tlx
 >>> scheduler = tlx.optimizers.lr.MultiStepDecay(learning_rate=0.1, milestones=[50, 100], gamma=0.1, verbose=True)
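
``MultiStepDecay`` applies ``gamma`` once per milestone already passed. A sketch of the assumed rule for the call above (0.1 before epoch 50, 0.01 from epoch 50, 0.001 from epoch 100):

    def multi_step_lr(base_lr, milestones, gamma, epoch):
        # one factor of gamma for every milestone at or below the current epoch
        passed = sum(1 for m in milestones if m <= epoch)
        return base_lr * gamma ** passed
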
@@ -743,7 +743,7 @@ class LambdaDecay(LRScheduler):
 
 Examples
 --------
-With TensorLayer
+With TensorLayerX
 
 >>> import tensorlayerx as tlx
 >>> scheduler = tlx.optimizers.lr.LambdaDecay(learning_rate=0.1, lr_lambda=lambda x: 0.9**x, verbose=True)
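
``LambdaDecay`` defers the schedule to a user-supplied function of the epoch; with ``lr_lambda=lambda x: 0.9**x`` the effective rate would be 0.1 * 0.9**epoch, assuming the conventional semantics:

    def lambda_lr(base_lr, lr_lambda, epoch):
        # the user-supplied lambda maps epoch -> multiplier on the base rate
        return base_lr * lr_lambda(epoch)
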
@@ -812,7 +812,7 @@ class ReduceOnPlateau(LRScheduler):
 
 Examples
 --------
-With TensorLayer
+With TensorLayerX
 
 >>> import tensorlayerx as tlx
 >>> scheduler = tlx.optimizers.lr.ReduceOnPlateau(learning_rate=1.0, factor=0.5, patience=5, verbose=True)
@@ -878,11 +878,11 @@ def step(self, metrics, epoch=None):
         if isinstance(metrics, (tf.Tensor, np.ndarray)):
             assert len(metrics.shape) == 1 and metrics.shape[0] == 1, "the metrics.shape " \
                 "should be (1L,), but the current metrics.shape is {}. Maybe that " \
-                "you should call paddle.mean to process it first.".format(
+                "you should call tlx.reduce_mean to process it first.".format(
                     metrics.shape)
         elif not isinstance(metrics, (int, float, np.float32, np.float64)):
             raise TypeError(
-                "metrics must be 'int', 'float', 'np.float', 'numpy.ndarray' or 'paddle.Tensor', but receive {}".format(
+                "metrics must be 'int', 'float', 'np.float' or 'numpy.ndarray', but receive {}".format(
                     type(metrics)
                 )
             )
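
The validation above accepts a Python scalar or a shape-(1,) tensor/array and points users at ``tlx.reduce_mean`` for anything larger. A minimal usage sketch under that contract (the loss values here are placeholders for illustration):

    import tensorlayerx as tlx

    scheduler = tlx.optimizers.lr.ReduceOnPlateau(learning_rate=1.0, factor=0.5, patience=5, verbose=True)
    for epoch in range(20):
        val_loss = 1.0 / (epoch + 1)  # placeholder scalar metric; a plain float passes the check
        # a per-sample loss tensor would need reducing first, e.g.
        # val_loss = tlx.reduce_mean(per_sample_losses)
        scheduler.step(val_loss)
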