@@ -38,6 +38,13 @@ def __init__(self, epoch_id):
38
38
39
39
40
40
class EndEpochEvent(object):
    """
    Event fired when a training epoch finishes.

    Args:
        epoch_id(int): The ID of the epoch that just ended.
    """

    def __init__(self, epoch_id):
        # Expose the finished epoch's ID to event handlers.
        self.epoch = epoch_id
43
50
@@ -50,13 +57,44 @@ def __init__(self, epoch_id, step_id):
50
57
51
58
52
59
class EndStepEvent(object):
    """
    Event fired when a single training step finishes.

    Args:
        epoch_id(int): The ID of the epoch this step belongs to.
        step_id(int): The ID of the step that just ended.
        metrics(list): A list of fetched tensor values; the order matches
            the return values of :code:`train_func`.
    """

    def __init__(self, epoch_id, step_id, metrics):
        # Record where in the training run this step occurred and
        # the metric values fetched for it.
        self.epoch = epoch_id
        self.step = step_id
        self.metrics = metrics
57
74
58
75
59
76
class CheckpointConfig (object ):
77
+ """
78
+ Parameter object for :code:`fluid.io.save_checkpoint` and
79
+ :code:`fluid.Trainer`. Used to configuration how to save checkpoint.
80
+
81
+ Args:
82
+ checkpoint_dir(str): Directory path to save check point. Default is the
83
+ current directory.
84
+
85
+ max_num_checkpoints(int): The max number of local check points.
86
+ epoch_interval(int): Every number of epoch to save check point.
87
+ step_interval(int): Every number of step to save check point.
88
+
89
+ Examples:
90
+ >>> config = fluid.CheckpointConfig("./checkpoints")
91
+ >>> trainer = fluid.Trainer(train_func=train_program,
92
+ >>> place=place,
93
+ >>> optimizer_func=optimizer_func,
94
+ >>> checkpoint_config=config)
95
+ >>> trainer.train(...)
96
+ """
97
+
60
98
def __init__ (self ,
61
99
checkpoint_dir = None ,
62
100
max_num_checkpoints = 3 ,
0 commit comments