@@ -61,6 +61,7 @@ def exp_decay(global_step):
             Setting this value allows consistency between reruns.
         continue_training: when continue_training is True, once initialized,
             the model will be continually trained on every call of fit.
+        num_cores: Number of cores to be used. (default: 4)
         early_stopping_rounds: Activates early stopping if this is not None.
             Loss needs to decrease at least every <early_stopping_rounds>
             round(s) to continue training. (default: None)
@@ -79,7 +80,7 @@ def __init__(self, rnn_size, n_classes, cell_type='gru', num_layers=1,
                  steps=50, optimizer="SGD", learning_rate=0.1,
                  class_weight=None,
                  tf_random_seed=42, continue_training=False,
-                 verbose=1, early_stopping_rounds=None,
+                 num_cores=4, verbose=1, early_stopping_rounds=None,
                  max_to_keep=5, keep_checkpoint_every_n_hours=10000):
         self.rnn_size = rnn_size
         self.cell_type = cell_type
@@ -94,7 +95,8 @@ def __init__(self, rnn_size, n_classes, cell_type='gru', num_layers=1,
             batch_size=batch_size, steps=steps, optimizer=optimizer,
             learning_rate=learning_rate, class_weight=class_weight,
             tf_random_seed=tf_random_seed,
-            continue_training=continue_training, verbose=verbose,
+            continue_training=continue_training, num_cores=num_cores,
+            verbose=verbose,
             early_stopping_rounds=early_stopping_rounds,
             max_to_keep=max_to_keep,
             keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
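
The hunks above only add num_cores and forward it to the base estimator's constructor; how the value is consumed is not part of this diff. Below is a plausible sketch, assuming the base estimator maps num_cores onto TensorFlow's session threading options (the helper name _make_session is hypothetical, and the session API shown is the TF 0.x/1.x-era one this code targets):

import tensorflow as tf

def _make_session(num_cores=4, verbose=1):
    # Sketch only, not shown in this diff: cap both the intra-op and inter-op
    # thread pools at num_cores, and log device placement only at the highest
    # verbosity level.
    config = tf.ConfigProto(
        intra_op_parallelism_threads=num_cores,
        inter_op_parallelism_threads=num_cores,
        log_device_placement=(verbose > 1))
    return tf.Session(config=config)
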
@@ -150,18 +152,32 @@ def exp_decay(global_step):
             Setting this value allows consistency between reruns.
         continue_training: when continue_training is True, once initialized,
             the model will be continually trained on every call of fit.
+        num_cores: Number of cores to be used. (default: 4)
         early_stopping_rounds: Activates early stopping if this is not None.
             Loss needs to decrease at least every <early_stopping_rounds>
             round(s) to continue training. (default: None)
-    """
+        verbose: Controls the verbosity, possible values:
+            0: the algorithm and debug information is muted.
+            1: trainer prints the progress.
+            2: log device placement is printed.
+        early_stopping_rounds: Activates early stopping if this is not None.
+            Loss needs to decrease at least every <early_stopping_rounds>
+            round(s) to continue training. (default: None)
+        max_to_keep: The maximum number of recent checkpoint files to keep.
+            As new files are created, older files are deleted.
+            If None or 0, all checkpoint files are kept.
+            Defaults to 5 (that is, the 5 most recent checkpoint files are kept.)
+        keep_checkpoint_every_n_hours: Number of hours between each checkpoint
+            to be saved. The default value of 10,000 hours effectively disables the feature.
+    """

     def __init__(self, rnn_size, cell_type='gru', num_layers=1,
                  input_op_fn=null_input_op_fn, initial_state=None,
                  bidirectional=False, sequence_length=None,
                  n_classes=0, tf_master="", batch_size=32,
                  steps=50, optimizer="SGD", learning_rate=0.1,
                  tf_random_seed=42, continue_training=False,
-                 verbose=1, early_stopping_rounds=None,
+                 num_cores=4, verbose=1, early_stopping_rounds=None,
                  max_to_keep=5, keep_checkpoint_every_n_hours=10000):
         self.rnn_size = rnn_size
         self.cell_type = cell_type
@@ -175,7 +191,7 @@ def __init__(self, rnn_size, cell_type='gru', num_layers=1,
             n_classes=n_classes, tf_master=tf_master,
             batch_size=batch_size, steps=steps, optimizer=optimizer,
             learning_rate=learning_rate, tf_random_seed=tf_random_seed,
-            continue_training=continue_training, verbose=verbose,
+            continue_training=continue_training, num_cores=num_cores, verbose=verbose,
             early_stopping_rounds=early_stopping_rounds,
             max_to_keep=max_to_keep,
             keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
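
For reference, a minimal construction-only sketch with the new argument. The class names TensorFlowRNNClassifier and TensorFlowRNNRegressor and the skflow import path are assumptions (the hunk headers only show the __init__ signatures); data preparation and input_op_fn wiring are omitted.

# Hypothetical usage sketch; class names and import path are assumed,
# not visible in the hunks above.
from skflow import TensorFlowRNNClassifier, TensorFlowRNNRegressor

clf = TensorFlowRNNClassifier(rnn_size=16, n_classes=2, cell_type='gru',
                              num_cores=4,   # new argument, documented default
                              verbose=1)

reg = TensorFlowRNNRegressor(rnn_size=16, cell_type='gru',
                             num_cores=2,    # e.g. restrict training to 2 cores
                             verbose=1)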