
Commit 61ac580

adding num_cores parameter to all the estimators
1 parent 79eed03 commit 61ac580

File tree

skflow/estimators/dnn.py
skflow/estimators/linear.py
skflow/estimators/rnn.py

3 files changed: +45 −14 lines
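
The change is mechanical: each estimator constructor gains a num_cores keyword (default: 4) that is forwarded to the shared TensorFlowEstimator base class. A minimal usage sketch against the skflow API of this era; the dataset and layer sizes below are illustrative, not part of the commit:

import skflow
from sklearn import datasets, metrics

iris = datasets.load_iris()

# num_cores is the keyword added by this commit; it defaults to 4.
classifier = skflow.TensorFlowDNNClassifier(
    hidden_units=[10, 20, 10],  # illustrative layer sizes
    n_classes=3,
    steps=200,
    num_cores=8)  # request 8 cores instead of the default 4
classifier.fit(iris.data, iris.target)
score = metrics.accuracy_score(iris.target, classifier.predict(iris.data))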

skflow/estimators/dnn.py

Lines changed: 20 additions & 5 deletions

@@ -47,6 +47,7 @@ def exp_decay(global_step):
             Setting this value allows consistency between reruns.
         continue_training: when continue_training is True, once initialized
             model will be continually trained on every call of fit.
+        num_cores: Number of cores to be used. (default: 4)
         early_stopping_rounds: Activates early stopping if this is not None.
             Loss needs to decrease at least every <early_stopping_rounds>
             round(s) to continue training. (default: None)
@@ -61,7 +62,7 @@ def exp_decay(global_step):
     def __init__(self, hidden_units, n_classes, tf_master="", batch_size=32,
                  steps=200, optimizer="SGD", learning_rate=0.1,
                  class_weight=None,
-                 tf_random_seed=42, continue_training=False,
+                 tf_random_seed=42, continue_training=False, num_cores=4,
                  verbose=1, early_stopping_rounds=None,
                  max_to_keep=5, keep_checkpoint_every_n_hours=10000):
         self.hidden_units = hidden_units
@@ -71,7 +72,7 @@ def __init__(self, hidden_units, n_classes, tf_master="", batch_size=32,
             batch_size=batch_size, steps=steps, optimizer=optimizer,
             learning_rate=learning_rate, class_weight=class_weight,
             tf_random_seed=tf_random_seed,
-            continue_training=continue_training, verbose=verbose,
+            continue_training=continue_training, num_cores=num_cores, verbose=verbose,
             early_stopping_rounds=early_stopping_rounds,
             max_to_keep=max_to_keep,
             keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
@@ -121,14 +122,28 @@ def exp_decay(global_step):
             Setting this value allows consistency between reruns.
         continue_training: when continue_training is True, once initialized
             model will be continually trained on every call of fit.
+        num_cores: Number of cores to be used. (default: 4)
         early_stopping_rounds: Activates early stopping if this is not None.
             Loss needs to decrease at least every <early_stopping_rounds>
             round(s) to continue training. (default: None)
-    """
+        verbose: Controls the verbosity, possible values:
+            0: the algorithm and debug information is muted.
+            1: trainer prints the progress.
+            2: log device placement is printed.
+        early_stopping_rounds: Activates early stopping if this is not None.
+            Loss needs to decrease at least every <early_stopping_rounds>
+            round(s) to continue training. (default: None)
+        max_to_keep: The maximum number of recent checkpoint files to keep.
+            As new files are created, older files are deleted.
+            If None or 0, all checkpoint files are kept.
+            Defaults to 5 (that is, the 5 most recent checkpoint files are kept).
+        keep_checkpoint_every_n_hours: Number of hours between each checkpoint
+            to be saved. The default value of 10,000 hours effectively disables the feature.
+    """

     def __init__(self, hidden_units, n_classes=0, tf_master="", batch_size=32,
                  steps=200, optimizer="SGD", learning_rate=0.1,
-                 tf_random_seed=42, continue_training=False,
+                 tf_random_seed=42, continue_training=False, num_cores=4,
                  verbose=1, early_stopping_rounds=None,
                  max_to_keep=5, keep_checkpoint_every_n_hours=10000):
         self.hidden_units = hidden_units
@@ -137,7 +152,7 @@ def __init__(self, hidden_units, n_classes=0, tf_master="", batch_size=32,
             n_classes=n_classes, tf_master=tf_master,
             batch_size=batch_size, steps=steps, optimizer=optimizer,
             learning_rate=learning_rate, tf_random_seed=tf_random_seed,
-            continue_training=continue_training, verbose=verbose,
+            continue_training=continue_training, num_cores=num_cores, verbose=verbose,
             early_stopping_rounds=early_stopping_rounds,
             max_to_keep=max_to_keep,
             keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
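
Note that this diff only forwards num_cores to the TensorFlowEstimator base class; the code that consumes it is not part of this commit. In TensorFlow of this vintage, a core count is typically applied when the session is created, through tf.ConfigProto's thread-pool settings. A minimal sketch under that assumption; make_session is a hypothetical helper, not skflow API:

import tensorflow as tf

def make_session(num_cores=4):
    # Hypothetical wiring: cap both TensorFlow thread pools at num_cores,
    # the usual way a single core-count knob maps onto a session.
    config = tf.ConfigProto(
        inter_op_parallelism_threads=num_cores,
        intra_op_parallelism_threads=num_cores)
    return tf.Session(config=config)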

skflow/estimators/linear.py

Lines changed: 4 additions & 4 deletions

@@ -26,14 +26,14 @@ class TensorFlowLinearRegressor(TensorFlowEstimator, RegressorMixin):

     def __init__(self, n_classes=0, tf_master="", batch_size=32, steps=200, optimizer="SGD",
                  learning_rate=0.1, tf_random_seed=42, continue_training=False,
-                 verbose=1, early_stopping_rounds=None,
+                 num_cores=4, verbose=1, early_stopping_rounds=None,
                  max_to_keep=5, keep_checkpoint_every_n_hours=10000):
         super(TensorFlowLinearRegressor, self).__init__(
             model_fn=models.linear_regression, n_classes=n_classes,
             tf_master=tf_master,
             batch_size=batch_size, steps=steps, optimizer=optimizer,
             learning_rate=learning_rate, tf_random_seed=tf_random_seed,
-            continue_training=continue_training,
+            continue_training=continue_training, num_cores=num_cores,
             verbose=verbose, early_stopping_rounds=early_stopping_rounds,
             max_to_keep=max_to_keep,
             keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
@@ -54,7 +54,7 @@ class TensorFlowLinearClassifier(TensorFlowEstimator, ClassifierMixin):

     def __init__(self, n_classes, tf_master="", batch_size=32, steps=200, optimizer="SGD",
                  learning_rate=0.1, class_weight=None,
-                 tf_random_seed=42, continue_training=False,
+                 tf_random_seed=42, continue_training=False, num_cores=4,
                  verbose=1, early_stopping_rounds=None,
                  max_to_keep=5, keep_checkpoint_every_n_hours=10000):
         super(TensorFlowLinearClassifier, self).__init__(
@@ -63,7 +63,7 @@ def __init__(self, n_classes, tf_master="", batch_size=32, steps=200, optimizer=
             batch_size=batch_size, steps=steps, optimizer=optimizer,
             learning_rate=learning_rate, class_weight=class_weight,
             tf_random_seed=tf_random_seed,
-            continue_training=continue_training,
+            continue_training=continue_training, num_cores=num_cores,
             verbose=verbose, early_stopping_rounds=early_stopping_rounds,
             max_to_keep=max_to_keep,
             keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
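
The linear estimators receive the same keyword at the same point in the call chain. Because they mix in scikit-learn's RegressorMixin and ClassifierMixin, they can be driven through the familiar fit/predict interface; the synthetic data below is purely illustrative:

import numpy as np
import skflow

X = np.random.rand(100, 3).astype(np.float32)
y = X.dot(np.array([1.0, 2.0, 3.0])) + 0.5  # synthetic linear target

# n_classes=0 selects regression; num_cores is the newly added keyword.
regressor = skflow.TensorFlowLinearRegressor(steps=200, num_cores=2)
regressor.fit(X, y)
predictions = regressor.predict(X)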

skflow/estimators/rnn.py

Lines changed: 21 additions & 5 deletions

@@ -61,6 +61,7 @@ def exp_decay(global_step):
             Setting this value allows consistency between reruns.
         continue_training: when continue_training is True, once initialized
             model will be continually trained on every call of fit.
+        num_cores: Number of cores to be used. (default: 4)
         early_stopping_rounds: Activates early stopping if this is not None.
             Loss needs to decrease at least every <early_stopping_rounds>
             round(s) to continue training. (default: None)
@@ -79,7 +80,7 @@ def __init__(self, rnn_size, n_classes, cell_type='gru', num_layers=1,
                  steps=50, optimizer="SGD", learning_rate=0.1,
                  class_weight=None,
                  tf_random_seed=42, continue_training=False,
-                 verbose=1, early_stopping_rounds=None,
+                 num_cores=4, verbose=1, early_stopping_rounds=None,
                  max_to_keep=5, keep_checkpoint_every_n_hours=10000):
         self.rnn_size = rnn_size
         self.cell_type = cell_type
@@ -94,7 +95,8 @@ def __init__(self, rnn_size, n_classes, cell_type='gru', num_layers=1,
             batch_size=batch_size, steps=steps, optimizer=optimizer,
             learning_rate=learning_rate, class_weight=class_weight,
             tf_random_seed=tf_random_seed,
-            continue_training=continue_training, verbose=verbose,
+            continue_training=continue_training, num_cores=num_cores,
+            verbose=verbose,
             early_stopping_rounds=early_stopping_rounds,
             max_to_keep=max_to_keep,
             keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
@@ -150,18 +152,32 @@ def exp_decay(global_step):
             Setting this value allows consistency between reruns.
         continue_training: when continue_training is True, once initialized
             model will be continually trained on every call of fit.
+        num_cores: Number of cores to be used. (default: 4)
         early_stopping_rounds: Activates early stopping if this is not None.
             Loss needs to decrease at least every <early_stopping_rounds>
             round(s) to continue training. (default: None)
-    """
+        verbose: Controls the verbosity, possible values:
+            0: the algorithm and debug information is muted.
+            1: trainer prints the progress.
+            2: log device placement is printed.
+        early_stopping_rounds: Activates early stopping if this is not None.
+            Loss needs to decrease at least every <early_stopping_rounds>
+            round(s) to continue training. (default: None)
+        max_to_keep: The maximum number of recent checkpoint files to keep.
+            As new files are created, older files are deleted.
+            If None or 0, all checkpoint files are kept.
+            Defaults to 5 (that is, the 5 most recent checkpoint files are kept).
+        keep_checkpoint_every_n_hours: Number of hours between each checkpoint
+            to be saved. The default value of 10,000 hours effectively disables the feature.
+    """

     def __init__(self, rnn_size, cell_type='gru', num_layers=1,
                  input_op_fn=null_input_op_fn, initial_state=None,
                  bidirectional=False, sequence_length=None,
                  n_classes=0, tf_master="", batch_size=32,
                  steps=50, optimizer="SGD", learning_rate=0.1,
                  tf_random_seed=42, continue_training=False,
-                 verbose=1, early_stopping_rounds=None,
+                 num_cores=4, verbose=1, early_stopping_rounds=None,
                  max_to_keep=5, keep_checkpoint_every_n_hours=10000):
         self.rnn_size = rnn_size
         self.cell_type = cell_type
@@ -175,7 +191,7 @@ def __init__(self, rnn_size, cell_type='gru', num_layers=1,
             n_classes=n_classes, tf_master=tf_master,
             batch_size=batch_size, steps=steps, optimizer=optimizer,
             learning_rate=learning_rate, tf_random_seed=tf_random_seed,
-            continue_training=continue_training, verbose=verbose,
+            continue_training=continue_training, num_cores=num_cores, verbose=verbose,
             early_stopping_rounds=early_stopping_rounds,
             max_to_keep=max_to_keep,
             keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
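
The RNN estimators follow the same pattern. Constructing one only stores parameters, so the new keyword can be exercised without a full input pipeline; input_op_fn below is an illustrative stand-in for whatever sequence preprocessing a real model needs, and its exact contract is outside this diff:

import skflow
import tensorflow as tf

def input_op_fn(X):
    # Illustrative: split a [batch, time] tensor into per-timestep inputs,
    # the list-of-tensors layout early TensorFlow RNN cells consumed.
    return tf.split(1, 5, X)  # old tf.split signature: (dim, num_splits, value)

classifier = skflow.TensorFlowRNNClassifier(
    rnn_size=10, n_classes=2, cell_type='gru',
    input_op_fn=input_op_fn,
    steps=50, num_cores=4)  # the newly threaded-through keyword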
