14 | 14 | flags.DEFINE_float("learning_rate", 1e-3, "Learning Rate") |
15 | 15 | flags.DEFINE_boolean("use_tpu", True, "Use TPU")
16 | 16 | flags.DEFINE_boolean("use_compat", True, "Use OptimizerV1 from compat module") |
| 17 | +flags.DEFINE_integer("max_steps", 1000, "Maximum Number of Steps for TPU Estimator") |
17 | 18 | flags.DEFINE_string( |
18 | 19 | "model_dir", |
19 | 20 | "model_dir/", |
@@ -67,7 +68,7 @@ def model_fn(features, labels, mode, params): |
67 | 68 | else: |
68 | 69 | optimizer = tf.compat.v1.train.AdamOptimizer( |
69 | 70 | params["learning_rate"]) |
70 | | - if params.get["use_tpu"]: |
| 71 | + if params["use_tpu"]: |
71 | 72 | optimizer = tpu_optimizer.CrossShardOptimizer(optimizer) |
72 | 73 |
73 | 74 | with tf.GradientTape() as tape: |
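For context, the removed line would fail at runtime: `params.get["use_tpu"]` raises a `TypeError` because `dict.get` is a method, not subscriptable, so plain indexing is what actually gates the TPU wrapper. A minimal sketch of the fixed branch (not this repository's code; the function name and the `tf.compat.v1.tpu.CrossShardOptimizer` alias are assumptions, the file itself goes through `tpu_optimizer`):

```python
import tensorflow as tf

def build_optimizer(params):
    # `params` is the dict TPUEstimator forwards to model_fn; "learning_rate"
    # and "use_tpu" mirror the flags defined at the top of this file.
    optimizer = tf.compat.v1.train.AdamOptimizer(params["learning_rate"])
    if params["use_tpu"]:
        # Aggregates gradients across TPU shards before applying them.
        optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer)
    return optimizer
```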
@@ -95,7 +96,7 @@ def train_fn(use_compat): |
95 | 96 | zip(gradient, model.trainable_variables)) |
96 | 97 | else: |
97 | 98 | apply_grads = optimizer.apply_gradients( |
98 | | - zip(gradient, model_trainable_variables), |
| 99 | + zip(gradient, model.trainable_variables), |
99 | 100 | global_step=global_step) |
100 | 101 | return apply_grads |
101 | 102 |
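The typo fix above (`model_trainable_variables` → `model.trainable_variables`) restores the standard grads-and-vars pairing. As a rough illustration only (the function and argument names here are assumed, not the file's), the two `apply_gradients` paths the `use_compat` flag selects between differ only in the `global_step` argument:

```python
import tensorflow as tf

def apply_grads(optimizer, gradient, model, use_compat, global_step=None):
    if not use_compat:
        # Keras / OptimizerV2 path: apply_gradients takes no global_step.
        return optimizer.apply_gradients(
            zip(gradient, model.trainable_variables))
    # tf.compat.v1 OptimizerV1 path: global_step is incremented on apply.
    return optimizer.apply_gradients(
        zip(gradient, model.trainable_variables),
        global_step=global_step)
```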
@@ -130,12 +131,14 @@ def main(_): |
130 | 131 | "learning_rate": FLAGS.learning_rate |
131 | 132 | } |
132 | 133 | ) |
133 | | - |
134 | | - classifier.train( |
135 | | - input_fn=lambda params: input_fn( |
136 | | - mode=tf.estimator.ModeKeys.TRAIN, |
137 | | - **params), |
138 | | - max_steps=None, steps=None) |
| 134 | + try: |
| 135 | + classifier.train( |
| 136 | + input_fn=lambda params: input_fn( |
| 137 | + mode=tf.estimator.ModeKeys.TRAIN, |
| 138 | + **params), |
| 139 | + max_steps=FLAGS.max_steps) |
| 140 | + except Exception: |
| 141 | + pass |
139 | 142 | # TODO(@captain-pool): Implement Evaluation |
140 | 143 | if FLAGS.infer: |
141 | 144 | def prepare_input_fn(path): |
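For reference, a self-contained sketch of how the new `max_steps` flag is declared and consumed; only the flag name, default, and help string mirror this diff, everything else is illustrative:

```python
from absl import app, flags

flags.DEFINE_integer("max_steps", 1000,
                     "Maximum Number of Steps for TPU Estimator")
FLAGS = flags.FLAGS

def main(_):
    # In the real script this value is passed straight to
    # TPUEstimator.train(input_fn=..., max_steps=FLAGS.max_steps),
    # replacing the previous max_steps=None, steps=None call.
    print("Training capped at", FLAGS.max_steps, "steps")

if __name__ == "__main__":
    app.run(main)
```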