
Commit 0788a23

saberkun authored and tensorflower-gardener committed
Internal change
PiperOrigin-RevId: 285533511
1 parent 357f30f commit 0788a23

File tree

8 files changed: +51 -62 lines changed


official/r1/boosted_trees/train_higgs.py

Lines changed: 1 addition & 2 deletions
@@ -53,7 +53,6 @@
 # pylint: enable=g-bad-import-order

 from official.utils.flags import core as flags_core
-from tensorflow.contrib import estimator as contrib_estimator
 from official.utils.flags._conventions import help_wrap
 from official.utils.logs import logger

@@ -230,7 +229,7 @@ def train_boosted_trees(flags_obj):

   # Though BoostedTreesClassifier is under tf.estimator, faster in-memory
   # training is yet provided as a contrib library.
-  classifier = contrib_estimator.boosted_trees_classifier_train_in_memory(
+  classifier = tf.contrib.estimator.boosted_trees_classifier_train_in_memory(
       train_input_fn,
       feature_columns,
       model_dir=flags_obj.model_dir or None,
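
The same substitution, an explicit contrib import replaced by attribute access through tf.contrib, repeats in every file below. A minimal sketch of the two access styles, assuming a TF 1.x environment where tensorflow.contrib is importable (the guard and the alias name are illustrative, not part of the commit):

import tensorflow as tf

# Old style: binds the contrib submodule when this file is imported.
# from tensorflow.contrib import estimator as contrib_estimator
# train_fn = contrib_estimator.boosted_trees_classifier_train_in_memory

# New style: the submodule is looked up through the tf.contrib attribute at
# the call site rather than bound at import time, and the reference can sit
# behind a guard on builds without contrib.
if hasattr(tf, 'contrib'):
  train_fn = tf.contrib.estimator.boosted_trees_classifier_train_in_memory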

official/r1/mnist/mnist_eager.py

Lines changed: 8 additions & 9 deletions
@@ -33,7 +33,6 @@
 from absl import app as absl_app
 from absl import flags
 import tensorflow as tf
-from tensorflow.contrib import summary as contrib_summary
 from tensorflow.python import eager as tfe
 # pylint: enable=g-bad-import-order

@@ -62,16 +61,16 @@ def train(model, optimizer, dataset, step_counter, log_interval=None):

   start = time.time()
   for (batch, (images, labels)) in enumerate(dataset):
-    with contrib_summary.record_summaries_every_n_global_steps(
+    with tf.contrib.summary.record_summaries_every_n_global_steps(
         10, global_step=step_counter):
       # Record the operations used to compute the loss given the input,
       # so that the gradient of the loss with respect to the variables
       # can be computed.
       with tf.GradientTape() as tape:
         logits = model(images, training=True)
         loss_value = loss(logits, labels)
-        contrib_summary.scalar('loss', loss_value)
-        contrib_summary.scalar('accuracy', compute_accuracy(logits, labels))
+        tf.contrib.summary.scalar('loss', loss_value)
+        tf.contrib.summary.scalar('accuracy', compute_accuracy(logits, labels))
       grads = tape.gradient(loss_value, model.variables)
       optimizer.apply_gradients(
           zip(grads, model.variables), global_step=step_counter)

@@ -94,9 +93,9 @@ def test(model, dataset):
         tf.cast(labels, tf.int64))
   print('Test set: Average loss: %.4f, Accuracy: %4f%%\n' %
         (avg_loss.result(), 100 * accuracy.result()))
-  with contrib_summary.always_record_summaries():
-    contrib_summary.scalar('loss', avg_loss.result())
-    contrib_summary.scalar('accuracy', accuracy.result())
+  with tf.contrib.summary.always_record_summaries():
+    tf.contrib.summary.scalar('loss', avg_loss.result())
+    tf.contrib.summary.scalar('accuracy', accuracy.result())


 def run_mnist_eager(flags_obj):

@@ -138,9 +137,9 @@ def run_mnist_eager(flags_obj):
   else:
     train_dir = None
     test_dir = None
-  summary_writer = contrib_summary.create_file_writer(
+  summary_writer = tf.contrib.summary.create_file_writer(
       train_dir, flush_millis=10000)
-  test_summary_writer = contrib_summary.create_file_writer(
+  test_summary_writer = tf.contrib.summary.create_file_writer(
       test_dir, flush_millis=10000, name='test')

   # Create and restore checkpoint (if one exists on the path)
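
For context, a condensed sketch of the tf.contrib.summary flow the eager MNIST loop uses after this change (TF 1.x only; the log directory and the scalar value are placeholders):

import tensorflow as tf

tf.enable_eager_execution()
step_counter = tf.train.get_or_create_global_step()
writer = tf.contrib.summary.create_file_writer('/tmp/mnist_summaries',
                                               flush_millis=10000)
with writer.as_default():
  with tf.contrib.summary.record_summaries_every_n_global_steps(
      10, global_step=step_counter):
    # In the real loop the value comes from the loss tensor; 0.5 is a stand-in.
    tf.contrib.summary.scalar('loss', 0.5)
  step_counter.assign_add(1)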

official/r1/mnist/mnist_tpu.py

Lines changed: 12 additions & 11 deletions
@@ -33,8 +33,6 @@

 # For open source environment, add grandparent directory for import
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(sys.path[0]))))
-from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
-from tensorflow.contrib import tpu as contrib_tpu

 from official.r1.mnist import dataset  # pylint: disable=wrong-import-position
 from official.r1.mnist import mnist  # pylint: disable=wrong-import-position

@@ -100,7 +98,7 @@ def model_fn(features, labels, mode, params):
         'class_ids': tf.argmax(logits, axis=1),
         'probabilities': tf.nn.softmax(logits),
     }
-    return contrib_tpu.TPUEstimatorSpec(mode, predictions=predictions)
+    return tf.contrib.tpu.TPUEstimatorSpec(mode, predictions=predictions)

   logits = model(image, training=(mode == tf.estimator.ModeKeys.TRAIN))
   loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

@@ -113,14 +111,14 @@ def model_fn(features, labels, mode, params):
         decay_rate=0.96)
     optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
     if FLAGS.use_tpu:
-      optimizer = contrib_tpu.CrossShardOptimizer(optimizer)
-    return contrib_tpu.TPUEstimatorSpec(
+      optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
+    return tf.contrib.tpu.TPUEstimatorSpec(
         mode=mode,
         loss=loss,
         train_op=optimizer.minimize(loss, tf.train.get_global_step()))

   if mode == tf.estimator.ModeKeys.EVAL:
-    return contrib_tpu.TPUEstimatorSpec(
+    return tf.contrib.tpu.TPUEstimatorSpec(
         mode=mode, loss=loss, eval_metrics=(metric_fn, [labels, logits]))


@@ -155,18 +153,21 @@ def main(argv):
   del argv  # Unused.
   tf.logging.set_verbosity(tf.logging.INFO)

-  tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
-      FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
+  tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
+      FLAGS.tpu,
+      zone=FLAGS.tpu_zone,
+      project=FLAGS.gcp_project
+  )

-  run_config = contrib_tpu.RunConfig(
+  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      model_dir=FLAGS.model_dir,
      session_config=tf.ConfigProto(
          allow_soft_placement=True, log_device_placement=True),
-      tpu_config=contrib_tpu.TPUConfig(FLAGS.iterations, FLAGS.num_shards),
+      tpu_config=tf.contrib.tpu.TPUConfig(FLAGS.iterations, FLAGS.num_shards),
  )

-  estimator = contrib_tpu.TPUEstimator(
+  estimator = tf.contrib.tpu.TPUEstimator(
      model_fn=model_fn,
      use_tpu=FLAGS.use_tpu,
      train_batch_size=FLAGS.batch_size,
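
A compact sketch of the TPUEstimator wiring this diff preserves, with a stub model_fn so the snippet stands alone (TF 1.x; the TPU name, zone, project, directory, and batch size are illustrative, and stub_model_fn merely stands in for the real model_fn in mnist_tpu.py):

import tensorflow as tf

def stub_model_fn(features, labels, mode, params):
  # Stand-in for the real model_fn: tiny linear model, TPU-friendly train op.
  logits = tf.layers.dense(features, 10)
  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
  optimizer = tf.contrib.tpu.CrossShardOptimizer(
      tf.train.GradientDescentOptimizer(0.05))
  return tf.contrib.tpu.TPUEstimatorSpec(
      mode=mode, loss=loss,
      train_op=optimizer.minimize(loss, tf.train.get_global_step()))

resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
    'my-tpu', zone='us-central1-b', project='my-gcp-project')
run_config = tf.contrib.tpu.RunConfig(
    cluster=resolver,
    model_dir='/tmp/mnist_tpu',
    tpu_config=tf.contrib.tpu.TPUConfig(1000, 8))  # iterations, num_shards
estimator = tf.contrib.tpu.TPUEstimator(
    model_fn=stub_model_fn, use_tpu=True, train_batch_size=1024,
    config=run_config)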

official/r1/resnet/resnet_run_loop.py

Lines changed: 1 addition & 2 deletions
@@ -30,7 +30,6 @@

 from absl import flags
 import tensorflow as tf
-from tensorflow.contrib import opt as contrib_opt

 from official.r1.resnet import imagenet_preprocessing
 from official.r1.resnet import resnet_model

@@ -446,7 +445,7 @@ def exclude_batch_norm(name):
     tf.compat.v1.summary.scalar('learning_rate', learning_rate)

     if flags.FLAGS.enable_lars:
-      optimizer = contrib_opt.LARSOptimizer(
+      optimizer = tf.contrib.opt.LARSOptimizer(
          learning_rate,
          momentum=momentum,
          weight_decay=weight_decay,
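
The LARS branch touched above reduces to one constructor call; a hedged sketch with placeholder hyperparameters (TF 1.x contrib; the momentum fallback is only a stand-in for whatever the non-LARS branch uses):

import tensorflow as tf

enable_lars = True  # stands in for flags.FLAGS.enable_lars
learning_rate, momentum, weight_decay = 0.1, 0.9, 1e-4  # placeholder values

if enable_lars:
  # Layer-wise Adaptive Rate Scaling, from tf.contrib.opt.
  optimizer = tf.contrib.opt.LARSOptimizer(
      learning_rate, momentum=momentum, weight_decay=weight_decay)
else:
  optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=momentum)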

official/r1/utils/tpu.py

Lines changed: 4 additions & 5 deletions
@@ -15,7 +15,6 @@
 """Functions specific to running TensorFlow on TPUs."""

 import tensorflow as tf
-from tensorflow.contrib import summary as contrib_summary


 # "local" is a magic word in the TPU cluster resolver; it informs the resolver

@@ -59,13 +58,13 @@ def host_call_fn(global_step, *args):
       List of summary ops to run on the CPU host.
     """
     step = global_step[0]
-    with contrib_summary.create_file_writer(
+    with tf.contrib.summary.create_file_writer(
         logdir=model_dir, filename_suffix=".host_call").as_default():
-      with contrib_summary.always_record_summaries():
+      with tf.contrib.summary.always_record_summaries():
        for i, name in enumerate(metric_names):
-          contrib_summary.scalar(prefix + name, args[i][0], step=step)
+          tf.contrib.summary.scalar(prefix + name, args[i][0], step=step)

-        return contrib_summary.all_summary_ops()
+        return tf.contrib.summary.all_summary_ops()

   # To log the current learning rate, and gradient norm for Tensorboard, the
   # summary op needs to be run on the host CPU via host_call. host_call
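
The hunk above is the body of a host_call function; a self-contained sketch of that pattern, with an illustrative metric name and log directory (TF 1.x):

import tensorflow as tf

def host_call_fn(global_step, loss):
  # Tensors reach the host with a leading per-batch dimension, so the scalar
  # value is read from element [0], matching the code above.
  step = global_step[0]
  with tf.contrib.summary.create_file_writer(
      logdir='/tmp/model', filename_suffix='.host_call').as_default():
    with tf.contrib.summary.always_record_summaries():
      tf.contrib.summary.scalar('training/loss', loss[0], step=step)
      return tf.contrib.summary.all_summary_ops()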

official/recommendation/neumf_model.py

Lines changed: 2 additions & 3 deletions
@@ -37,7 +37,6 @@

 from six.moves import xrange  # pylint: disable=redefined-builtin
 import tensorflow as tf
-from tensorflow.contrib import tpu as contrib_tpu

 from official.recommendation import constants as rconst
 from official.recommendation import movielens

@@ -117,7 +116,7 @@ def neumf_model_fn(features, labels, mode, params):
        epsilon=params["epsilon"])
    if params["use_tpu"]:
      # TODO(seemuch): remove this contrib import
-      optimizer = contrib_tpu.CrossShardOptimizer(optimizer)
+      optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)

    mlperf_helper.ncf_print(key=mlperf_helper.TAGS.MODEL_HP_LOSS_FN,
                            value=mlperf_helper.TAGS.BCE)

@@ -275,7 +274,7 @@ def _get_estimator_spec_with_metrics(logits,  # type: tf.Tensor
                                      use_tpu_spec)

   if use_tpu_spec:
-    return contrib_tpu.TPUEstimatorSpec(
+    return tf.contrib.tpu.TPUEstimatorSpec(
        mode=tf.estimator.ModeKeys.EVAL,
        loss=cross_entropy,
        eval_metrics=(metric_fn, [in_top_k, ndcg, metric_weights]))
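
The one functional line changed in neumf_model_fn wraps the optimizer for cross-replica gradient aggregation on TPU; a minimal sketch of that wrapping (TF 1.x; the Adam optimizer and its hyperparameters are placeholders for whatever the model actually builds):

import tensorflow as tf

use_tpu = True  # stands in for params["use_tpu"]
optimizer = tf.train.AdamOptimizer(learning_rate=0.001, epsilon=1e-8)
if use_tpu:
  # Aggregates (by default, averages) gradients across TPU shards before
  # applying them.
  optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)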

official/transformer/transformer_main.py

Lines changed: 16 additions & 23 deletions
@@ -33,9 +33,6 @@
 # pylint: enable=g-bad-import-order

 from official.r1.utils import export
-from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
-from tensorflow.contrib import opt as contrib_opt
-from tensorflow.contrib import tpu as contrib_tpu
 from official.r1.utils import tpu as tpu_util
 from official.transformer import compute_bleu
 from official.transformer import translate

@@ -118,10 +115,8 @@ def model_fn(features, labels, mode, params):
       metric_fn = lambda logits, labels: (
           metrics.get_eval_metrics(logits, labels, params=params))
       eval_metrics = (metric_fn, [logits, labels])
-      return contrib_tpu.TPUEstimatorSpec(
-          mode=mode,
-          loss=loss,
-          predictions={"predictions": logits},
+      return tf.contrib.tpu.TPUEstimatorSpec(
+          mode=mode, loss=loss, predictions={"predictions": logits},
           eval_metrics=eval_metrics)
     return tf.estimator.EstimatorSpec(
         mode=mode, loss=loss, predictions={"predictions": logits},

@@ -133,14 +128,12 @@ def model_fn(features, labels, mode, params):
     # in TensorBoard.
     metric_dict["minibatch_loss"] = loss
     if params["use_tpu"]:
-      return contrib_tpu.TPUEstimatorSpec(
-          mode=mode,
-          loss=loss,
-          train_op=train_op,
+      return tf.contrib.tpu.TPUEstimatorSpec(
+          mode=mode, loss=loss, train_op=train_op,
          host_call=tpu_util.construct_scalar_host_call(
-              metric_dict=metric_dict,
-              model_dir=params["model_dir"],
-              prefix="training/"))
+              metric_dict=metric_dict, model_dir=params["model_dir"],
+              prefix="training/")
+      )
     record_scalars(metric_dict)
     return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

@@ -180,14 +173,14 @@ def get_train_op_and_metrics(loss, params):

     # Create optimizer. Use LazyAdamOptimizer from TF contrib, which is faster
     # than the TF core Adam optimizer.
-    optimizer = contrib_opt.LazyAdamOptimizer(
+    optimizer = tf.contrib.opt.LazyAdamOptimizer(
        learning_rate,
        beta1=params["optimizer_adam_beta1"],
        beta2=params["optimizer_adam_beta2"],
        epsilon=params["optimizer_adam_epsilon"])

     if params["use_tpu"] and params["tpu"] != tpu_util.LOCAL:
-      optimizer = contrib_tpu.CrossShardOptimizer(optimizer)
+      optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)

     # Uses automatic mixed precision FP16 training if on GPU.
     if params["dtype"] == "fp16":

@@ -535,31 +528,31 @@ def construct_estimator(flags_obj, params, schedule_manager):
        model_fn=model_fn, model_dir=flags_obj.model_dir, params=params,
        config=tf.estimator.RunConfig(train_distribute=distribution_strategy))

-  tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
+  tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
      tpu=flags_obj.tpu,
      zone=flags_obj.tpu_zone,
-      project=flags_obj.tpu_gcp_project)
+      project=flags_obj.tpu_gcp_project
+  )

-  tpu_config = contrib_tpu.TPUConfig(
+  tpu_config = tf.contrib.tpu.TPUConfig(
      iterations_per_loop=schedule_manager.single_iteration_train_steps,
      num_shards=flags_obj.num_tpu_shards)

-  run_config = contrib_tpu.RunConfig(
+  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      model_dir=flags_obj.model_dir,
      session_config=tf.ConfigProto(
          allow_soft_placement=True, log_device_placement=True),
      tpu_config=tpu_config)

-  return contrib_tpu.TPUEstimator(
+  return tf.contrib.tpu.TPUEstimator(
      model_fn=model_fn,
      use_tpu=params["use_tpu"] and flags_obj.tpu != tpu_util.LOCAL,
      train_batch_size=schedule_manager.batch_size,
      eval_batch_size=schedule_manager.batch_size,
      params={
          # TPUEstimator needs to populate batch_size itself due to sharding.
-          key: value for key, value in params.items() if key != "batch_size"
-      },
+          key: value for key, value in params.items() if key != "batch_size"},
      config=run_config)

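
As the comment in the hunk notes, the Transformer keeps contrib's LazyAdamOptimizer; a hedged sketch of that construction with illustrative hyperparameters (TF 1.x; the real values come from the params dict and the learning-rate schedule):

import tensorflow as tf

learning_rate = 2.0  # placeholder; the real value comes from the LR schedule
optimizer = tf.contrib.opt.LazyAdamOptimizer(
    learning_rate,
    beta1=0.9,
    beta2=0.997,
    epsilon=1e-9)

# On TPU (and when the target is not a local TPU) the code above additionally
# wraps it:
# optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)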

official/utils/misc/distribution_utils.py

Lines changed: 7 additions & 7 deletions
@@ -23,7 +23,6 @@
 import random
 import string
 import tensorflow as tf
-from tensorflow.contrib import distribute as contrib_distribute

 from official.utils.misc import tpu_lib

@@ -286,9 +285,10 @@ def set_up_synthetic_data():
       tf.distribute.experimental.MultiWorkerMirroredStrategy)
   # TODO(tobyboyd): Remove when contrib.distribute is all in core.
   if hasattr(tf, 'contrib'):
-    _monkey_patch_dataset_method(contrib_distribute.MirroredStrategy)
-    _monkey_patch_dataset_method(contrib_distribute.OneDeviceStrategy)
-    _monkey_patch_dataset_method(contrib_distribute.CollectiveAllReduceStrategy)
+    _monkey_patch_dataset_method(tf.contrib.distribute.MirroredStrategy)
+    _monkey_patch_dataset_method(tf.contrib.distribute.OneDeviceStrategy)
+    _monkey_patch_dataset_method(
+        tf.contrib.distribute.CollectiveAllReduceStrategy)
   else:
     print('Contrib missing: Skip monkey patch tf.contrib.distribute.*')

@@ -300,10 +300,10 @@ def undo_set_up_synthetic_data():
       tf.distribute.experimental.MultiWorkerMirroredStrategy)
   # TODO(tobyboyd): Remove when contrib.distribute is all in core.
   if hasattr(tf, 'contrib'):
-    _undo_monkey_patch_dataset_method(contrib_distribute.MirroredStrategy)
-    _undo_monkey_patch_dataset_method(contrib_distribute.OneDeviceStrategy)
+    _undo_monkey_patch_dataset_method(tf.contrib.distribute.MirroredStrategy)
+    _undo_monkey_patch_dataset_method(tf.contrib.distribute.OneDeviceStrategy)
     _undo_monkey_patch_dataset_method(
-        contrib_distribute.CollectiveAllReduceStrategy)
+        tf.contrib.distribute.CollectiveAllReduceStrategy)
   else:
     print('Contrib missing: Skip remove monkey patch tf.contrib.distribute.*')
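
The hasattr(tf, 'contrib') guard above is what keeps the new tf.contrib.distribute.* references safe on builds without contrib; a minimal sketch of the guarded lookup (the tuple name is illustrative):

import tensorflow as tf

if hasattr(tf, 'contrib'):
  # Only resolved when contrib exists (TF 1.x); never touched otherwise.
  contrib_strategies = (
      tf.contrib.distribute.MirroredStrategy,
      tf.contrib.distribute.OneDeviceStrategy,
      tf.contrib.distribute.CollectiveAllReduceStrategy,
  )
else:
  contrib_strategies = ()  # e.g. TF 2.x: nothing from contrib to patch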
