
Commit 357f30f

Clearly demarcate contrib symbols from standard tf symbols by importing them directly.
PiperOrigin-RevId: 285503670
1 parent c71043d commit 357f30f
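
Every file in this commit applies the same mechanical change: a chained `tf.contrib.*` attribute lookup is replaced by a direct, aliased import of the contrib module, so contrib dependencies are declared once at the top of each module and are easy to grep for. A minimal sketch of the pattern, using the boosted-trees call from train_higgs.py below (train_input_fn, feature_columns and model_dir stand in for values that script builds elsewhere):

# Before this change: the contrib symbol is reached through the tf namespace.
import tensorflow as tf

classifier = tf.contrib.estimator.boosted_trees_classifier_train_in_memory(
    train_input_fn, feature_columns, model_dir=model_dir)

# After this change: the contrib module is imported directly and aliased with a
# contrib_ prefix, making the contrib dependency explicit at import time.
from tensorflow.contrib import estimator as contrib_estimator

classifier = contrib_estimator.boosted_trees_classifier_train_in_memory(
    train_input_fn, feature_columns, model_dir=model_dir)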

File tree

8 files changed (+62 / -51 lines changed)

official/r1/boosted_trees/train_higgs.py

Lines changed: 2 additions & 1 deletion
@@ -53,6 +53,7 @@
 # pylint: enable=g-bad-import-order
 
 from official.utils.flags import core as flags_core
+from tensorflow.contrib import estimator as contrib_estimator
 from official.utils.flags._conventions import help_wrap
 from official.utils.logs import logger
 
@@ -229,7 +230,7 @@ def train_boosted_trees(flags_obj):
 
   # Though BoostedTreesClassifier is under tf.estimator, faster in-memory
   # training is yet provided as a contrib library.
-  classifier = tf.contrib.estimator.boosted_trees_classifier_train_in_memory(
+  classifier = contrib_estimator.boosted_trees_classifier_train_in_memory(
       train_input_fn,
       feature_columns,
       model_dir=flags_obj.model_dir or None,

official/r1/mnist/mnist_eager.py

Lines changed: 9 additions & 8 deletions
@@ -33,6 +33,7 @@
 from absl import app as absl_app
 from absl import flags
 import tensorflow as tf
+from tensorflow.contrib import summary as contrib_summary
 from tensorflow.python import eager as tfe
 # pylint: enable=g-bad-import-order
 
@@ -61,16 +62,16 @@ def train(model, optimizer, dataset, step_counter, log_interval=None):
 
   start = time.time()
   for (batch, (images, labels)) in enumerate(dataset):
-    with tf.contrib.summary.record_summaries_every_n_global_steps(
+    with contrib_summary.record_summaries_every_n_global_steps(
        10, global_step=step_counter):
      # Record the operations used to compute the loss given the input,
      # so that the gradient of the loss with respect to the variables
      # can be computed.
      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        loss_value = loss(logits, labels)
-        tf.contrib.summary.scalar('loss', loss_value)
-        tf.contrib.summary.scalar('accuracy', compute_accuracy(logits, labels))
+        contrib_summary.scalar('loss', loss_value)
+        contrib_summary.scalar('accuracy', compute_accuracy(logits, labels))
      grads = tape.gradient(loss_value, model.variables)
      optimizer.apply_gradients(
          zip(grads, model.variables), global_step=step_counter)
@@ -93,9 +94,9 @@ def test(model, dataset):
          tf.cast(labels, tf.int64))
   print('Test set: Average loss: %.4f, Accuracy: %4f%%\n' %
         (avg_loss.result(), 100 * accuracy.result()))
-  with tf.contrib.summary.always_record_summaries():
-    tf.contrib.summary.scalar('loss', avg_loss.result())
-    tf.contrib.summary.scalar('accuracy', accuracy.result())
+  with contrib_summary.always_record_summaries():
+    contrib_summary.scalar('loss', avg_loss.result())
+    contrib_summary.scalar('accuracy', accuracy.result())
 
 
 def run_mnist_eager(flags_obj):
@@ -137,9 +138,9 @@ def run_mnist_eager(flags_obj):
   else:
     train_dir = None
     test_dir = None
-  summary_writer = tf.contrib.summary.create_file_writer(
+  summary_writer = contrib_summary.create_file_writer(
      train_dir, flush_millis=10000)
-  test_summary_writer = tf.contrib.summary.create_file_writer(
+  test_summary_writer = contrib_summary.create_file_writer(
      test_dir, flush_millis=10000, name='test')
 
   # Create and restore checkpoint (if one exists on the path)
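
The four hunks above touch different pieces of the TF 1.x contrib summary flow. As a reminder of how they fit together, here is a minimal eager-mode sketch, not taken from the script: the log directory and scalar values are placeholders, and it assumes a TF 1.x build where tensorflow.contrib is still available.

import tensorflow as tf
from tensorflow.contrib import summary as contrib_summary

tf.enable_eager_execution()

step_counter = tf.train.get_or_create_global_step()
writer = contrib_summary.create_file_writer('/tmp/summaries', flush_millis=10000)

with writer.as_default():
  # Scalars inside this block are only written when the global step is a
  # multiple of 10, matching the training loop above.
  with contrib_summary.record_summaries_every_n_global_steps(
      10, global_step=step_counter):
    contrib_summary.scalar('loss', 0.25)
  # Scalars inside this block are written on every call, as in test().
  with contrib_summary.always_record_summaries():
    contrib_summary.scalar('accuracy', 0.97)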

official/r1/mnist/mnist_tpu.py

Lines changed: 11 additions & 12 deletions
@@ -33,6 +33,8 @@
 
 # For open source environment, add grandparent directory for import
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(sys.path[0]))))
+from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
+from tensorflow.contrib import tpu as contrib_tpu
 
 from official.r1.mnist import dataset  # pylint: disable=wrong-import-position
 from official.r1.mnist import mnist  # pylint: disable=wrong-import-position
@@ -98,7 +100,7 @@ def model_fn(features, labels, mode, params):
        'class_ids': tf.argmax(logits, axis=1),
        'probabilities': tf.nn.softmax(logits),
     }
-    return tf.contrib.tpu.TPUEstimatorSpec(mode, predictions=predictions)
+    return contrib_tpu.TPUEstimatorSpec(mode, predictions=predictions)
 
   logits = model(image, training=(mode == tf.estimator.ModeKeys.TRAIN))
   loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
@@ -111,14 +113,14 @@ def model_fn(features, labels, mode, params):
        decay_rate=0.96)
     optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
     if FLAGS.use_tpu:
-      optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
-    return tf.contrib.tpu.TPUEstimatorSpec(
+      optimizer = contrib_tpu.CrossShardOptimizer(optimizer)
+    return contrib_tpu.TPUEstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=optimizer.minimize(loss, tf.train.get_global_step()))
 
   if mode == tf.estimator.ModeKeys.EVAL:
-    return tf.contrib.tpu.TPUEstimatorSpec(
+    return contrib_tpu.TPUEstimatorSpec(
        mode=mode, loss=loss, eval_metrics=(metric_fn, [labels, logits]))
 
 
@@ -153,21 +155,18 @@ def main(argv):
   del argv  # Unused.
   tf.logging.set_verbosity(tf.logging.INFO)
 
-  tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
-      FLAGS.tpu,
-      zone=FLAGS.tpu_zone,
-      project=FLAGS.gcp_project
-  )
+  tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
+      FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
 
-  run_config = tf.contrib.tpu.RunConfig(
+  run_config = contrib_tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      model_dir=FLAGS.model_dir,
      session_config=tf.ConfigProto(
          allow_soft_placement=True, log_device_placement=True),
-      tpu_config=tf.contrib.tpu.TPUConfig(FLAGS.iterations, FLAGS.num_shards),
+      tpu_config=contrib_tpu.TPUConfig(FLAGS.iterations, FLAGS.num_shards),
   )
 
-  estimator = tf.contrib.tpu.TPUEstimator(
+  estimator = contrib_tpu.TPUEstimator(
      model_fn=model_fn,
      use_tpu=FLAGS.use_tpu,
      train_batch_size=FLAGS.batch_size,

official/r1/resnet/resnet_run_loop.py

Lines changed: 2 additions & 1 deletion
@@ -30,6 +30,7 @@
 
 from absl import flags
 import tensorflow as tf
+from tensorflow.contrib import opt as contrib_opt
 
 from official.r1.resnet import imagenet_preprocessing
 from official.r1.resnet import resnet_model
@@ -445,7 +446,7 @@ def exclude_batch_norm(name):
     tf.compat.v1.summary.scalar('learning_rate', learning_rate)
 
     if flags.FLAGS.enable_lars:
-      optimizer = tf.contrib.opt.LARSOptimizer(
+      optimizer = contrib_opt.LARSOptimizer(
          learning_rate,
          momentum=momentum,
          weight_decay=weight_decay,

official/r1/utils/tpu.py

Lines changed: 5 additions & 4 deletions
@@ -15,6 +15,7 @@
 """Functions specific to running TensorFlow on TPUs."""
 
 import tensorflow as tf
+from tensorflow.contrib import summary as contrib_summary
 
 
 # "local" is a magic word in the TPU cluster resolver; it informs the resolver
@@ -58,13 +59,13 @@ def host_call_fn(global_step, *args):
      List of summary ops to run on the CPU host.
    """
    step = global_step[0]
-    with tf.contrib.summary.create_file_writer(
+    with contrib_summary.create_file_writer(
        logdir=model_dir, filename_suffix=".host_call").as_default():
-      with tf.contrib.summary.always_record_summaries():
+      with contrib_summary.always_record_summaries():
        for i, name in enumerate(metric_names):
-          tf.contrib.summary.scalar(prefix + name, args[i][0], step=step)
+          contrib_summary.scalar(prefix + name, args[i][0], step=step)
 
-    return tf.contrib.summary.all_summary_ops()
+    return contrib_summary.all_summary_ops()
 
   # To log the current learning rate, and gradient norm for Tensorboard, the
   # summary op needs to be run on the host CPU via host_call. host_call

official/recommendation/neumf_model.py

Lines changed: 3 additions & 2 deletions
@@ -37,6 +37,7 @@
 
 from six.moves import xrange  # pylint: disable=redefined-builtin
 import tensorflow as tf
+from tensorflow.contrib import tpu as contrib_tpu
 
 from official.recommendation import constants as rconst
 from official.recommendation import movielens
@@ -116,7 +117,7 @@ def neumf_model_fn(features, labels, mode, params):
        epsilon=params["epsilon"])
     if params["use_tpu"]:
       # TODO(seemuch): remove this contrib import
-      optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
+      optimizer = contrib_tpu.CrossShardOptimizer(optimizer)
 
     mlperf_helper.ncf_print(key=mlperf_helper.TAGS.MODEL_HP_LOSS_FN,
                             value=mlperf_helper.TAGS.BCE)
@@ -274,7 +275,7 @@ def _get_estimator_spec_with_metrics(logits,  # type: tf.Tensor
      use_tpu_spec)
 
   if use_tpu_spec:
-    return tf.contrib.tpu.TPUEstimatorSpec(
+    return contrib_tpu.TPUEstimatorSpec(
        mode=tf.estimator.ModeKeys.EVAL,
        loss=cross_entropy,
        eval_metrics=(metric_fn, [in_top_k, ndcg, metric_weights]))
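
contrib_tpu.CrossShardOptimizer appears in three of the files in this commit (mnist_tpu.py, transformer_main.py and neumf_model.py). A short sketch of the usual pattern inside a TPU model_fn, with a placeholder Adam optimizer and flag rather than the params dict these models actually use:

import tensorflow as tf
from tensorflow.contrib import tpu as contrib_tpu

def build_train_op(loss, learning_rate=0.001, use_tpu=True):
  """Returns a train_op whose gradients are aggregated across TPU shards."""
  optimizer = tf.train.AdamOptimizer(learning_rate)
  if use_tpu:
    # CrossShardOptimizer wraps the base optimizer and performs a cross-replica
    # reduction of the gradients before they are applied, so every TPU core
    # ends up applying the same update.
    optimizer = contrib_tpu.CrossShardOptimizer(optimizer)
  return optimizer.minimize(loss, global_step=tf.train.get_global_step())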

official/transformer/transformer_main.py

Lines changed: 23 additions & 16 deletions
@@ -33,6 +33,9 @@
 # pylint: enable=g-bad-import-order
 
 from official.r1.utils import export
+from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
+from tensorflow.contrib import opt as contrib_opt
+from tensorflow.contrib import tpu as contrib_tpu
 from official.r1.utils import tpu as tpu_util
 from official.transformer import compute_bleu
 from official.transformer import translate
@@ -115,8 +118,10 @@ def model_fn(features, labels, mode, params):
      metric_fn = lambda logits, labels: (
          metrics.get_eval_metrics(logits, labels, params=params))
      eval_metrics = (metric_fn, [logits, labels])
-      return tf.contrib.tpu.TPUEstimatorSpec(
-          mode=mode, loss=loss, predictions={"predictions": logits},
+      return contrib_tpu.TPUEstimatorSpec(
+          mode=mode,
+          loss=loss,
+          predictions={"predictions": logits},
          eval_metrics=eval_metrics)
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, predictions={"predictions": logits},
@@ -128,12 +133,14 @@ def model_fn(features, labels, mode, params):
    # in TensorBoard.
    metric_dict["minibatch_loss"] = loss
    if params["use_tpu"]:
-      return tf.contrib.tpu.TPUEstimatorSpec(
-          mode=mode, loss=loss, train_op=train_op,
+      return contrib_tpu.TPUEstimatorSpec(
+          mode=mode,
+          loss=loss,
+          train_op=train_op,
          host_call=tpu_util.construct_scalar_host_call(
-              metric_dict=metric_dict, model_dir=params["model_dir"],
-              prefix="training/")
-      )
+              metric_dict=metric_dict,
+              model_dir=params["model_dir"],
+              prefix="training/"))
    record_scalars(metric_dict)
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
 
@@ -173,14 +180,14 @@ def get_train_op_and_metrics(loss, params):
 
   # Create optimizer. Use LazyAdamOptimizer from TF contrib, which is faster
   # than the TF core Adam optimizer.
-  optimizer = tf.contrib.opt.LazyAdamOptimizer(
+  optimizer = contrib_opt.LazyAdamOptimizer(
      learning_rate,
      beta1=params["optimizer_adam_beta1"],
      beta2=params["optimizer_adam_beta2"],
      epsilon=params["optimizer_adam_epsilon"])
 
   if params["use_tpu"] and params["tpu"] != tpu_util.LOCAL:
-    optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
+    optimizer = contrib_tpu.CrossShardOptimizer(optimizer)
 
   # Uses automatic mixed precision FP16 training if on GPU.
   if params["dtype"] == "fp16":
@@ -528,31 +535,31 @@ def construct_estimator(flags_obj, params, schedule_manager):
        model_fn=model_fn, model_dir=flags_obj.model_dir, params=params,
        config=tf.estimator.RunConfig(train_distribute=distribution_strategy))
 
-  tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
+  tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
      tpu=flags_obj.tpu,
      zone=flags_obj.tpu_zone,
-      project=flags_obj.tpu_gcp_project
-  )
+      project=flags_obj.tpu_gcp_project)
 
-  tpu_config = tf.contrib.tpu.TPUConfig(
+  tpu_config = contrib_tpu.TPUConfig(
      iterations_per_loop=schedule_manager.single_iteration_train_steps,
      num_shards=flags_obj.num_tpu_shards)
 
-  run_config = tf.contrib.tpu.RunConfig(
+  run_config = contrib_tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      model_dir=flags_obj.model_dir,
      session_config=tf.ConfigProto(
          allow_soft_placement=True, log_device_placement=True),
      tpu_config=tpu_config)
 
-  return tf.contrib.tpu.TPUEstimator(
+  return contrib_tpu.TPUEstimator(
      model_fn=model_fn,
      use_tpu=params["use_tpu"] and flags_obj.tpu != tpu_util.LOCAL,
      train_batch_size=schedule_manager.batch_size,
      eval_batch_size=schedule_manager.batch_size,
      params={
          # TPUEstimator needs to populate batch_size itself due to sharding.
-          key: value for key, value in params.items() if key != "batch_size"},
+          key: value for key, value in params.items() if key != "batch_size"
+      },
      config=run_config)
 

official/utils/misc/distribution_utils.py

Lines changed: 7 additions & 7 deletions
@@ -23,6 +23,7 @@
 import random
 import string
 import tensorflow as tf
+from tensorflow.contrib import distribute as contrib_distribute
 
 from official.utils.misc import tpu_lib
 
@@ -285,10 +286,9 @@ def set_up_synthetic_data():
      tf.distribute.experimental.MultiWorkerMirroredStrategy)
   # TODO(tobyboyd): Remove when contrib.distribute is all in core.
   if hasattr(tf, 'contrib'):
-    _monkey_patch_dataset_method(tf.contrib.distribute.MirroredStrategy)
-    _monkey_patch_dataset_method(tf.contrib.distribute.OneDeviceStrategy)
-    _monkey_patch_dataset_method(
-        tf.contrib.distribute.CollectiveAllReduceStrategy)
+    _monkey_patch_dataset_method(contrib_distribute.MirroredStrategy)
+    _monkey_patch_dataset_method(contrib_distribute.OneDeviceStrategy)
+    _monkey_patch_dataset_method(contrib_distribute.CollectiveAllReduceStrategy)
   else:
     print('Contrib missing: Skip monkey patch tf.contrib.distribute.*')
 
@@ -300,10 +300,10 @@ def undo_set_up_synthetic_data():
      tf.distribute.experimental.MultiWorkerMirroredStrategy)
   # TODO(tobyboyd): Remove when contrib.distribute is all in core.
   if hasattr(tf, 'contrib'):
-    _undo_monkey_patch_dataset_method(tf.contrib.distribute.MirroredStrategy)
-    _undo_monkey_patch_dataset_method(tf.contrib.distribute.OneDeviceStrategy)
+    _undo_monkey_patch_dataset_method(contrib_distribute.MirroredStrategy)
+    _undo_monkey_patch_dataset_method(contrib_distribute.OneDeviceStrategy)
     _undo_monkey_patch_dataset_method(
-        tf.contrib.distribute.CollectiveAllReduceStrategy)
+        contrib_distribute.CollectiveAllReduceStrategy)
   else:
     print('Contrib missing: Skip remove monkey patch tf.contrib.distribute.*')
 
