
Commit e8c32e8

saberkun authored and allenwang28 committed
Internal change
PiperOrigin-RevId: 306136273
1 parent aff514a commit e8c32e8

File tree: 2 files changed (+30 additions, −6 deletions)

official/benchmark/keras_imagenet_benchmark.py

Lines changed: 29 additions & 5 deletions
@@ -14,6 +14,7 @@
 # limitations under the License.
 # ==============================================================================
 """Executes Keras benchmarks and accuracy tests."""
+# pylint: disable=line-too-long
 from __future__ import print_function
 
 import json
@@ -266,7 +267,6 @@ def _run_and_report_benchmark(
       dataset_num_private_threads: Optional[int] = None,
       loss_scale: Optional[str] = None):
     """Runs and reports the benchmark given the provided configuration."""
-    self._setup()
     FLAGS.model_type = 'resnet'
     FLAGS.dataset = 'imagenet'
     FLAGS.mode = 'train_and_eval'
@@ -449,7 +449,6 @@ def _run_and_report_benchmark(
       dataset_num_private_threads: Optional[int] = None,
       loss_scale: Optional[str] = None):
     """Runs and reports the benchmark given the provided configuration."""
-    self._setup()
     FLAGS.model_type = 'resnet'
     FLAGS.dataset = 'imagenet'
     FLAGS.mode = 'train_and_eval'
@@ -490,6 +489,7 @@ def _run_and_report_benchmark(
 
   def benchmark_1_gpu_no_dist_strat(self):
     """Tests Keras model with 1 GPU, no distribution strategy."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_1_gpu_no_dist_strat',
         num_gpus=1,
@@ -498,6 +498,7 @@ def benchmark_1_gpu_no_dist_strat(self):
 
   def benchmark_1_gpu_no_dist_strat_run_eagerly(self):
     """Tests Keras model with 1 GPU, no distribution strategy, run eagerly."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_1_gpu_no_dist_strat_run_eagerly',
         num_gpus=1,
@@ -507,6 +508,7 @@ def benchmark_1_gpu_no_dist_strat_run_eagerly(self):
 
   def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16(self):
     """Tests with 1 GPU, no distribution strategy, fp16, run eagerly."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_1_gpu_no_dist_strat_run_eagerly_fp16',
         num_gpus=1,
@@ -517,6 +519,7 @@ def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16(self):
 
   def benchmark_1_gpu(self):
     """Tests Keras model with 1 GPU."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_1_gpu',
         num_gpus=1,
@@ -525,16 +528,17 @@ def benchmark_1_gpu(self):
 
   def benchmark_xla_1_gpu(self):
     """Tests Keras model with XLA and 1 GPU."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_xla_1_gpu',
         num_gpus=1,
         enable_xla=True,
         distribution_strategy='one_device',
         per_replica_batch_size=128)
-    self._setup()
 
   def benchmark_1_gpu_fp16(self):
     """Tests Keras model with 1 GPU and fp16."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_1_gpu_fp16',
         num_gpus=1,
@@ -544,6 +548,7 @@ def benchmark_1_gpu_fp16(self):
 
   def benchmark_1_gpu_fp16_dynamic(self):
     """Tests Keras model with 1 GPU, fp16, and dynamic loss scaling."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_1_gpu_fp16_dynamic',
         num_gpus=1,
@@ -554,6 +559,7 @@ def benchmark_1_gpu_fp16_dynamic(self):
 
   def benchmark_xla_1_gpu_fp16(self):
     """Tests Keras model with XLA, 1 GPU and fp16."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_xla_1_gpu_fp16',
         num_gpus=1,
@@ -564,6 +570,7 @@ def benchmark_xla_1_gpu_fp16(self):
 
   def benchmark_xla_1_gpu_fp16_tweaked(self):
     """Tests Keras model with XLA, 1 GPU, fp16, and manual config tuning."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_xla_1_gpu_fp16_tweaked',
         num_gpus=1,
@@ -575,6 +582,7 @@ def benchmark_xla_1_gpu_fp16_tweaked(self):
 
   def benchmark_xla_1_gpu_fp16_dynamic(self):
     """Tests Keras model with XLA, 1 GPU, fp16, and dynamic loss scaling."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_xla_1_gpu_fp16_dynamic',
         num_gpus=1,
@@ -586,6 +594,7 @@ def benchmark_xla_1_gpu_fp16_dynamic(self):
 
   def benchmark_graph_1_gpu(self):
     """Tests Keras model in legacy graph mode with 1 GPU."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_graph_1_gpu',
         num_gpus=1,
@@ -594,6 +603,7 @@ def benchmark_graph_1_gpu(self):
 
   def benchmark_graph_xla_1_gpu(self):
     """Tests Keras model in legacy graph mode with XLA and 1 GPU."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_graph_xla_1_gpu',
         num_gpus=1,
@@ -603,6 +613,7 @@ def benchmark_graph_xla_1_gpu(self):
 
   def benchmark_8_gpu(self):
     """Tests Keras model with 8 GPUs."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_8_gpu',
         num_gpus=8,
@@ -611,6 +622,7 @@ def benchmark_8_gpu(self):
 
   def benchmark_8_gpu_tweaked(self):
     """Tests Keras model with manual config tuning and 8 GPUs."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_8_gpu_tweaked',
         num_gpus=8,
@@ -620,6 +632,7 @@ def benchmark_8_gpu_tweaked(self):
 
   def benchmark_xla_8_gpu(self):
     """Tests Keras model with XLA and 8 GPUs."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_xla_8_gpu',
         num_gpus=8,
@@ -629,6 +642,7 @@ def benchmark_xla_8_gpu(self):
 
   def benchmark_xla_8_gpu_tweaked(self):
     """Tests Keras model with manual config tuning, 8 GPUs, and XLA."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_xla_8_gpu_tweaked',
         num_gpus=8,
@@ -640,6 +654,7 @@ def benchmark_xla_8_gpu_tweaked(self):
 
   def benchmark_8_gpu_fp16(self):
     """Tests Keras model with 8 GPUs and fp16."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_8_gpu_fp16',
         num_gpus=8,
@@ -649,6 +664,7 @@ def benchmark_8_gpu_fp16(self):
 
   def benchmark_8_gpu_fp16_tweaked(self):
     """Tests Keras model with 8 GPUs, fp16, and manual config tuning."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_8_gpu_fp16_tweaked',
         num_gpus=8,
@@ -659,6 +675,7 @@ def benchmark_8_gpu_fp16_tweaked(self):
 
   def benchmark_8_gpu_fp16_dynamic_tweaked(self):
     """Tests Keras model with 8 GPUs, fp16, dynamic loss scaling, and tuned."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_8_gpu_fp16_dynamic_tweaked',
         num_gpus=8,
@@ -670,6 +687,7 @@ def benchmark_8_gpu_fp16_dynamic_tweaked(self):
 
   def benchmark_xla_8_gpu_fp16(self):
     """Tests Keras model with XLA, 8 GPUs and fp16."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_xla_8_gpu_fp16',
         dtype='float16',
@@ -680,6 +698,7 @@ def benchmark_xla_8_gpu_fp16(self):
 
   def benchmark_xla_8_gpu_fp16_tweaked(self):
     """Test Keras model with manual config tuning, XLA, 8 GPUs and fp16."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_xla_8_gpu_fp16_tweaked',
         dtype='float16',
@@ -695,6 +714,7 @@ def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self):
 
     Delay performance measurement for stable performance on 96 vCPU platforms.
     """
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_xla_8_gpu_fp16_tweaked_delay_measure',
         dtype='float16',
@@ -707,6 +727,7 @@ def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self):
 
   def benchmark_xla_8_gpu_fp16_dynamic_tweaked(self):
     """Tests Keras model with config tuning, XLA, 8 GPUs and dynamic fp16."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_xla_8_gpu_fp16_dynamic_tweaked',
         dtype='float16',
@@ -720,6 +741,7 @@ def benchmark_xla_8_gpu_fp16_dynamic_tweaked(self):
 
   def benchmark_graph_8_gpu(self):
     """Tests Keras model in legacy graph mode with 8 GPUs."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_graph_8_gpu',
         num_gpus=8,
@@ -728,6 +750,7 @@ def benchmark_graph_8_gpu(self):
 
   def benchmark_graph_xla_8_gpu(self):
     """Tests Keras model in legacy graph mode with XLA and 8 GPUs."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_graph_xla_8_gpu',
         num_gpus=8,
@@ -737,6 +760,7 @@ def benchmark_graph_xla_8_gpu(self):
 
   def benchmark_2x2_tpu_fp16(self):
     """Test Keras model with 2x2 TPU, fp16."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_2x2_tpu_fp16',
         dtype='bfloat16',
@@ -745,6 +769,7 @@ def benchmark_2x2_tpu_fp16(self):
 
   def benchmark_4x4_tpu_fp16(self):
     """Test Keras model with 4x4 TPU, fp16."""
+    self._setup()
     self._run_and_report_benchmark(
         experiment_name='benchmark_4x4_tpu_fp16',
         dtype='bfloat16',
@@ -1395,8 +1420,7 @@ class Resnet50KerasBenchmarkReal(Resnet50KerasClassifierBenchmarkBase):
   """Resnet50 real data benchmark tests."""
 
   def __init__(self, output_dir=None, root_data_dir=None, tpu=None, **kwargs):
-    data_dir = ('/readahead/200M/placer/prod/home/distbelief/'
-                'imagenet-tensorflow/imagenet-2012-tfrecord')
+    data_dir = os.path.join(root_data_dir, 'imagenet')
     def_flags = {}
     def_flags['log_steps'] = 10

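The net effect of the changes in this file is a consistent call pattern: the shared _run_and_report_benchmark helper no longer calls self._setup(), and every benchmark_* entry point now calls it explicitly before delegating (this also corrects benchmark_xla_1_gpu, where the call previously ran after the benchmark instead of before it). The following is a minimal, self-contained sketch of that pattern; the class name, attributes, and simplified flag handling are illustrative stand-ins, not the real PerfZero benchmark harness.

# Minimal sketch (assumed names, not the real benchmark classes) of the call
# pattern this commit standardizes: each benchmark_* method resets state via
# self._setup() before delegating to the shared _run_and_report_benchmark
# helper, which no longer calls _setup() itself.
from typing import Optional


class KerasBenchmarkSketch:
  """Hypothetical stand-in for the Resnet50 Keras benchmark classes."""

  def _setup(self):
    # The real harness restores FLAGS to known defaults here so settings from
    # a previously run benchmark method do not leak into the next one.
    self.flags = {}

  def _run_and_report_benchmark(self,
                                experiment_name: str,
                                num_gpus: int = 1,
                                dtype: str = 'float32',
                                loss_scale: Optional[str] = None):
    # Note: no self._setup() call here any more; callers are responsible.
    self.flags.update(
        experiment_name=experiment_name,
        num_gpus=num_gpus,
        dtype=dtype,
        loss_scale=loss_scale)
    print('running benchmark with flags:', self.flags)

  def benchmark_1_gpu_fp16(self):
    """Each benchmark entry point now starts with an explicit _setup()."""
    self._setup()
    self._run_and_report_benchmark(
        experiment_name='benchmark_1_gpu_fp16',
        num_gpus=1,
        dtype='float16')


if __name__ == '__main__':
  KerasBenchmarkSketch().benchmark_1_gpu_fp16()

Pushing the _setup() call into each entry point makes per-test flag initialization explicit and keeps it from depending silently on helper internals.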
official/benchmark/resnet_ctl_imagenet_benchmark.py

Lines changed: 1 addition & 1 deletion
@@ -13,12 +13,12 @@
 # limitations under the License.
 # ==============================================================================
 """Executes CTL benchmarks and accuracy tests."""
+# pylint: disable=line-too-long,g-bad-import-order
 from __future__ import print_function
 
 import os
 import time
 
-# pylint: disable=g-bad-import-order
 from absl import flags
 import tensorflow as tf
 