Commit f2c5574

Merge pull request #11158 from luotao1/benchmark1
refine benchmark/fluid
2 parents: 78afcbf + 9d1dae3

3 files changed (+26, -23 lines)

benchmark/.gitignore

Lines changed: 3 additions & 0 deletions
@@ -7,3 +7,6 @@ paddle/rnn/imdb.pkl
 caffe/image/logs
 tensorflow/image/logs
 tensorflow/rnn/logs
+fluid/models/*.pyc
+fluid/logs
+fluid/nohup.out

benchmark/fluid/fluid_benchmark.py

Lines changed: 12 additions & 14 deletions
@@ -40,10 +40,7 @@ def parse_args():
     parser.add_argument(
         '--batch_size', type=int, default=32, help='The minibatch size.')
     parser.add_argument(
-        '--learning_rate',
-        type=float,
-        default=0.001,
-        help='The minibatch size.')
+        '--learning_rate', type=float, default=0.001, help='The learning rate.')
     # TODO(wuyi): add "--use_fake_data" option back.
     parser.add_argument(
         '--skip_batch_num',
@@ -231,10 +228,7 @@ def train(avg_loss, infer_prog, optimizer, train_reader, test_reader, batch_acc,
                 train_losses.append(loss)
                 print("Pass: %d, Iter: %d, Loss: %f\n" %
                       (pass_id, iters, np.mean(train_losses)))
-        train_elapsed = time.time() - start_time
-        examples_per_sec = num_samples / train_elapsed
-        print('\nTotal examples: %d, total time: %.5f, %.5f examples/sec\n' %
-              (num_samples, train_elapsed, examples_per_sec))
+        print_train_time(start_time, time.time(), num_samples)
         print("Pass: %d, Loss: %f" % (pass_id, np.mean(train_losses)))
         # evaluation
         if not args.no_test and batch_acc != None:
@@ -315,10 +309,7 @@ def train_parallel(avg_loss, infer_prog, optimizer, train_reader, test_reader,
             if batch_id % 1 == 0:
                 print("Pass %d, batch %d, loss %s" %
                       (pass_id, batch_id, np.array(loss)))
-        train_elapsed = time.time() - start_time
-        examples_per_sec = num_samples / train_elapsed
-        print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' %
-              (num_samples, train_elapsed, examples_per_sec))
+        print_train_time(start_time, time.time(), num_samples)
         if not args.no_test and batch_acc != None:
             test_acc = test(startup_exe, infer_prog, test_reader, feeder,
                             batch_acc)
@@ -329,20 +320,27 @@ def train_parallel(avg_loss, infer_prog, optimizer, train_reader, test_reader,
 def print_arguments(args):
     vars(args)['use_nvprof'] = (vars(args)['use_nvprof'] and
                                 vars(args)['device'] == 'GPU')
-    print('----------- resnet Configuration Arguments -----------')
+    print('----------- Configuration Arguments -----------')
     for arg, value in sorted(vars(args).iteritems()):
         print('%s: %s' % (arg, value))
     print('------------------------------------------------')
 
 
+def print_train_time(start_time, end_time, num_samples):
+    train_elapsed = end_time - start_time
+    examples_per_sec = num_samples / train_elapsed
+    print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' %
+          (num_samples, train_elapsed, examples_per_sec))
+
+
 def main():
     args = parse_args()
     print_arguments(args)
 
     # the unique trainer id, starting from 0, needed by trainer
     # only
     nccl_id_var, num_trainers, trainer_id = (
-        None, 1, int(os.getenv("PADDLE_TRAINER_ID", "-1")))
+        None, 1, int(os.getenv("PADDLE_TRAINER_ID", "0")))
 
     if args.use_cprof:
         pr = cProfile.Profile()
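
For reference, the change above folds the duplicated elapsed-time and throughput reporting in train() and train_parallel() into the new print_train_time helper. Below is a minimal standalone sketch of that pattern, not the commit itself; the loop body and the 128-example batch size are illustrative stand-ins:

import time

def print_train_time(start_time, end_time, num_samples):
    # Report total examples, elapsed wall time, and throughput once,
    # instead of repeating these three lines at every call site.
    train_elapsed = end_time - start_time
    examples_per_sec = num_samples / train_elapsed
    print('\nTotal examples: %d, total time: %.5f, %.5f examples/sec\n' %
          (num_samples, train_elapsed, examples_per_sec))

# Illustrative call site mirroring train() / train_parallel():
start_time = time.time()
num_samples = 0
for _ in range(10):        # stand-in for iterating the train reader
    num_samples += 128     # assumed batch size, for illustration only
    time.sleep(0.01)       # stand-in for one training step
print_train_time(start_time, time.time(), num_samples)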

benchmark/fluid/run.sh

Lines changed: 11 additions & 9 deletions
@@ -2,6 +2,7 @@
 # This script benchmarking the PaddlePaddle Fluid on
 # single thread single GPU.
 
+mkdir -p logs
 #export FLAGS_fraction_of_gpu_memory_to_use=0.0
 export CUDNN_PATH=/paddle/cudnn_v5
 
@@ -35,6 +36,7 @@ nohup stdbuf -oL nvidia-smi \
   --format=csv \
   --filename=mem.log \
   -l 1 &
+
 # mnist
 # mnist gpu mnist 128
 FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \
@@ -43,7 +45,7 @@ FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \
   --batch_size=128 \
   --skip_batch_num=5 \
   --iterations=500 \
-  2>&1 | tee -a mnist_gpu_128.log
+  2>&1 | tee -a logs/mnist_gpu_128.log
 
 # vgg16
 # gpu cifar10 128
@@ -53,7 +55,7 @@ FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \
   --batch_size=128 \
   --skip_batch_num=5 \
   --iterations=30 \
-  2>&1 | tee -a vgg16_gpu_128.log
+  2>&1 | tee -a logs/vgg16_gpu_128.log
 
 # flowers gpu 128
 FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \
@@ -63,28 +65,28 @@ FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \
   --data_set=flowers \
   --skip_batch_num=5 \
   --iterations=30 \
-  2>&1 | tee -a vgg16_gpu_flowers_32.log
+  2>&1 | tee -a logs/vgg16_gpu_flowers_32.log
 
 # resnet50
 # resnet50 gpu cifar10 128
 FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \
-  --model=resnet50 \
+  --model=resnet \
   --device=GPU \
   --batch_size=128 \
   --data_set=cifar10 \
   --skip_batch_num=5 \
   --iterations=30 \
-  2>&1 | tee -a resnet50_gpu_128.log
+  2>&1 | tee -a logs/resnet50_gpu_128.log
 
 # resnet50 gpu flowers 64
 FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \
-  --model=resnet50 \
+  --model=resnet \
   --device=GPU \
   --batch_size=64 \
   --data_set=flowers \
   --skip_batch_num=5 \
   --iterations=30 \
-  2>&1 | tee -a resnet50_gpu_flowers_64.log
+  2>&1 | tee -a logs/resnet50_gpu_flowers_64.log
 
 # lstm
 # lstm gpu imdb 32 # tensorflow only support batch=32
@@ -94,7 +96,7 @@ FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \
   --batch_size=32 \
   --skip_batch_num=5 \
   --iterations=30 \
-  2>&1 | tee -a lstm_gpu_32.log
+  2>&1 | tee -a logs/lstm_gpu_32.log
 
 # seq2seq
 # seq2seq gpu wmb 128
@@ -104,4 +106,4 @@ FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \
   --batch_size=128 \
   --skip_batch_num=5 \
   --iterations=30 \
-  2>&1 | tee -a lstm_gpu_128.log
+  2>&1 | tee -a logs/lstm_gpu_128.log
