
Commit fd9dc75

Merge remote-tracking branch 'origin/develop' into memory/stable

2 parents: 873b517 + bdbf1bc
147 files changed: +4582 additions, -2113 deletions


README.md

Lines changed: 7 additions & 7 deletions
@@ -19,7 +19,7 @@ Our vision is to enable deep learning for everyone via PaddlePaddle.
 Please refer to our [release announcement](https://github.com/PaddlePaddle/Paddle/releases) to track the latest feature of PaddlePaddle.
 
 
-### Latest PaddlePaddle Release: [Fluid 0.14.0](https://github.com/PaddlePaddle/Paddle/tree/v0.14.0)
+### Latest PaddlePaddle Release: [Fluid 0.15.0](https://github.com/PaddlePaddle/Paddle/tree/v0.15.0)
 ### Install Latest Stable Release:
 ```
 # Linux CPU
@@ -76,26 +76,26 @@ pip install paddlepaddle-gpu==0.14.0.post85
 
 ## Installation
 
-It is recommended to read [this doc](http://paddlepaddle.org/documentation/docs/zh/0.14.0/new_docs/beginners_guide/install/install_doc.html) on our website.
+It is recommended to read [this doc](http://paddlepaddle.org/documentation/docs/zh/0.15.0/new_docs/beginners_guide/install/install_doc.html) on our website.
 
 ## Documentation
 
-We provide [English](http://paddlepaddle.org/documentation/docs/en/0.14.0/getstarted/index_en.html) and
-[Chinese](http://paddlepaddle.org/documentation/docs/zh/0.14.0/new_docs/beginners_guide/index.html) documentation.
+We provide [English](http://paddlepaddle.org/documentation/docs/en/0.15.0/getstarted/index_en.html) and
+[Chinese](http://paddlepaddle.org/documentation/docs/zh/0.15.0/new_docs/beginners_guide/index.html) documentation.
 
 - [Deep Learning 101](https://github.com/PaddlePaddle/book)
 
   You might want to start from this online interactive book that can run in a Jupyter Notebook.
 
-- [Distributed Training](http://paddlepaddle.org/documentation/docs/zh/0.14.0/new_docs/user_guides/howto/training/cluster_howto.html)
+- [Distributed Training](http://paddlepaddle.org/documentation/docs/zh/0.15.0/new_docs/user_guides/howto/training/cluster_howto.html)
 
   You can run distributed training jobs on MPI clusters.
 
-- [Python API](http://paddlepaddle.org/documentation/api/zh/0.14.0/fluid.html)
+- [Python API](http://paddlepaddle.org/documentation/api/zh/0.15.0/fluid.html)
 
   Our new API enables much shorter programs.
 
-- [How to Contribute](http://paddlepaddle.org/documentation/docs/zh/0.14.0/new_docs/advanced_usage/development/contribute_to_paddle.html)
+- [How to Contribute](http://paddlepaddle.org/documentation/docs/zh/0.15.0/new_docs/advanced_usage/development/contribute_to_paddle.html)
 
   We appreciate your contributions!

benchmark/fluid/args.py

Lines changed: 6 additions & 0 deletions
@@ -140,5 +140,11 @@ def parse_args():
         '--use_lars',
         action='store_true',
         help='If set, use lars for optimizers, ONLY support resnet module.')
+    parser.add_argument(
+        '--reduce_strategy',
+        type=str,
+        choices=['reduce', 'all_reduce'],
+        default='all_reduce',
+        help='Specify the reduce strategy, can be reduce, all_reduce')
     args = parser.parse_args()
     return args
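For context, a minimal standalone sketch of how the new flag behaves (plain argparse, mirroring only the lines added above; the rest of the project's parse_args() is omitted):

```python
import argparse

# Standalone reproduction of the option added to parse_args() above.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--reduce_strategy',
    type=str,
    choices=['reduce', 'all_reduce'],  # argparse rejects any other value
    default='all_reduce',              # omitting the flag keeps prior behavior
    help='Specify the reduce strategy, can be reduce, all_reduce')

args = parser.parse_args(['--reduce_strategy', 'reduce'])
print(args.reduce_strategy)  # -> reduce
```

Because the default is 'all_reduce', existing benchmark invocations that omit the flag behave exactly as before.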

benchmark/fluid/fluid_benchmark.py

Lines changed: 11 additions & 1 deletion
@@ -91,7 +91,8 @@ def dist_transpile(trainer_id, args, train_prog, startup_prog):
         program=train_prog,
         pservers=pserver_endpoints,
         trainers=trainers,
-        sync_mode=not args.async_mode)
+        sync_mode=not args.async_mode,
+        startup_program=startup_prog)
     if training_role == "PSERVER":
         pserver_program = t.get_pserver_program(current_endpoint)
         pserver_startup_program = t.get_startup_program(
@@ -169,6 +170,14 @@ def train_parallel(train_args, test_args, args, train_prog, test_prog,
     strategy = fluid.ExecutionStrategy()
     strategy.num_threads = args.cpus
     strategy.allow_op_delay = False
+    build_strategy = fluid.BuildStrategy()
+    if args.reduce_strategy == "reduce":
+        build_strategy.reduce_strategy = fluid.BuildStrategy(
+        ).ReduceStrategy.Reduce
+    else:
+        build_strategy.reduce_strategy = fluid.BuildStrategy(
+        ).ReduceStrategy.AllReduce
+
     avg_loss = train_args[0]
 
     if args.update_method == "pserver":
@@ -183,6 +192,7 @@ def train_parallel(train_args, test_args, args, train_prog, test_prog,
             avg_loss.name,
             main_program=train_prog,
             exec_strategy=strategy,
+            build_strategy=build_strategy,
             num_trainers=num_trainers,
             trainer_id=trainer_id)
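These hunks thread the --reduce_strategy value from args.py into the ParallelExecutor via fluid.BuildStrategy (the first hunk also passes startup_prog through to the distribute transpiler). One stylistic note: the diff instantiates a fresh fluid.BuildStrategy() each time purely to reach the ReduceStrategy enum. A minimal equivalent sketch, assuming the enum is also reachable as a class attribute (as in PaddlePaddle's Python API); make_build_strategy is a hypothetical helper, not part of this commit:

```python
import paddle.fluid as fluid

def make_build_strategy(reduce_strategy):
    """Map the --reduce_strategy CLI value onto a fluid.BuildStrategy."""
    build_strategy = fluid.BuildStrategy()
    if reduce_strategy == 'reduce':
        # Shard aggregation: each parameter's gradients are reduced on one device.
        build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
    else:
        # Default: every device all-reduces every gradient.
        build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce
    return build_strategy
```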

benchmark/fluid/models/mnist.py

Lines changed: 7 additions & 4 deletions
@@ -67,11 +67,14 @@ def cnn_model(data):
 
 def get_model(args, is_train, main_prog, startup_prog):
     # NOTE: mnist is small, we don't implement data sharding yet.
-    filelist = [
-        os.path.join(args.data_path, f) for f in os.listdir(args.data_path)
-    ]
+    opt = None
+    data_file_handle = None
     with fluid.program_guard(main_prog, startup_prog):
         if args.use_reader_op:
+            filelist = [
+                os.path.join(args.data_path, f)
+                for f in os.listdir(args.data_path)
+            ]
             data_file_handle = fluid.layers.open_files(
                 filenames=filelist,
                 shapes=[[-1, 1, 28, 28], (-1, 1)],
@@ -100,7 +103,7 @@ def get_model(args, is_train, main_prog, startup_prog):
         if is_train:
             opt = fluid.optimizer.AdamOptimizer(
                 learning_rate=0.001, beta1=0.9, beta2=0.999)
-            opt.minimize()
+            opt.minimize(avg_cost)
         if args.memory_optimize:
             fluid.memory_optimize(main_prog)
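The last hunk fixes a real bug: opt.minimize() was called without a loss, but a Fluid optimizer needs the loss variable to know where to append backward and parameter-update ops. A minimal sketch of the corrected pattern on a hypothetical one-layer classifier (standing in for the benchmark's cnn_model, and assuming the Fluid 0.15-era layers API):

```python
import paddle.fluid as fluid

main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    # Hypothetical tiny network, not the benchmark's CNN.
    images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    predict = fluid.layers.fc(input=images, size=10, act='softmax')
    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(x=cost)

    opt = fluid.optimizer.AdamOptimizer(
        learning_rate=0.001, beta1=0.9, beta2=0.999)
    opt.minimize(avg_cost)  # backward + update ops now target this loss
```

The earlier hunk's opt = None and data_file_handle = None initializations serve a related purpose: they guarantee both names exist even on code paths (inference, or no reader op) that never assign them.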
