Skip to content

Commit 3d74615

Browse files
authored
Merge pull request #5615 from helinwang/demo
Simplify demo, add paddle.default_main_program() and paddle.default_startup_program()
2 parents d1d2100 + c089b76 commit 3d74615

11 files changed

+155
-343
lines changed

python/paddle/v2/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,8 @@
3737
import paddle.trainer.config_parser as cp
3838

3939
__all__ = [
40+
'default_startup_program',
41+
'default_main_program',
4042
'optimizer',
4143
'layer',
4244
'activation',

python/paddle/v2/fluid/framework.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
import numpy as np
55
import copy
66

7-
__all__ = ['Block', 'Variable', 'Program', 'Operator']
7+
__all__ = ['Block', 'Variable', 'Program', 'Operator', 'default_startup_program', 'default_main_program']
88

99

1010
def unique_name(prefix):
@@ -562,3 +562,9 @@ def __init__(self, block, shape, dtype, **kwargs):
562562
# program is a global instance.
563563
g_main_program = Program()
564564
g_startup_program = Program()
565+
566+
def default_startup_program():
567+
return g_startup_program
568+
569+
def default_main_program():
570+
return g_main_program

python/paddle/v2/fluid/tests/book/test_fit_a_line.py

Lines changed: 11 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -2,45 +2,33 @@
22
import paddle.v2.fluid.layers as layers
33
import paddle.v2.fluid.core as core
44
import paddle.v2.fluid.optimizer as optimizer
5-
6-
from paddle.v2.fluid.framework import Program
5+
import paddle.v2.fluid.framework as framework
76
from paddle.v2.fluid.io import save_persistables, load_persistables
87
from paddle.v2.fluid.executor import Executor
98

109
import numpy as np
1110

12-
startup_program = Program()
13-
main_program = Program()
1411
x = layers.data(
1512
name='x',
1613
shape=[13],
17-
data_type='float32',
18-
main_program=main_program,
19-
startup_program=startup_program)
14+
data_type='float32')
2015

2116
y_predict = layers.fc(input=x,
2217
size=1,
23-
act=None,
24-
main_program=main_program,
25-
startup_program=startup_program)
18+
act=None)
2619

2720
y = layers.data(
2821
name='y',
2922
shape=[1],
30-
data_type='float32',
31-
main_program=main_program,
32-
startup_program=startup_program)
23+
data_type='float32')
3324

3425
cost = layers.square_error_cost(
3526
input=y_predict,
36-
label=y,
37-
main_program=main_program,
38-
startup_program=startup_program)
39-
avg_cost = layers.mean(
40-
x=cost, main_program=main_program, startup_program=startup_program)
27+
label=y)
28+
avg_cost = layers.mean(x=cost)
4129

4230
sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
43-
opts = sgd_optimizer.minimize(avg_cost, startup_program)
31+
opts = sgd_optimizer.minimize(avg_cost)
4432

4533
BATCH_SIZE = 20
4634

@@ -52,12 +40,12 @@
5240
place = core.CPUPlace()
5341
exe = Executor(place)
5442

55-
exe.run(startup_program)
43+
exe.run(framework.default_startup_program())
5644

5745
PASS_NUM = 100
5846
for pass_id in range(PASS_NUM):
59-
save_persistables(exe, "./fit_a_line.model/", main_program=main_program)
60-
load_persistables(exe, "./fit_a_line.model/", main_program=main_program)
47+
save_persistables(exe, "./fit_a_line.model/")
48+
load_persistables(exe, "./fit_a_line.model/")
6149
for data in train_reader():
6250
x_data = np.array(map(lambda x: x[0], data)).astype("float32")
6351
y_data = np.array(map(lambda x: x[1], data)).astype("float32")
@@ -69,7 +57,7 @@
6957
tensor_y = core.LoDTensor()
7058
tensor_y.set(y_data, place)
7159
# print tensor_y.get_dims()
72-
outs = exe.run(main_program,
60+
outs = exe.run(framework.default_main_program(),
7361
feed={'x': tensor_x,
7462
'y': tensor_y},
7563
fetch_list=[avg_cost])

python/paddle/v2/fluid/tests/book/test_image_classification_train.py

Lines changed: 34 additions & 79 deletions
Original file line numberDiff line numberDiff line change
@@ -5,34 +5,28 @@
55
import paddle.v2.fluid.nets as nets
66
import paddle.v2.fluid.optimizer as optimizer
77
from paddle.v2.fluid.executor import Executor
8-
from paddle.v2.fluid.framework import g_startup_program, g_main_program
8+
import paddle.v2.fluid.framework as framework
99
from paddle.v2.fluid.initializer import XavierInitializer
1010

1111

12-
def resnet_cifar10(input, depth=32, main_program=None, startup_program=None):
12+
def resnet_cifar10(input, depth=32):
1313
def conv_bn_layer(input,
1414
ch_out,
1515
filter_size,
1616
stride,
1717
padding,
18-
act='relu',
19-
main_program=None,
20-
startup_program=None):
18+
act='relu'):
2119
tmp = layers.conv2d(
2220
input=input,
2321
filter_size=filter_size,
2422
num_filters=ch_out,
2523
stride=stride,
2624
padding=padding,
2725
act=None,
28-
bias_attr=False,
29-
main_program=main_program,
30-
startup_program=startup_program)
26+
bias_attr=False)
3127
return layers.batch_norm(
3228
input=tmp,
33-
act=act,
34-
main_program=main_program,
35-
startup_program=startup_program)
29+
act=act)
3630

3731
def shortcut(input, ch_in, ch_out, stride, program, init_program):
3832
if ch_in != ch_out:
@@ -44,40 +38,30 @@ def shortcut(input, ch_in, ch_out, stride, program, init_program):
4438
def basicblock(input,
4539
ch_in,
4640
ch_out,
47-
stride,
48-
main_program=main_program,
49-
startup_program=startup_program):
41+
stride):
5042
tmp = conv_bn_layer(
5143
input,
5244
ch_out,
5345
3,
5446
stride,
55-
1,
56-
main_program=main_program,
57-
startup_program=startup_program)
47+
1)
5848
tmp = conv_bn_layer(
5949
tmp,
6050
ch_out,
6151
3,
6252
1,
6353
1,
64-
act=None,
65-
main_program=main_program,
66-
startup_program=startup_program)
67-
short = shortcut(input, ch_in, ch_out, stride, main_program,
68-
startup_program)
54+
act=None)
55+
short = shortcut(input, ch_in, ch_out, stride)
6956
return layers.elementwise_add(
7057
x=tmp,
7158
y=short,
72-
act='relu',
73-
main_program=main_program,
74-
startup_program=startup_program)
59+
act='relu')
7560

76-
def layer_warp(block_func, input, ch_in, ch_out, count, stride, program,
77-
startup_program):
78-
tmp = block_func(input, ch_in, ch_out, stride, program, startup_program)
61+
def layer_warp(block_func, input, ch_in, ch_out, count, stride):
62+
tmp = block_func(input, ch_in, ch_out, stride)
7963
for i in range(1, count):
80-
tmp = block_func(tmp, ch_out, ch_out, 1, program, startup_program)
64+
tmp = block_func(tmp, ch_out, ch_out, 1)
8165
return tmp
8266

8367
assert (depth - 2) % 6 == 0
@@ -87,53 +71,41 @@ def layer_warp(block_func, input, ch_in, ch_out, count, stride, program,
8771
ch_out=16,
8872
filter_size=3,
8973
stride=1,
90-
padding=1,
91-
main_program=main_program,
92-
startup_program=startup_program)
74+
padding=1)
9375
res1 = layer_warp(
9476
basicblock,
9577
conv1,
9678
16,
9779
16,
9880
n,
99-
1,
100-
main_program=main_program,
101-
startup_program=startup_program)
81+
1)
10282
res2 = layer_warp(
10383
basicblock,
10484
res1,
10585
16,
10686
32,
10787
n,
108-
2,
109-
main_program=main_program,
110-
startup_program=startup_program)
88+
2)
11189
res3 = layer_warp(
11290
basicblock,
11391
res2,
11492
32,
11593
64,
11694
n,
117-
2,
118-
main_program=main_program,
119-
startup_program=startup_program)
95+
2)
12096
pool = layers.pool2d(
12197
input=res3,
12298
pool_size=8,
12399
pool_type='avg',
124-
pool_stride=1,
125-
main_program=main_program,
126-
startup_program=startup_program)
100+
pool_stride=1)
127101
return pool
128102

129103

130-
def vgg16_bn_drop(input, main_program=None, startup_program=None):
104+
def vgg16_bn_drop(input):
131105
def conv_block(input,
132106
num_filter,
133107
groups,
134-
dropouts,
135-
main_program=None,
136-
startup_program=None):
108+
dropouts):
137109
return nets.img_conv_group(
138110
input=input,
139111
pool_size=2,
@@ -143,51 +115,34 @@ def conv_block(input,
143115
conv_act='relu',
144116
conv_with_batchnorm=True,
145117
conv_batchnorm_drop_rate=dropouts,
146-
pool_type='max',
147-
main_program=main_program,
148-
startup_program=startup_program)
118+
pool_type='max')
149119

150-
conv1 = conv_block(input, 64, 2, [0.3, 0], main_program, startup_program)
151-
conv2 = conv_block(conv1, 128, 2, [0.4, 0], main_program, startup_program)
152-
conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0], main_program,
153-
startup_program)
154-
conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0], main_program,
155-
startup_program)
156-
conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0], main_program,
157-
startup_program)
120+
conv1 = conv_block(input, 64, 2, [0.3, 0])
121+
conv2 = conv_block(conv1, 128, 2, [0.4, 0])
122+
conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
123+
conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
124+
conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])
158125

159126
drop = layers.dropout(
160127
x=conv5,
161-
dropout_prob=0.5,
162-
main_program=main_program,
163-
startup_program=startup_program)
128+
dropout_prob=0.5)
164129
fc1 = layers.fc(input=drop,
165130
size=512,
166131
act=None,
167-
param_attr={"initializer": XavierInitializer()},
168-
main_program=main_program,
169-
startup_program=startup_program)
132+
param_attr={"initializer": XavierInitializer()})
170133
reshape1 = layers.reshape(
171134
x=fc1,
172-
shape=list(fc1.shape + (1, 1)),
173-
main_program=main_program,
174-
startup_program=startup_program)
135+
shape=list(fc1.shape + (1, 1)))
175136
bn = layers.batch_norm(
176137
input=reshape1,
177-
act='relu',
178-
main_program=main_program,
179-
startup_program=startup_program)
138+
act='relu')
180139
drop2 = layers.dropout(
181140
x=bn,
182-
dropout_prob=0.5,
183-
main_program=main_program,
184-
startup_program=startup_program)
141+
dropout_prob=0.5)
185142
fc2 = layers.fc(input=drop2,
186143
size=512,
187144
act=None,
188-
param_attr={"initializer": XavierInitializer()},
189-
main_program=main_program,
190-
startup_program=startup_program)
145+
param_attr={"initializer": XavierInitializer()})
191146
return fc2
192147

193148

@@ -225,7 +180,7 @@ def conv_block(input,
225180
place = core.CPUPlace()
226181
exe = Executor(place)
227182

228-
exe.run(g_startup_program)
183+
exe.run(framework.default_startup_program())
229184

230185
for pass_id in range(PASS_NUM):
231186
batch_id = 0
@@ -243,7 +198,7 @@ def conv_block(input,
243198
tensor_img.set(img_data, place)
244199
tensor_y.set(y_data, place)
245200

246-
outs = exe.run(g_main_program,
201+
outs = exe.run(framework.default_main_program(),
247202
feed={"pixel": tensor_img,
248203
"label": tensor_y},
249204
fetch_list=[avg_cost, accuracy])

0 commit comments

Comments
 (0)