Commit 0ce38b7

correct optimizer import (#5699)

1 parent 06d155b · commit 0ce38b7
9 files changed (4 shown below): +101 −270 lines
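The pattern corrected in each diff below: several of these test scripts bound an optimizer instance to the very name of the aliased module (`optimizer = optimizer.AdamOptimizer(...)`), which shadows the module after the first assignment; the commit switches every script to direct class imports such as `from paddle.v2.fluid.optimizer import SGDOptimizer` and tidies the import blocks even where no shadowing occurred (e.g. test_fit_a_line.py). A minimal, self-contained sketch of the shadowing hazard — a stand-in module built with `types`; the class names are illustrative, not the real Paddle API:

import types

# Stand-in for `import paddle.v2.fluid.optimizer as optimizer`.
optimizer = types.ModuleType("optimizer")
optimizer.AdamOptimizer = type("AdamOptimizer", (), {})
optimizer.SGDOptimizer = type("SGDOptimizer", (), {})

# The old pattern: the new instance clobbers the module binding.
optimizer = optimizer.AdamOptimizer()

# Any later lookup through the same name hits the instance, not the module:
try:
    optimizer.SGDOptimizer()
except AttributeError as err:
    print(err)  # 'AdamOptimizer' object has no attribute 'SGDOptimizer'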

python/paddle/v2/fluid/tests/book/test_fit_a_line.py
Lines changed: 9 additions & 20 deletions

@@ -1,33 +1,22 @@
+import numpy as np
 import paddle.v2 as paddle
-import paddle.v2.fluid.layers as layers
 import paddle.v2.fluid.core as core
-import paddle.v2.fluid.optimizer as optimizer
 import paddle.v2.fluid.framework as framework
-from paddle.v2.fluid.io import save_persistables, load_persistables
+import paddle.v2.fluid.layers as layers
 from paddle.v2.fluid.executor import Executor
+from paddle.v2.fluid.io import save_persistables, load_persistables
+from paddle.v2.fluid.optimizer import SGDOptimizer
 
-import numpy as np
-
-x = layers.data(
-    name='x',
-    shape=[13],
-    data_type='float32')
+x = layers.data(name='x', shape=[13], data_type='float32')
 
-y_predict = layers.fc(input=x,
-                      size=1,
-                      act=None)
+y_predict = layers.fc(input=x, size=1, act=None)
 
-y = layers.data(
-    name='y',
-    shape=[1],
-    data_type='float32')
+y = layers.data(name='y', shape=[1], data_type='float32')
 
-cost = layers.square_error_cost(
-    input=y_predict,
-    label=y)
+cost = layers.square_error_cost(input=y_predict, label=y)
 avg_cost = layers.mean(x=cost)
 
-sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
+sgd_optimizer = SGDOptimizer(learning_rate=0.001)
 opts = sgd_optimizer.minimize(avg_cost)
 
 BATCH_SIZE = 20

python/paddle/v2/fluid/tests/book/test_image_classification_train.py
Lines changed: 20 additions & 80 deletions

@@ -1,21 +1,16 @@
 import numpy as np
 import paddle.v2 as paddle
 import paddle.v2.fluid.core as core
+import paddle.v2.fluid.framework as framework
 import paddle.v2.fluid.layers as layers
 import paddle.v2.fluid.nets as nets
-import paddle.v2.fluid.optimizer as optimizer
 from paddle.v2.fluid.executor import Executor
-import paddle.v2.fluid.framework as framework
 from paddle.v2.fluid.initializer import XavierInitializer
+from paddle.v2.fluid.optimizer import AdamOptimizer
 
 
 def resnet_cifar10(input, depth=32):
-    def conv_bn_layer(input,
-                      ch_out,
-                      filter_size,
-                      stride,
-                      padding,
-                      act='relu'):
+    def conv_bn_layer(input, ch_out, filter_size, stride, padding, act='relu'):
         tmp = layers.conv2d(
             input=input,
             filter_size=filter_size,
@@ -24,9 +19,7 @@ def conv_bn_layer(input,
             padding=padding,
             act=None,
             bias_attr=False)
-        return layers.batch_norm(
-            input=tmp,
-            act=act)
+        return layers.batch_norm(input=tmp, act=act)
 
     def shortcut(input, ch_in, ch_out, stride, program, init_program):
         if ch_in != ch_out:
@@ -35,28 +28,11 @@ def shortcut(input, ch_in, ch_out, stride, program, init_program):
         else:
             return input
 
-    def basicblock(input,
-                   ch_in,
-                   ch_out,
-                   stride):
-        tmp = conv_bn_layer(
-            input,
-            ch_out,
-            3,
-            stride,
-            1)
-        tmp = conv_bn_layer(
-            tmp,
-            ch_out,
-            3,
-            1,
-            1,
-            act=None)
+    def basicblock(input, ch_in, ch_out, stride):
+        tmp = conv_bn_layer(input, ch_out, 3, stride, 1)
+        tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None)
         short = shortcut(input, ch_in, ch_out, stride)
-        return layers.elementwise_add(
-            x=tmp,
-            y=short,
-            act='relu')
+        return layers.elementwise_add(x=tmp, y=short, act='relu')
 
     def layer_warp(block_func, input, ch_in, ch_out, count, stride):
         tmp = block_func(input, ch_in, ch_out, stride)
@@ -67,45 +43,17 @@ def layer_warp(block_func, input, ch_in, ch_out, count, stride):
     assert (depth - 2) % 6 == 0
     n = (depth - 2) / 6
     conv1 = conv_bn_layer(
-        input=input,
-        ch_out=16,
-        filter_size=3,
-        stride=1,
-        padding=1)
-    res1 = layer_warp(
-        basicblock,
-        conv1,
-        16,
-        16,
-        n,
-        1)
-    res2 = layer_warp(
-        basicblock,
-        res1,
-        16,
-        32,
-        n,
-        2)
-    res3 = layer_warp(
-        basicblock,
-        res2,
-        32,
-        64,
-        n,
-        2)
+        input=input, ch_out=16, filter_size=3, stride=1, padding=1)
+    res1 = layer_warp(basicblock, conv1, 16, 16, n, 1)
+    res2 = layer_warp(basicblock, res1, 16, 32, n, 2)
+    res3 = layer_warp(basicblock, res2, 32, 64, n, 2)
     pool = layers.pool2d(
-        input=res3,
-        pool_size=8,
-        pool_type='avg',
-        pool_stride=1)
+        input=res3, pool_size=8, pool_type='avg', pool_stride=1)
     return pool
 
 
 def vgg16_bn_drop(input):
-    def conv_block(input,
-                   num_filter,
-                   groups,
-                   dropouts):
+    def conv_block(input, num_filter, groups, dropouts):
         return nets.img_conv_group(
             input=input,
             pool_size=2,
@@ -123,22 +71,14 @@ def conv_block(input,
     conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
     conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])
 
-    drop = layers.dropout(
-        x=conv5,
-        dropout_prob=0.5)
+    drop = layers.dropout(x=conv5, dropout_prob=0.5)
     fc1 = layers.fc(input=drop,
                     size=512,
                     act=None,
                     param_attr={"initializer": XavierInitializer()})
-    reshape1 = layers.reshape(
-        x=fc1,
-        shape=list(fc1.shape + (1, 1)))
-    bn = layers.batch_norm(
-        input=reshape1,
-        act='relu')
-    drop2 = layers.dropout(
-        x=bn,
-        dropout_prob=0.5)
+    reshape1 = layers.reshape(x=fc1, shape=list(fc1.shape + (1, 1)))
+    bn = layers.batch_norm(input=reshape1, act='relu')
+    drop2 = layers.dropout(x=bn, dropout_prob=0.5)
     fc2 = layers.fc(input=drop2,
                     size=512,
                     act=None,
@@ -165,8 +105,8 @@ def conv_block(input,
 avg_cost = layers.mean(x=cost)
 accuracy = layers.accuracy(input=predict, label=label)
 
-# optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
-optimizer = optimizer.AdamOptimizer(learning_rate=0.001)
+# optimizer = SGDOptimizer(learning_rate=0.001)
+optimizer = AdamOptimizer(learning_rate=0.001)
 opts = optimizer.minimize(avg_cost)
 
 BATCH_SIZE = 128
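The commented-out SGD line in the last hunk shows why the aliased import was fragile: under the old code, enabling both lines in sequence would make the second raise AttributeError, because the first rebinds `optimizer` away from the module. With direct class imports, switching optimizers is a safe one-line toggle. A sketch with stand-in classes (the real ones live in `paddle.v2.fluid.optimizer` and take more parameters):

class SGDOptimizer(object):
    def __init__(self, learning_rate):
        self.learning_rate = learning_rate

class AdamOptimizer(object):
    def __init__(self, learning_rate):
        self.learning_rate = learning_rate

# Either line can be enabled; the name `optimizer` never shadows a module.
# optimizer = SGDOptimizer(learning_rate=0.001)
optimizer = AdamOptimizer(learning_rate=0.001)
print(type(optimizer).__name__, optimizer.learning_rate)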

python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py
Lines changed: 9 additions & 20 deletions

@@ -1,22 +1,15 @@
+import numpy as np
 import paddle.v2 as paddle
-import paddle.v2.fluid.layers as layers
-import paddle.v2.fluid.nets as nets
 import paddle.v2.fluid.core as core
-import paddle.v2.fluid.optimizer as optimizer
 import paddle.v2.fluid.evaluator as evaluator
 import paddle.v2.fluid.framework as framework
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.nets as nets
 from paddle.v2.fluid.executor import Executor
+from paddle.v2.fluid.optimizer import AdamOptimizer
 
-import numpy as np
-
-images = layers.data(
-    name='pixel',
-    shape=[1, 28, 28],
-    data_type='float32')
-label = layers.data(
-    name='label',
-    shape=[1],
-    data_type='int64')
+images = layers.data(name='pixel', shape=[1, 28, 28], data_type='float32')
+label = layers.data(name='label', shape=[1], data_type='int64')
 conv_pool_1 = nets.simple_img_conv_pool(
     input=images,
     filter_size=5,
@@ -32,17 +25,13 @@
     pool_stride=2,
     act="relu")
 
-predict = layers.fc(input=conv_pool_2,
-                    size=10,
-                    act="softmax")
+predict = layers.fc(input=conv_pool_2, size=10, act="softmax")
 cost = layers.cross_entropy(input=predict, label=label)
 avg_cost = layers.mean(x=cost)
-optimizer = optimizer.AdamOptimizer(learning_rate=0.01, beta1=0.9, beta2=0.999)
+optimizer = AdamOptimizer(learning_rate=0.01, beta1=0.9, beta2=0.999)
 opts = optimizer.minimize(avg_cost)
 
-accuracy, acc_out = evaluator.accuracy(
-    input=predict,
-    label=label)
+accuracy, acc_out = evaluator.accuracy(input=predict, label=label)
 
 BATCH_SIZE = 50
 PASS_NUM = 3

python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py
Lines changed: 10 additions & 25 deletions

@@ -1,19 +1,15 @@
+import numpy as np
 import paddle.v2 as paddle
-import paddle.v2.fluid.layers as layers
 import paddle.v2.fluid.core as core
-import paddle.v2.fluid.optimizer as optimizer
 import paddle.v2.fluid.framework as framework
+import paddle.v2.fluid.layers as layers
 from paddle.v2.fluid.executor import Executor
-from paddle.v2.fluid.regularizer import L2DecayRegularizer
 from paddle.v2.fluid.initializer import UniformInitializer
-
-import numpy as np
+from paddle.v2.fluid.optimizer import MomentumOptimizer
+from paddle.v2.fluid.regularizer import L2DecayRegularizer
 
 BATCH_SIZE = 128
-image = layers.data(
-    name='x',
-    shape=[784],
-    data_type='float32')
+image = layers.data(name='x', shape=[784], data_type='float32')
 
 param_attr = {
     'name': None,
@@ -22,32 +18,21 @@
     'regularization': L2DecayRegularizer(0.0005 * BATCH_SIZE)
 }
 
-hidden1 = layers.fc(input=image,
-                    size=128,
-                    act='relu',
-                    param_attr=param_attr)
-hidden2 = layers.fc(input=hidden1,
-                    size=64,
-                    act='relu',
-                    param_attr=param_attr)
+hidden1 = layers.fc(input=image, size=128, act='relu', param_attr=param_attr)
+hidden2 = layers.fc(input=hidden1, size=64, act='relu', param_attr=param_attr)
 
 predict = layers.fc(input=hidden2,
                     size=10,
                     act='softmax',
                     param_attr=param_attr)
 
-label = layers.data(
-    name='y',
-    shape=[1],
-    data_type='int64')
+label = layers.data(name='y', shape=[1], data_type='int64')
 
 cost = layers.cross_entropy(input=predict, label=label)
 avg_cost = layers.mean(x=cost)
-accuracy = layers.accuracy(
-    input=predict,
-    label=label)
+accuracy = layers.accuracy(input=predict, label=label)
 
-optimizer = optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
+optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
 opts = optimizer.minimize(avg_cost)
 
 train_reader = paddle.batch(
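Beyond the optimizer change, every header is normalized the same way: plain `import` statements first, then `from` imports, each group alphabetized, and the stray mid-file `import numpy as np` hoisted to the top. Assembled verbatim from the hunk above, the resulting header of test_recognize_digits_mlp.py reads as follows (runnable only against the paddle.v2 fluid tree this commit targets):

import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid.core as core
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.initializer import UniformInitializer
from paddle.v2.fluid.optimizer import MomentumOptimizer
from paddle.v2.fluid.regularizer import L2DecayRegularizer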
