Commit fb7ca48

Add image classification unit test using simplified fluid API (#10306)
1 parent 95d2651 commit fb7ca48

2 files changed: 269 additions & 0 deletions
Lines changed: 145 additions & 0 deletions (new file: ResNet-32 CIFAR-10 test)
@@ -0,0 +1,145 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import paddle
import paddle.fluid as fluid
import numpy


def resnet_cifar10(input, depth=32):
    def conv_bn_layer(input,
                      ch_out,
                      filter_size,
                      stride,
                      padding,
                      act='relu',
                      bias_attr=False):
        tmp = fluid.layers.conv2d(
            input=input,
            filter_size=filter_size,
            num_filters=ch_out,
            stride=stride,
            padding=padding,
            act=None,
            bias_attr=bias_attr)
        return fluid.layers.batch_norm(input=tmp, act=act)

    def shortcut(input, ch_in, ch_out, stride):
        if ch_in != ch_out:
            return conv_bn_layer(input, ch_out, 1, stride, 0, None)
        else:
            return input

    def basicblock(input, ch_in, ch_out, stride):
        tmp = conv_bn_layer(input, ch_out, 3, stride, 1)
        tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True)
        short = shortcut(input, ch_in, ch_out, stride)
        return fluid.layers.elementwise_add(x=tmp, y=short, act='relu')

    def layer_warp(block_func, input, ch_in, ch_out, count, stride):
        tmp = block_func(input, ch_in, ch_out, stride)
        for i in range(1, count):
            tmp = block_func(tmp, ch_out, ch_out, 1)
        return tmp

    assert (depth - 2) % 6 == 0
    # Integer division keeps `n` an int so it can be used as a range() bound.
    n = (depth - 2) // 6
    conv1 = conv_bn_layer(
        input=input, ch_out=16, filter_size=3, stride=1, padding=1)
    res1 = layer_warp(basicblock, conv1, 16, 16, n, 1)
    res2 = layer_warp(basicblock, res1, 16, 32, n, 2)
    res3 = layer_warp(basicblock, res2, 32, 64, n, 2)
    pool = fluid.layers.pool2d(
        input=res3, pool_size=8, pool_type='avg', pool_stride=1)
    return pool


def inference_network():
    classdim = 10
    data_shape = [3, 32, 32]
    images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
    net = resnet_cifar10(images, 32)
    predict = fluid.layers.fc(input=net, size=classdim, act='softmax')
    return predict


def train_network():
    predict = inference_network()
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(cost)
    accuracy = fluid.layers.accuracy(input=predict, label=label)
    return avg_cost, accuracy


def train(use_cuda, save_path):
    BATCH_SIZE = 128
    EPOCH_NUM = 1

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.cifar.train10(), buf_size=128 * 10),
        batch_size=BATCH_SIZE)

    test_reader = paddle.batch(
        paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE)

    def event_handler(event):
        # Every 10 batches, evaluate on the test set and save the parameters
        # once accuracy clears a deliberately low threshold.
        if isinstance(event, fluid.EndIteration):
            if (event.batch_id % 10) == 0:
                avg_cost, accuracy = trainer.test(reader=test_reader)

                print('BatchID {0:04}, Loss {1:2.2}, Acc {2:2.2}'.format(
                    event.batch_id + 1, avg_cost, accuracy))

                if accuracy > 0.01:  # Low threshold for speeding up CI
                    trainer.params.save(save_path)
                    return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    trainer = fluid.Trainer(
        train_network,
        optimizer=fluid.optimizer.Adam(learning_rate=0.001),
        place=place,
        event_handler=event_handler)
    trainer.train(train_reader, EPOCH_NUM, event_handler=event_handler)


def infer(use_cuda, save_path):
    params = fluid.Params(save_path)
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(inference_network, params, place=place)

    # The input of a conv layer should be 4-D or 5-D.
    # Use normalized image pixels as input data, which should be in the range
    # [0, 1.0].
    tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32")
    results = inferencer.infer({'pixel': tensor_img})

    print("infer results: ", results)


def main(use_cuda):
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return
    save_path = "image_classification_resnet.inference.model"
    train(use_cuda, save_path)
    infer(use_cuda, save_path)


if __name__ == '__main__':
    for use_cuda in (False, True):
        main(use_cuda=use_cuda)
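
The pooling at the end of resnet_cifar10 is tied to the CIFAR-10 input size: res2 and res3 each halve the 32x32 feature map, so res3 produces an 8x8 map and pool_size=8 amounts to global average pooling. A minimal sketch of that bookkeeping (illustrative only, not part of the committed file):

    size = 32                  # CIFAR-10 spatial resolution
    for stride in (1, 2, 2):   # strides of res1, res2, res3
        size //= stride
    assert size == 8           # matches pool_size=8 in resnet_cifar10
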
Lines changed: 124 additions & 0 deletions (new file: VGG16 CIFAR-10 test)
@@ -0,0 +1,124 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import paddle
import paddle.fluid as fluid
import numpy


def vgg16_bn_drop(input):
    def conv_block(input, num_filter, groups, dropouts):
        return fluid.nets.img_conv_group(
            input=input,
            pool_size=2,
            pool_stride=2,
            conv_num_filter=[num_filter] * groups,
            conv_filter_size=3,
            conv_act='relu',
            conv_with_batchnorm=True,
            conv_batchnorm_drop_rate=dropouts,
            pool_type='max')

    conv1 = conv_block(input, 64, 2, [0.3, 0])
    conv2 = conv_block(conv1, 128, 2, [0.4, 0])
    conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
    conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
    conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])

    drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)
    fc1 = fluid.layers.fc(input=drop, size=4096, act=None)
    bn = fluid.layers.batch_norm(input=fc1, act='relu')
    drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
    fc2 = fluid.layers.fc(input=drop2, size=4096, act=None)
    return fc2


def inference_network():
    classdim = 10
    data_shape = [3, 32, 32]
    images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
    net = vgg16_bn_drop(images)
    predict = fluid.layers.fc(input=net, size=classdim, act='softmax')
    return predict


def train_network():
    predict = inference_network()
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(cost)
    accuracy = fluid.layers.accuracy(input=predict, label=label)
    return avg_cost, accuracy


def train(use_cuda, save_path):
    BATCH_SIZE = 128
    EPOCH_NUM = 1

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.cifar.train10(), buf_size=128 * 10),
        batch_size=BATCH_SIZE)

    test_reader = paddle.batch(
        paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE)

    def event_handler(event):
        # Every 10 batches, evaluate on the test set and save the parameters
        # once accuracy clears a deliberately low threshold.
        if isinstance(event, fluid.EndIteration):
            if (event.batch_id % 10) == 0:
                avg_cost, accuracy = trainer.test(reader=test_reader)

                print('BatchID {0:04}, Loss {1:2.2}, Acc {2:2.2}'.format(
                    event.batch_id + 1, avg_cost, accuracy))

                if accuracy > 0.01:  # Low threshold for speeding up CI
                    trainer.params.save(save_path)
                    return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    trainer = fluid.Trainer(
        train_network,
        optimizer=fluid.optimizer.Adam(learning_rate=0.001),
        place=place,
        event_handler=event_handler)
    trainer.train(train_reader, EPOCH_NUM, event_handler=event_handler)


def infer(use_cuda, save_path):
    params = fluid.Params(save_path)
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(inference_network, params, place=place)

    # The input of a conv layer should be 4-D or 5-D.
    # Use normalized image pixels as input data, which should be in the range
    # [0, 1.0].
    tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32")
    results = inferencer.infer({'pixel': tensor_img})

    print("infer results: ", results)


def main(use_cuda):
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return
    save_path = "image_classification_vgg.inference.model"
    train(use_cuda, save_path)
    infer(use_cuda, save_path)


if __name__ == '__main__':
    for use_cuda in (False, True):
        main(use_cuda=use_cuda)
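
In vgg16_bn_drop, each of the five conv_block calls ends with a 2x2 max pool of stride 2, so a 32x32 CIFAR-10 image shrinks to a 1x1 spatial map with 512 channels before fc1. A quick check of that arithmetic (illustrative only, not part of the committed file):

    size = 32              # CIFAR-10 spatial resolution
    for _ in range(5):     # conv1 .. conv5 each pool by a factor of 2
        size //= 2
    assert size == 1       # 512 x 1 x 1 features feed the 4096-wide fc1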
