Commit a6ef41f

Author: chengduo

Fix high level api bug on release-0.15 (#13164)

* fix high level API(Inference) bug
* patch the unit tests

1 parent c5763c1 commit a6ef41f
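
As a point of reference, the high-level inference API that this commit fixes is driven roughly as in the sketch below. It is assembled from the updated test files in this commit rather than taken from any single file; the stand-in network, the pre-existing param_path directory, and the input shape are assumptions for illustration only.

import numpy
import paddle.fluid as fluid

def inference_network():
    # Stand-in network; the real tests build ResNet/VGG/MLP topologies here.
    images = fluid.layers.data(name='pixel', shape=[3, 32, 32], dtype='float32')
    return fluid.layers.fc(input=images, size=10, act='softmax')

place = fluid.CPUPlace()
inferencer = fluid.Inferencer(
    infer_func=inference_network,
    param_path="image_classification_resnet.inference.model",  # assumed to have been saved by fluid.Trainer beforehand
    place=place,
    parallel=False)

# One normalized CIFAR-style image in NCHW layout (shape assumed).
tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32")
results = inferencer.infer({'pixel': tensor_img})
print("infer results: ", results)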

5 files changed: +110 additions, -40 deletions

5 files changed

+110
-40
lines changed

python/paddle/fluid/inferencer.py

Lines changed: 3 additions & 4 deletions
@@ -98,10 +98,9 @@ def infer(self, inputs, return_numpy=True):
             raise ValueError(
                 "inputs should be a map of {'input_name': input_var}")
 
-        with executor.scope_guard(self.scope):
-            results = self.exe.run(self.inference_program,
-                                   feed=inputs,
-                                   fetch_list=[self.predict_var],
+        with self._prog_and_scope_guard():
+            results = self.exe.run(feed=inputs,
+                                   fetch_list=[self.predict_var.name],
                                    return_numpy=return_numpy)
 
         return results
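
This change assumes the Inferencer class already exposes a combined program-and-scope guard, and that the underlying executor (a parallel executor when parallel=True) fetches results by variable name, hence predict_var.name. A minimal sketch of what such a guard method might look like follows; it is an assumption for illustration, not code from this commit.

import contextlib

import paddle.fluid.executor as executor
import paddle.fluid.framework as framework

class Inferencer(object):
    # Assumed context: __init__ builds self.inference_program, self.scope
    # and self.exe before infer() is ever called.

    @contextlib.contextmanager
    def _prog_and_scope_guard(self):
        # Activate the inference program and its scope together, so that
        # self.exe.run() needs neither an explicit program nor scope argument
        # and works for both the plain and the parallel executor paths.
        with framework.program_guard(main_program=self.inference_program):
            with executor.scope_guard(self.scope):
                yield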

python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py

Lines changed: 28 additions & 8 deletions
@@ -16,7 +16,9 @@
 
 import paddle
 import paddle.fluid as fluid
+import paddle.fluid.core as core
 import numpy
+import os
 import cifar10_small_test_set
 
 
@@ -89,7 +91,7 @@ def optimizer_func():
     return fluid.optimizer.Adam(learning_rate=0.001)
 
 
-def train(use_cuda, train_program, params_dirname):
+def train(use_cuda, train_program, parallel, params_dirname):
     BATCH_SIZE = 128
     EPOCH_NUM = 1
 
@@ -116,7 +118,10 @@ def event_handler(event):
 
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
     trainer = fluid.Trainer(
-        train_func=train_program, optimizer_func=optimizer_func, place=place)
+        train_func=train_program,
+        optimizer_func=optimizer_func,
+        place=place,
+        parallel=parallel)
 
     trainer.train(
         reader=train_reader,
@@ -125,10 +130,13 @@ def event_handler(event):
         feed_order=['pixel', 'label'])
 
 
-def infer(use_cuda, inference_program, params_dirname=None):
+def infer(use_cuda, inference_program, parallel, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
     inferencer = fluid.Inferencer(
-        infer_func=inference_program, param_path=params_dirname, place=place)
+        infer_func=inference_program,
+        param_path=params_dirname,
+        place=place,
+        parallel=parallel)
 
     # The input's dimension of conv should be 4-D or 5-D.
     # Use normilized image pixels as input data, which should be in the range
@@ -139,22 +147,34 @@ def infer(use_cuda, inference_program, params_dirname=None):
     print("infer results: ", results)
 
 
-def main(use_cuda):
+def main(use_cuda, parallel):
     if use_cuda and not fluid.core.is_compiled_with_cuda():
         return
     save_path = "image_classification_resnet.inference.model"
 
+    os.environ['CPU_NUM'] = str(4)
     train(
         use_cuda=use_cuda,
         train_program=train_network,
-        params_dirname=save_path)
+        params_dirname=save_path,
+        parallel=parallel)
 
+    # FIXME(zcd): in the inference stage, the number of
+    # input data is one, it is not appropriate to use parallel.
+    if parallel and use_cuda:
+        return
+
+    os.environ['CPU_NUM'] = str(1)
     infer(
        use_cuda=use_cuda,
        inference_program=inference_network,
-        params_dirname=save_path)
+        params_dirname=save_path,
+        parallel=parallel)
 
 
 if __name__ == '__main__':
     for use_cuda in (False, True):
-        main(use_cuda=use_cuda)
+        for parallel in (False, True):
+            if use_cuda and not core.is_compiled_with_cuda():
+                continue
+            main(use_cuda=use_cuda, parallel=parallel)
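
All four updated test files follow the same pattern as this ResNet test: CPU_NUM is set to 4 before parallel training, inference under parallel+CUDA is skipped, CPU_NUM is reset to 1 before inference, and every (use_cuda, parallel) combination is exercised at the bottom of the file. A hedged sketch of that driver loop in isolation is shown below; run_matrix and cuda_available are hypothetical names introduced only to show the pattern, and the comment about CPU_NUM reflects the behavior the tests appear to rely on rather than documented API.

import os

def run_matrix(main, cuda_available):
    # Exercise every (use_cuda, parallel) combination; CUDA configurations are
    # skipped on CPU-only builds, mirroring the loops added in these tests.
    for use_cuda in (False, True):
        for parallel in (False, True):
            if use_cuda and not cuda_available:
                continue
            # CPU_NUM is presumably read by the parallel execution path to
            # decide how many CPU places to create, so set it before training.
            os.environ['CPU_NUM'] = str(4)
            main(use_cuda=use_cuda, parallel=parallel)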

python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py

Lines changed: 27 additions & 10 deletions
@@ -16,7 +16,9 @@
 
 import paddle
 import paddle.fluid as fluid
+import paddle.fluid.core as core
 import numpy
+import os
 import cifar10_small_test_set
 
 
@@ -68,7 +70,7 @@ def optimizer_func():
     return fluid.optimizer.Adam(learning_rate=0.001)
 
 
-def train(use_cuda, train_program, params_dirname):
+def train(use_cuda, train_program, parallel, params_dirname):
     BATCH_SIZE = 128
     train_reader = paddle.batch(
         paddle.reader.shuffle(
@@ -93,7 +95,10 @@ def event_handler(event):
 
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
     trainer = fluid.Trainer(
-        train_func=train_program, place=place, optimizer_func=optimizer_func)
+        train_func=train_program,
+        place=place,
+        optimizer_func=optimizer_func,
+        parallel=parallel)
 
     trainer.train(
         reader=train_reader,
@@ -102,10 +107,13 @@ def event_handler(event):
         feed_order=['pixel', 'label'])
 
 
-def infer(use_cuda, inference_program, params_dirname=None):
+def infer(use_cuda, inference_program, parallel, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
     inferencer = fluid.Inferencer(
-        infer_func=inference_program, param_path=params_dirname, place=place)
+        infer_func=inference_program,
+        param_path=params_dirname,
+        place=place,
+        parallel=parallel)
 
     # The input's dimension of conv should be 4-D or 5-D.
     # Use normilized image pixels as input data, which should be in the range
@@ -116,22 +124,31 @@ def infer(use_cuda, inference_program, params_dirname=None):
     print("infer results: ", results)
 
 
-def main(use_cuda):
-    if use_cuda and not fluid.core.is_compiled_with_cuda():
-        return
+def main(use_cuda, parallel):
     save_path = "image_classification_vgg.inference.model"
 
+    os.environ['CPU_NUM'] = str(4)
     train(
         use_cuda=use_cuda,
         train_program=train_network,
-        params_dirname=save_path)
+        params_dirname=save_path,
+        parallel=parallel)
 
+    # FIXME(zcd): in the inference stage, the number of
+    # input data is one, it is not appropriate to use parallel.
+    if parallel and use_cuda:
+        return
+    os.environ['CPU_NUM'] = str(1)
     infer(
         use_cuda=use_cuda,
         inference_program=inference_network,
-        params_dirname=save_path)
+        params_dirname=save_path,
+        parallel=parallel)
 
 
 if __name__ == '__main__':
     for use_cuda in (False, True):
-        main(use_cuda=use_cuda)
+        for parallel in (False, True):
+            if use_cuda and not core.is_compiled_with_cuda():
+                continue
+            main(use_cuda=use_cuda, parallel=parallel)

python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py

Lines changed: 24 additions & 9 deletions
@@ -64,14 +64,14 @@ def optimizer_func():
     return fluid.optimizer.Adam(learning_rate=0.001)
 
 
-def train(use_cuda, train_program, params_dirname):
+def train(use_cuda, train_program, parallel, params_dirname):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
 
     trainer = fluid.Trainer(
         train_func=train_program,
         place=place,
         optimizer_func=optimizer_func,
-        parallel=True)
+        parallel=parallel)
 
     def event_handler(event):
         if isinstance(event, fluid.EndEpochEvent):
@@ -108,11 +108,14 @@ def event_handler(event):
         feed_order=['img', 'label'])
 
 
-def infer(use_cuda, inference_program, params_dirname=None):
+def infer(use_cuda, inference_program, parallel, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
 
     inferencer = fluid.Inferencer(
-        infer_func=inference_program, param_path=params_dirname, place=place)
+        infer_func=inference_program,
+        param_path=params_dirname,
+        place=place,
+        parallel=parallel)
 
     batch_size = 1
     tensor_img = numpy.random.uniform(-1.0, 1.0,
@@ -123,20 +126,32 @@ def infer(use_cuda, inference_program, params_dirname=None):
     print("infer results: ", results[0])
 
 
-def main(use_cuda):
+def main(use_cuda, parallel):
     params_dirname = "recognize_digits_conv.inference.model"
 
     # call train() with is_local argument to run distributed train
+    os.environ['CPU_NUM'] = str(4)
     train(
         use_cuda=use_cuda,
         train_program=train_program,
-        params_dirname=params_dirname)
+        params_dirname=params_dirname,
+        parallel=parallel)
+
+    # FIXME(zcd): in the inference stage, the number of
+    # input data is one, it is not appropriate to use parallel.
+    if parallel and use_cuda:
+        return
+    os.environ['CPU_NUM'] = str(1)
     infer(
         use_cuda=use_cuda,
         inference_program=inference_program,
-        params_dirname=params_dirname)
+        params_dirname=params_dirname,
+        parallel=parallel)
 
 
 if __name__ == '__main__':
-    # for use_cuda in (False, True):
-    main(use_cuda=core.is_compiled_with_cuda())
+    for use_cuda in (False, True):
+        for parallel in (False, True):
+            if use_cuda and not core.is_compiled_with_cuda():
+                continue
+            main(use_cuda=use_cuda, parallel=parallel)
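
In the digit-recognition tests the tensor_img context line is cut off at the hunk boundary; the inference input is a single random MNIST-shaped image fed to the Inferencer created in infer(). A hedged, standalone illustration of that feed follows, with the shape assumed to be [batch_size, 1, 28, 28] (it is not shown in this diff).

import numpy

batch_size = 1
# One random image in NCHW layout; MNIST digits are single-channel 28x28
# (shape assumed, not taken from the diff above).
tensor_img = numpy.random.uniform(
    -1.0, 1.0, [batch_size, 1, 28, 28]).astype("float32")
results = inferencer.infer({'img': tensor_img})
print("infer results: ", results[0])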

python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py

Lines changed: 28 additions & 9 deletions
@@ -16,6 +16,7 @@
 
 import argparse
 import paddle.fluid as fluid
+import paddle.fluid.core as core
 import paddle
 import sys
 import numpy
@@ -50,11 +51,14 @@ def optimizer_func():
     return fluid.optimizer.Adam(learning_rate=0.001)
 
 
-def train(use_cuda, train_program, params_dirname):
+def train(use_cuda, train_program, params_dirname, parallel):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
 
     trainer = fluid.Trainer(
-        train_func=train_program, place=place, optimizer_func=optimizer_func)
+        train_func=train_program,
+        place=place,
+        optimizer_func=optimizer_func,
+        parallel=parallel)
 
     def event_handler(event):
         if isinstance(event, fluid.EndEpochEvent):
@@ -86,11 +90,14 @@ def event_handler(event):
         feed_order=['img', 'label'])
 
 
-def infer(use_cuda, inference_program, params_dirname=None):
+def infer(use_cuda, inference_program, parallel, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
 
     inferencer = fluid.Inferencer(
-        infer_func=inference_program, param_path=params_dirname, place=place)
+        infer_func=inference_program,
+        param_path=params_dirname,
+        place=place,
+        parallel=parallel)
 
     batch_size = 1
     tensor_img = numpy.random.uniform(-1.0, 1.0,
@@ -101,20 +108,32 @@ def infer(use_cuda, inference_program, params_dirname=None):
     print("infer results: ", results[0])
 
 
-def main(use_cuda):
+def main(use_cuda, parallel):
     params_dirname = "recognize_digits_mlp.inference.model"
 
     # call train() with is_local argument to run distributed train
+    os.environ['CPU_NUM'] = str(4)
     train(
         use_cuda=use_cuda,
         train_program=train_program,
-        params_dirname=params_dirname)
+        params_dirname=params_dirname,
+        parallel=parallel)
+
+    # FIXME(zcd): in the inference stage, the number of
+    # input data is one, it is not appropriate to use parallel.
+    if parallel and use_cuda:
+        return
+    os.environ['CPU_NUM'] = str(1)
    infer(
         use_cuda=use_cuda,
         inference_program=inference_program,
-        params_dirname=params_dirname)
+        params_dirname=params_dirname,
+        parallel=parallel)
 
 
 if __name__ == '__main__':
-    # for use_cuda in (False, True):
-    main(use_cuda=False)
+    for use_cuda in (False, True):
+        for parallel in (False, True):
+            if use_cuda and not core.is_compiled_with_cuda():
+                continue
+            main(use_cuda=use_cuda, parallel=parallel)
