Commit f802940

remove Evaluator.Accuracy

1 parent 101378c
File tree

7 files changed, +40 -63 lines changed

benchmark/cluster/vgg16/vgg16_fluid.py

Lines changed: 20 additions & 15 deletions
@@ -1,11 +1,11 @@
 # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-#
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-#
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-#
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -138,13 +138,14 @@ def main():
     avg_cost = fluid.layers.mean(x=cost)
 
     # Evaluator
-    accuracy = fluid.evaluator.Accuracy(input=predict, label=label)
+    batch_size = fluid.layers.create_tensor(dtype='int64')
+    batch_acc = fluid.layers.accuracy(
+        input=predict, label=label, total=batch_size)
 
     # inference program
     inference_program = fluid.default_main_program().clone()
     with fluid.program_guard(inference_program):
-        test_target = accuracy.metrics + accuracy.states
-        inference_program = fluid.io.get_inference_program(test_target)
+        inference_program = fluid.io.get_inference_program(batch_acc)
 
     # Optimization
     optimizer = fluid.optimizer.Adam(learning_rate=args.learning_rate)
@@ -157,27 +158,30 @@ def main():
 
     # test
     def test(exe):
-        accuracy.reset(exe)
+        test_pass_acc = fluid.average.WeightedAverage()
        for batch_id, data in enumerate(test_reader()):
             img_data = np.array(map(lambda x: x[0].reshape(data_shape),
                                     data)).astype("float32")
             y_data = np.array(map(lambda x: x[1], data)).astype("int64")
             y_data = y_data.reshape([-1, 1])
 
-            exe.run(inference_program,
-                    feed={"pixel": img_data,
-                          "label": y_data})
+            outs = exe.run(inference_program,
+                           feed={"pixel": img_data,
+                                 "label": y_data},
+                           fetch_list=[batch_acc, batch_size])
+            test_pass_acc.add(value=np.array(outs[0]), weight=np.array(outs[1]))
 
-        return accuracy.eval(exe)
+        return test_pass_acc.eval()
 
     def train_loop(exe, trainer_prog):
         iters = 0
         ts = time.time()
+        train_pass_acc = fluid.average.WeightedAverage()
         for pass_id in range(args.num_passes):
             # train
             start_time = time.time()
             num_samples = 0
-            accuracy.reset(exe)
+            train_pass_acc.reset()
             with profiler.profiler("CPU", 'total') as prof:
                 for batch_id, data in enumerate(train_reader()):
                     ts = time.time()
@@ -187,21 +191,22 @@ def train_loop(exe, trainer_prog):
                     y_data = np.array(map(lambda x: x[1], data)).astype("int64")
                     y_data = y_data.reshape([-1, 1])
 
-                    loss, acc = exe.run(
+                    loss, acc, b_size = exe.run(
                         trainer_prog,
                         feed={"pixel": img_data,
                               "label": y_data},
-                        fetch_list=[avg_cost] + accuracy.metrics)
+                        fetch_list=[avg_cost, batch_acc, batch_size])
                     iters += 1
                     num_samples += len(data)
+                    train_pass_acc.add(value=acc, weight=b_size)
                     print(
                         "Pass = %d, Iters = %d, Loss = %f, Accuracy = %f, Speed = %.2f img/s"
                         % (pass_id, iters, loss, acc,
                            len(data) / (time.time() - ts))
                     )  # The accuracy is the accumulation of batches, but not the current batch.
 
             pass_elapsed = time.time() - start_time
-            pass_train_acc = accuracy.eval(exe)
+            pass_train_acc = train_pass_acc.eval()
             pass_test_acc = test(exe)
             print(
                 "Pass = %d, Training performance = %f imgs/s, Train accuracy = %f, Test accuracy = %f\n"

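The change above is the pattern the whole commit converges on: instead of a stateful fluid.evaluator.Accuracy object, the program now holds a per-batch fluid.layers.accuracy op (whose total= output carries the batch size), and the pass-level metric is accumulated on the Python side with fluid.average.WeightedAverage. Below is a minimal, self-contained sketch of that pattern. The tiny fully connected classifier, the random feed data, and names such as image/predict are illustrative assumptions, not part of this commit; only the metric-related calls mirror the diff.

import numpy as np
import paddle.fluid as fluid

# Placeholder network so the sketch stands alone (not taken from this commit).
image = fluid.layers.data(name='x', shape=[784], dtype='float32')
label = fluid.layers.data(name='y', shape=[1], dtype='int64')
predict = fluid.layers.fc(input=image, size=10, act='softmax')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)

# New-style metric: per-batch accuracy op; `total` receives the batch size.
batch_size = fluid.layers.create_tensor(dtype='int64')
batch_acc = fluid.layers.accuracy(input=predict, label=label, total=batch_size)

fluid.optimizer.Adam(learning_rate=1e-3).minimize(avg_cost)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

# Pass-level accuracy is accumulated in Python, not in program state.
pass_acc = fluid.average.WeightedAverage()
pass_acc.reset()
for _ in range(3):  # stand-in for a real data reader
    x = np.random.random((32, 784)).astype('float32')
    y = np.random.randint(0, 10, size=(32, 1)).astype('int64')
    loss, acc, weight = exe.run(fluid.default_main_program(),
                                feed={'x': x, 'y': y},
                                fetch_list=[avg_cost, batch_acc, batch_size])
    pass_acc.add(value=acc, weight=weight)  # weight = samples in this batch
print("pass accuracy: " + str(pass_acc.eval()))
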
python/paddle/fluid/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -29,6 +29,7 @@
 import learning_rate_decay
 import backward
 import regularizer
+import average
 from param_attr import ParamAttr, WeightNormParamAttr
 from data_feeder import DataFeeder
 from core import LoDTensor, CPUPlace, CUDAPlace
File renamed without changes.

python/paddle/fluid/evaluator.py

Lines changed: 0 additions & 38 deletions
@@ -105,44 +105,6 @@ def create_state(self, suffix, dtype, shape):
         return state
 
 
-class Accuracy(Evaluator):
-    """
-    Average Accuracy for multiple mini-batches.
-    """
-
-    def __init__(self, input, label, k=1, **kwargs):
-        super(Accuracy, self).__init__("accuracy", **kwargs)
-        main_program = self.helper.main_program
-        if main_program.current_block().idx != 0:
-            raise ValueError("You can only invoke Evaluator in root block")
-
-        self.total = self.create_state(dtype='int64', shape=[1], suffix='total')
-        self.correct = self.create_state(
-            dtype='int64', shape=[1], suffix='correct')
-        total = self.helper.create_tmp_variable(dtype='int')
-        correct = self.helper.create_tmp_variable(dtype='int')
-        acc = layers.accuracy(
-            input=input, label=label, k=k, total=total, correct=correct)
-        total = layers.cast(x=total, dtype='int64')
-        correct = layers.cast(x=correct, dtype='int64')
-        layers.sums(input=[self.total, total], out=self.total)
-        layers.sums(input=[self.correct, correct], out=self.correct)
-
-        self.metrics.append(acc)
-
-    def eval(self, executor, eval_program=None):
-        if eval_program is None:
-            eval_program = Program()
-        block = eval_program.current_block()
-        with program_guard(main_program=eval_program):
-            total = _clone_var_(block, self.total)
-            correct = _clone_var_(block, self.correct)
-            total = layers.cast(total, dtype='float32')
-            correct = layers.cast(correct, dtype='float32')
-            out = layers.elementwise_div(x=correct, y=total)
-        return np.array(executor.run(eval_program, fetch_list=[out])[0])
-
-
 class ChunkEvaluator(Evaluator):
     """
     Accumulate counter numbers output by chunk_eval from mini-batches and

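With the in-program Accuracy evaluator removed, the only remaining accumulation is the running weighted mean kept by fluid.average.WeightedAverage at the call sites in this commit. The plain-Python stand-in below makes the arithmetic explicit; it is an assumption about the behaviour those call sites rely on (a running weighted mean of per-batch values), not a copy of the module's implementation.

class RunningWeightedAverage(object):
    """Hypothetical stand-in: running weighted mean of per-batch metrics."""

    def __init__(self):
        self.reset()

    def reset(self):
        self._numerator = 0.0    # sum of value * weight over batches seen so far
        self._denominator = 0.0  # sum of weights (batch sizes)

    def add(self, value, weight):
        self._numerator += float(value) * float(weight)
        self._denominator += float(weight)

    def eval(self):
        return self._numerator / self._denominator


avg = RunningWeightedAverage()
avg.add(value=0.50, weight=32)   # batch of 32 samples at 50% accuracy
avg.add(value=1.00, weight=16)   # batch of 16 samples at 100% accuracy
print(avg.eval())                # 0.666..., weighted toward the larger batch
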
python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py

Lines changed: 11 additions & 6 deletions
@@ -122,7 +122,8 @@ def conv_block(input, num_filter, groups, dropouts):
 optimizer = fluid.optimizer.Adam(learning_rate=0.001)
 opts = optimizer.minimize(avg_cost)
 
-accuracy = fluid.evaluator.Accuracy(input=predict, label=label)
+batch_size = fluid.layers.create_tensor(dtype='int64')
+batch_acc = fluid.layers.accuracy(input=predict, label=label, total=batch_size)
 
 fluid.memory_optimize(fluid.default_main_program())
 
@@ -144,13 +145,17 @@ def conv_block(input, num_filter, groups, dropouts):
 exe.run(fluid.default_startup_program())
 
 i = 0
+
+accuracy = fluid.average.WeightedAverage()
 for pass_id in range(PASS_NUM):
-    accuracy.reset(exe)
+    accuracy.reset()
     for data in train_reader():
-        loss, acc = exe.run(fluid.default_main_program(),
-                            feed=feeder.feed(data),
-                            fetch_list=[avg_cost] + accuracy.metrics)
-        pass_acc = accuracy.eval(exe)
+        loss, acc, weight = exe.run(
+            fluid.default_main_program(),
+            feed=feeder.feed(data),
+            fetch_list=[avg_cost, batch_acc, batch_size])
+        accuracy.add(value=acc, weight=weight)
+        pass_acc = accuracy.eval()
         print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str(
             pass_acc))
         # this model is slow, so if we can train two mini batch, we think it works properly.

python/paddle/fluid/tests/unittests/test_profiler.py

Lines changed: 8 additions & 4 deletions
@@ -37,7 +37,9 @@ def net_profiler(self, state):
         label = fluid.layers.data(name='y', shape=[1], dtype='int64')
         cost = fluid.layers.cross_entropy(input=predict, label=label)
         avg_cost = fluid.layers.mean(cost)
-        accuracy = fluid.evaluator.Accuracy(input=predict, label=label)
+        batch_size = fluid.layers.create_tensor(dtype='int64')
+        batch_acc = fluid.layers.accuracy(
+            input=predict, label=label, total=batch_size)
 
         optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9)
         opts = optimizer.minimize(avg_cost, startup_program=startup_program)
@@ -46,7 +48,7 @@ def net_profiler(self, state):
         exe = fluid.Executor(place)
         exe.run(startup_program)
 
-        accuracy.reset(exe)
+        pass_acc_calculator = fluid.average.WeightedAverage()
         with profiler.profiler(state, 'total') as prof:
             for iter in range(10):
                 if iter == 2:
@@ -57,9 +59,11 @@ def net_profiler(self, state):
                 outs = exe.run(main_program,
                                feed={'x': x,
                                      'y': y},
-                               fetch_list=[avg_cost] + accuracy.metrics)
+                               fetch_list=[avg_cost, batch_acc, batch_size])
                 acc = np.array(outs[1])
-                pass_acc = accuracy.eval(exe)
+                b_size = np.array(outs[2])
+                pass_acc_calculator.add(value=acc, weight=b_size)
+                pass_acc = pass_acc_calculator.eval()
 
     def test_cpu_profiler(self):
         self.net_profiler('CPU')
