Commit 5a68584

Test on GPU
1 parent 65e957c commit 5a68584

2 files changed: 8 additions, 11 deletions


demo/mnist/api_train.py

Lines changed: 7 additions & 10 deletions
@@ -53,7 +53,7 @@ def input_order_converter(generator):
 
 
 def main():
-    api.initPaddle("-use_gpu=true", "-trainer_count=4")  # use 4 cpu cores
+    api.initPaddle("-use_gpu=false", "-trainer_count=4")  # use 4 cpu cores
     config = paddle.trainer.config_parser.parse_config(
         'simple_mnist_network.py', '')
 
@@ -106,7 +106,7 @@ def main():
     # TrainData will stored in a data pool. Currently implementation is not care
     # about memory, speed. Just a very naive implementation.
     train_data_generator = input_order_converter(read_from_mnist(train_file))
-    train_data = BatchPool(train_data_generator, 128)
+    train_data = BatchPool(train_data_generator, 512)
 
     # outArgs is Neural Network forward result. Here is not useful, just passed
     # to gradient_machine.forward
@@ -126,16 +126,13 @@ def main():
             # batch_evaluator can be used between start/finish.
             batch_evaluator.start()
 
-            # A callback when backward.
-            # It is used for updating weight values vy calculated Gradient.
-            def updater_callback(param):
-                updater.update(param)
-
             # forwardBackward is a shortcut for forward and backward.
             # It is sometimes faster than invoke forward/backward separately,
             # because in GradientMachine, it may be async.
-            m.forwardBackward(
-                converter(data_batch), outArgs, pass_type, updater_callback)
+            m.forwardBackward(converter(data_batch), outArgs, pass_type)
+
+            for each_param in m.getParameters():
+                updater.update(each_param)
 
             # Get cost. We use numpy to calculate total cost for this batch.
             cost_vec = outArgs.getSlotValue(0)
@@ -159,7 +156,7 @@ def updater_callback(param):
         updater.apply()
         test_evaluator.start()
        test_data_generator = input_order_converter(read_from_mnist(test_file))
-        for data_batch in generator_to_batch(test_data_generator, 128):
+        for data_batch in generator_to_batch(test_data_generator, 512):
             # in testing stage, only forward is needed.
             m.forward(converter(data_batch), outArgs, api.PASS_TEST)
             m.eval(test_evaluator)
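
The comment in the second hunk describes BatchPool as a very naive data pool, and this commit bumps the batch size from 128 to 512 in both the training and testing loops. As a rough standalone illustration only (not the demo's actual BatchPool or generator_to_batch implementation; the helper name and sample stream below are made up), a minimal "generator to fixed-size batches" helper could look like this:

    import itertools

    def batches_of(generator, batch_size):
        # Illustration only -- plays the role the demo's BatchPool /
        # generator_to_batch fill: group a sample generator into lists
        # of at most batch_size items.
        it = iter(generator)
        while True:
            batch = list(itertools.islice(it, batch_size))
            if not batch:
                return
            yield batch

    # Stand-in sample stream (not MNIST), batched with the new size of 512.
    samples = ((i, i % 10) for i in range(2000))
    sizes = [len(b) for b in batches_of(samples, 512)]
    print(sizes)  # [512, 512, 512, 464]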

paddle/api/Vector.cpp

Lines changed: 1 addition & 1 deletion
@@ -253,7 +253,7 @@ void Vector::copyToNumpyArray(float** view_m_data, int* dim1) {
   *view_m_data = new float[*dim1];
   if (auto cpuVec = dynamic_cast<paddle::CpuVector*>(m->vec.get())) {
     std::memcpy(*view_m_data, cpuVec->getData(), sizeof(float) * (*dim1));
-  } else if (auto gpuVec = dynamic_cast<paddle::CpuVector*>(m->vec.get())) {
+  } else if (auto gpuVec = dynamic_cast<paddle::GpuVector*>(m->vec.get())) {
     hl_memcpy_device2host(
         *view_m_data, gpuVec->getData(), sizeof(float) * (*dim1));
   } else {
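
The Vector.cpp change is a one-line fix: the second dynamic_cast also targeted paddle::CpuVector, and an identical cast can never succeed once the first one has failed, so a GPU-backed vector always fell through to the final else branch and copyToNumpyArray never reached the hl_memcpy_device2host path. A small Python analogue of that unreachable-branch pattern (illustration only, not Paddle code; the class and function names are made up):

    class CpuVector:
        pass

    class GpuVector:
        pass

    def copy_out(vec):
        # Mirrors the structure of Vector::copyToNumpyArray above.
        if isinstance(vec, CpuVector):
            return "plain host memcpy"
        elif isinstance(vec, CpuVector):   # bug: same type tested twice,
            return "device-to-host copy"   # so this branch is unreachable
        else:
            raise TypeError("unsupported vector type")

    # With the buggy check, a GPU vector hits the error branch; changing the
    # second test to GpuVector (as the C++ fix does) restores the
    # device-to-host path.
    try:
        copy_out(GpuVector())
    except TypeError as err:
        print(err)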
