Commit 62c51e4

"add float64 tests" (#10450)
* "add float64 tests" * "fix based comment" * "fixed based comment"
1 parent 01a2773 commit 62c51e4

3 files changed: +91 −8 lines

python/paddle/fluid/executor.py

Lines changed: 6 additions & 2 deletions
@@ -299,14 +299,18 @@ def run(self,
         if feed is None:
             feed = {}
         if not isinstance(feed, dict):
-            raise TypeError("feed should be a map")
+            raise TypeError(
+                "feed requires dict as its Parameter. But you passed in %s" %
+                (type(feed)))
         if fetch_list is None:
             fetch_list = []
         if program is None:
             program = default_main_program()

         if not isinstance(program, Program):
-            raise TypeError()
+            raise TypeError(
+                "Executor requires Program as its Parameter. But you passed in %s"
+                % (type(program)))

         if scope is None:
             scope = global_scope()
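With this change, passing the wrong type to Executor.run fails with a message that names the offending type instead of a bare TypeError. A minimal sketch of what that looks like, assuming a Fluid build that includes this commit (the list literal is just an illustrative wrong value):

import paddle.fluid as fluid

exe = fluid.Executor(fluid.CPUPlace())
try:
    # wrong on purpose: feed must be a dict mapping variable names to values
    exe.run(feed=[('x', None)])
except TypeError as e:
    # prints something like:
    # feed requires dict as its Parameter. But you passed in <type 'list'>
    print(e)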

python/paddle/fluid/optimizer.py

Lines changed: 11 additions & 6 deletions
@@ -47,6 +47,8 @@ def __init__(self, learning_rate, regularization=None):
             raise TypeError("learning rate should be float or Variable")
         self.regularization = regularization
         self._learning_rate = learning_rate
+        # the learning rate type should be inferenced from loss
+        self._dtype = None
         # each program should have a independent learning rate
         # program -> Variable(learning_rate)
         self._learning_rate_map = dict()
@@ -77,7 +79,7 @@ def _create_global_learning_rate(self):
             name=unique_name.generate("learning_rate"),
             shape=[1],
             value=float(self._learning_rate),
-            dtype='float32',
+            dtype='float32' if self._dtype == None else self._dtype,
             persistable=True)

     def global_learning_rate(self, program=None):
@@ -200,6 +202,7 @@ def create_optimization_pass(self,

         # Create any accumulators
         program = loss.block.program
+        self._dtype = loss.dtype
         with program_guard(program, startup_program):
             global_block = framework.default_main_program().global_block()
             start = len(global_block.ops)
@@ -391,7 +394,7 @@ def _create_accumulators(self, block, parameters):
         beta_shape = [1]
         self._beta1_pow_acc = self.helper.create_global_variable(
             name=unique_name.generate('beta1_pow_acc'),
-            dtype='float32',
+            dtype='float32' if self._dtype == None else self._dtype,
             shape=beta_shape,
             lod_level=0,
             persistable=True)
@@ -400,7 +403,7 @@ def _create_accumulators(self, block, parameters):

         self._beta2_pow_acc = self.helper.create_global_variable(
             name=unique_name.generate('beta2_pow_acc'),
-            dtype='float32',
+            dtype='float32' if self._dtype == None else self._dtype,
             shape=beta_shape,
             lod_level=0,
             persistable=True)
@@ -493,7 +496,7 @@ def _create_accumulators(self, block, parameters):
         beta_shape = [1]
         self._beta1_pow_acc = self.helper.create_global_variable(
             name=unique_name.generate('beta1_pow_acc'),
-            dtype='float32',
+            dtype='float32' if self._dtype == None else self._dtype,
             shape=beta_shape,
             lod_level=0,
             persistable=True)
@@ -900,8 +903,10 @@ def _add_average_apply_op(self, block, param_grad):
         # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates)
         tmp = layers.sum(x=[num_accumulates, old_num_accumulates])
         sum = layers.sum(x=[sum_1, sum_2, sum_3])
-        tmp = layers.cast(x=tmp, dtype='float32')
-        sum = layers.cast(x=sum, dtype='float32')
+        tmp = layers.cast(
+            x=tmp, dtype='float32' if self._dtype == None else self._dtype)
+        sum = layers.cast(
+            x=sum, dtype='float32' if self._dtype == None else self._dtype)
         layers.elementwise_div(x=sum, y=tmp, out=param)

     def _add_average_restore_op(self, block, param_grad):
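Every dtype change in optimizer.py follows the same pattern: create_optimization_pass records loss.dtype in self._dtype, and each place that used to hard-code 'float32' now uses the recorded dtype, keeping 'float32' only as the fallback when no loss has been processed yet. Below is a minimal, self-contained sketch of that fallback idiom; the demo class and names are illustrative only, not part of the commit (the diff itself writes "== None" where the sketch uses the equivalent "is None"):

class DtypeFallbackDemo(object):
    """Illustrative stand-in for the optimizer's dtype bookkeeping."""

    def __init__(self):
        # unknown until a loss is seen (mirrors Optimizer.__init__)
        self._dtype = None

    def record_loss_dtype(self, loss_dtype):
        # mirrors create_optimization_pass: self._dtype = loss.dtype
        self._dtype = loss_dtype

    def pick_dtype(self):
        # same fallback expression as in the diff
        return 'float32' if self._dtype is None else self._dtype


demo = DtypeFallbackDemo()
assert demo.pick_dtype() == 'float32'    # before minimize(): default float32
demo.record_loss_dtype('float64')        # after a float64 loss is recorded
assert demo.pick_dtype() == 'float64'    # learning rate and accumulators follow it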
New test file

Lines changed: 74 additions & 0 deletions
@@ -0,0 +1,74 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.core as core
+from paddle.fluid.executor import Executor
+
+BATCH_SIZE = 20
+
+
+class TestNetWithDtype(unittest.TestCase):
+    def setUp(self):
+        self.dtype = "float64"
+        self.init_dtype()
+        self.x = fluid.layers.data(name='x', shape=[13], dtype=self.dtype)
+        self.y = fluid.layers.data(name='y', shape=[1], dtype=self.dtype)
+        y_predict = fluid.layers.fc(input=self.x, size=1, act=None)
+
+        cost = fluid.layers.square_error_cost(input=y_predict, label=self.y)
+        avg_cost = fluid.layers.mean(cost)
+        self.fetch_list = [avg_cost]
+
+        sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
+        sgd_optimizer.minimize(avg_cost)
+
+    def run_net_on_place(self, place):
+        train_reader = paddle.batch(
+            paddle.dataset.uci_housing.train(), batch_size=BATCH_SIZE)
+        feeder = fluid.DataFeeder(place=place, feed_list=[self.x, self.y])
+        exe = fluid.Executor(place)
+        exe.run(fluid.default_startup_program())
+        for data in train_reader():
+            exe.run(fluid.default_main_program(),
+                    feed=feeder.feed(data),
+                    fetch_list=self.fetch_list)
+            # the main program is runable, the datatype is fully supported
+            break
+
+    def init_dtype(self):
+        pass
+
+    def test_cpu(self):
+        place = fluid.CPUPlace()
+        self.run_net_on_place(place)
+
+    def test_gpu(self):
+        if not core.is_compiled_with_cuda():
+            return
+        place = fluid.CUDAPlace(0)
+        self.run_net_on_place(place)
+
+
+# TODO(dzhwinter): make sure the fp16 is runable
+# class TestFloat16(SimpleNet):
+#     def init_dtype(self):
+#         self.dtype = "float16"
+
+if __name__ == '__main__':
+    unittest.main()
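The test fixes dtype to "float64" in setUp but leaves an init_dtype hook so subclasses can swap it, as the commented-out float16 case hints. A hypothetical subclass (illustration only, not part of the commit) would look like this:

class TestNetWithFloat32(TestNetWithDtype):
    # hypothetical: rerun the same network test with another dtype
    def init_dtype(self):
        self.dtype = "float32"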
