
Commit e967d19

add more tests
1 parent a13ec34 commit e967d19


3 files changed: +75 −34 lines


paddle/fluid/operators/conv_cudnn_op.cu.cc

Lines changed: 2 additions & 1 deletion
@@ -282,7 +282,8 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
     platform::CUDAPlace gpu = boost::get<platform::CUDAPlace>(ctx.GetPlace());
     cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes);
     // ------------------- cudnn conv backward data ---------------------
-    T alpha = 1.0f, beta = 0.0f;
+    typename platform::CudnnDataType<T>::ScalingParamType alpha = 1.0f,
+        beta = 0.0f;
     if (input_grad) {
       T* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
       // Because beta is zero, it is unnecessary to reset input_grad.

python/paddle/fluid/tests/unittests/op_test.py

Lines changed: 10 additions & 13 deletions
@@ -470,29 +470,26 @@ def _numpy_to_lod_tensor(np_value, lod, place):
         return tensor

     @staticmethod
-    def create_view(input):
-        """Create a view of the input numpy array
+    def np_dtype_to_fluid_dtype(input):
+        """Change the dtype of float16 numpy array

         numpy float16 is binded to paddle::platform::float16
-        in tensor_py.h via the help of numpy uint16 because
+        in tensor_py.h via the help of uint16 data type since
         the internal memory representation of float16 is
-        uint16_t in paddle or np.uint16 in numpy, which are
-        themselves binded together.
+        uint16_t in paddle and np.uint16 in numpy, which are
+        themselves binded together by pybind.

         Args:
             input: input numpy array

         Returns:
-            input_view: if the dtype of input is np.float16, input_view
-                will reinterpret input as with dtype np.uint16.
-                Otherwise, input_view will be input itself.
+            input: if the dtype of input is np.float16, its dtype will be
+                changed to np.uint16 so that the internal memory will be
+                reinterpreted input as of dtype np.uint16.
         """
         if input.dtype == np.float16:
-            # view will only reinterpret memory without copying
-            input_view = input.view(np.uint16)
-        else:
-            input_view = input
-        return input_view
+            input.dtype = np.uint16
+        return input

     def _get_gradient(self, input_to_check, place, output_names, no_grad_set):
         prog = Program()
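As a quick illustration of the reinterpretation described in the new np_dtype_to_fluid_dtype docstring, the snippet below is a minimal numpy-only sketch (not part of the commit; the array x is hypothetical): assigning np.uint16 to an array's dtype relabels the existing buffer in place, so the float16 bit patterns reach the paddle::platform::float16 binding unchanged.

    import numpy as np

    # Minimal sketch of the float16 -> uint16 reinterpretation described above.
    x = np.array([1.0, 2.5], dtype=np.float16)
    bits = x.view(np.uint16).copy()  # same bytes viewed as uint16, copied for comparison

    x.dtype = np.uint16              # relabel in place: no conversion, no copy
    assert x.dtype == np.uint16
    assert (x == bits).all()         # bit patterns preserved (np.float16(1.0) is 0x3c00)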

python/paddle/fluid/tests/unittests/test_conv2d_op.py

Lines changed: 63 additions & 20 deletions
@@ -82,18 +82,9 @@ def setUp(self):
         output = conv2d_forward_naive(input, filter, self.groups,
                                       conv2d_param).astype(self.dtype)

-        # numpy float16 is binded to paddle::platform::float16
-        # in tensor_py.h via the help of numpy uint16 because
-        # the internal memory representation of float16 is
-        # uint16_t in paddle or np.uint16 in numpy, which are
-        # themselves binded together.
         self.inputs = {
-            #'Input': (input.view(np.uint16)
-            #          if self.dtype == np.float16 else input),
-            #'Filter': (filter.view(np.uint16)
-            #           if self.dtype == np.float16 else filter)
-            'Input': OpTest.create_view(input),
-            'Filter': OpTest.create_view(filter)
+            'Input': OpTest.np_dtype_to_fluid_dtype(input),
+            'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
         }
         self.attrs = {
             'strides': self.stride,
@@ -113,6 +104,8 @@ def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
         if self.use_cudnn:
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
@@ -125,6 +118,8 @@ def test_check_grad(self):
             set(['Input', 'Filter']), 'Output', max_relative_error=0.02)

     def test_check_grad_no_filter(self):
+        if self.dtype == np.float16:
+            return
         if self.use_cudnn:
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
@@ -140,6 +135,8 @@ def test_check_grad_no_filter(self):
             no_grad_set=set(['Filter']))

     def test_check_grad_no_input(self):
+        if self.dtype == np.float16:
+            return
         if self.use_cudnn:
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
@@ -259,46 +256,92 @@ def test_check_output(self):
             if core.is_float16_supported(place):
                 self.check_output_with_place(place, atol=2e-2)

-    def test_check_grad(self):
-        pass
-
-    def test_check_grad_no_filter(self):
-        pass
-
-    def test_check_grad_no_input(self):
-        pass
-

 class TestCUDNNWithPad(TestWithPad):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"


+class TestFP16CUDNNWithPad(TestCUDNNWithPad):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestCUDNNWithStride(TestWithStride):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"


+class TestFP16CUDNNWithStride(TestCUDNNWithStride):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestCUDNNWithGroup(TestWithGroup):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"


+class TestFP16CUDNNWithGroup(TestCUDNNWithGroup):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestCUDNNWith1x1(TestWith1x1):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"


+class TestFP16CUDNNWith1x1(TestCUDNNWith1x1):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestCUDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"


+class TestFP16CUDNNWithInput1x1Filter1x1(TestCUDNNWithInput1x1Filter1x1):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestDepthwiseConv(TestConv2dOp):
     def init_test_case(self):
         self.pad = [1, 1]
