Skip to content

Commit 4f1aa5b

Browse files
committed
add test cases
1 parent af37838 commit 4f1aa5b

File tree

2 files changed

+84
-6
lines changed

2 files changed

+84
-6
lines changed

paddle/math/float16.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ limitations under the License. */
2323
#define USE_EIGEN
2424

2525
#ifdef USE_EIGEN // delete this #if macro
26-
#include "Eigen/src/Core/arch/CUDA/Half.h"
26+
#include "unsupported/Eigen/CXX11/Tensor"
2727
#endif
2828

2929
#ifdef __GNUC__
@@ -126,7 +126,7 @@ struct PADDLE_ALIGN(2) float16 {
126126
// According to gcc, __fp16 can only be used as an argument to fp16
127127
// intrinsic defined in arm_neon.h or as a storage type. It cannot
128128
// be used as a formal function argument.
129-
// TODO (kexinzhao): test it on RPI
129+
// TODO(kexinzhao): test it on RPI
130130
PADDLE_HOSTDEVICE inline float16(const float16_t* h) {
131131
x = *reinterpret_cast<uint16_t*>(h);
132132
}
@@ -564,7 +564,7 @@ PADDLE_HOSTDEVICE inline bool operator>=(const float16& a, const float16& b) {
564564

565565
namespace fp16_impl {
566566

567-
Union Bits {
567+
union Bits {
568568
float f;
569569
int32_t si;
570570
uint32_t ui;
@@ -584,7 +584,7 @@ constexpr int32_t maxC = maxN >> shift;
584584
constexpr int32_t minC = minN >> shift;
585585
constexpr int32_t sigC = sigN >> shiftSign;
586586

587-
const int32_t mulN = 0x52000000; //(1 << 23) / minN
587+
const int32_t mulN = 0x52000000; // (1 << 23) / minN
588588
const int32_t mulC = 0x33800000; // minN / (1 << (23 - shift))
589589
const int32_t subC = 0x003FF; // max flt32 subnormal downshifted
590590
const int32_t norC = 0x00400; // min flt32 normal downshifted
@@ -693,7 +693,7 @@ PADDLE_HOSTDEVICE inline float half_to_float(float16 h) {
693693
// Conversion routine adapted from
694694
// http://stackoverflow.com/questions/1659440/32-bit-to-16-bit-floating-point-conversion
695695
Bits v;
696-
v.ui = x;
696+
v.ui = h.x;
697697
int32_t sign = v.si & sigC;
698698
v.si ^= sign;
699699
sign <<= shiftSign;
@@ -711,6 +711,6 @@ PADDLE_HOSTDEVICE inline float half_to_float(float16 h) {
711711
#endif
712712
}
713713

714-
} // namespace half_impl
714+
} // namespace fp16_impl
715715

716716
} // namespace paddle

paddle/math/tests/test_float16.cpp

Lines changed: 78 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,78 @@
1+
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
2+
Licensed under the Apache License, Version 2.0 (the "License");
3+
you may not use this file except in compliance with the License.
4+
You may obtain a copy of the License at
5+
http://www.apache.org/licenses/LICENSE-2.0
6+
Unless required by applicable law or agreed to in writing, software
7+
distributed under the License is distributed on an "AS IS" BASIS,
8+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9+
See the License for the specific language governing permissions and
10+
limitations under the License. */
11+
12+
#include <gtest/gtest.h>
13+
#include "paddle/math/float16.h"
14+
15+
namespace paddle {
16+
17+
#ifdef PADDLE_CUDA_FP16
18+
// Conversions that require CUDA support: round trips through the cuda
// __half type and Eigen::half, plus float -> float16 bit patterns.
TEST(float16, gpu) {
  // Round trip through the CUDA half type.
  float16 from_half = half(float16(1.0f));
  EXPECT_EQ(from_half.x, 0x3c00);

  // Round trip through Eigen::half.
  float16 from_eigen = Eigen::half(float16(0.5f));
  EXPECT_EQ(from_eigen.x, 0x3800);

  // Direct construction from float; expected values are the IEEE 754
  // binary16 bit patterns.
  EXPECT_EQ(float16(1.0f).x, 0x3c00);
  EXPECT_EQ(float16(0.5f).x, 0x3800);
  EXPECT_EQ(float16(0.33333f).x, 0x3555);
  EXPECT_EQ(float16(0.0f).x, 0x0000);
  EXPECT_EQ(float16(-0.0f).x, 0x8000);
  EXPECT_EQ(float16(65504.0f).x, 0x7bff);  // largest finite half value
  EXPECT_EQ(float16(65536.0f).x, 0x7c00);  // overflows to +infinity

  // Conversion from double

  // Conversion from int

  // Conversion from bool
}
42+
43+
// 2 + 2 is exactly representable in half precision, so the sum is exact.
TEST(float16, arithmetic_gpu) {
  const float sum = float(float16(2) + float16(2));
  EXPECT_EQ(sum, 4);
}
44+
45+
// Ordering of two exactly-representable half values.
TEST(float16, comparison_gpu) {
  const bool one_gt_half = float16(1.0f) > float16(0.5f);
  EXPECT_TRUE(one_gt_half);
}
46+
#endif
47+
48+
// CPU-only conversions: round trips through Eigen::half and direct
// construction from float, checked against IEEE 754 binary16 bit patterns.
TEST(float16, conversion_cpu) {
  // float16 -> Eigen::half -> float16 must preserve the bit pattern.
  EXPECT_EQ(float16(Eigen::half(float16(1.0f))).x, 0x3c00);
  EXPECT_EQ(float16(Eigen::half(float16(0.5f))).x, 0x3800);
  EXPECT_EQ(float16(Eigen::half(float16(0.33333f))).x, 0x3555);
  EXPECT_EQ(float16(Eigen::half(float16(0.0f))).x, 0x0000);
  EXPECT_EQ(float16(Eigen::half(float16(-0.0f))).x, 0x8000);
  EXPECT_EQ(float16(Eigen::half(float16(65504.0f))).x, 0x7bff);
  EXPECT_EQ(float16(Eigen::half(float16(65536.0f))).x, 0x7c00);

  // Direct construction from float.
  EXPECT_EQ(float16(1.0f).x, 0x3c00);
  EXPECT_EQ(float16(0.5f).x, 0x3800);
  EXPECT_EQ(float16(0.33333f).x, 0x3555);
  EXPECT_EQ(float16(0.0f).x, 0x0000);
  EXPECT_EQ(float16(-0.0f).x, 0x8000);  // negative zero keeps its sign bit
  EXPECT_EQ(float16(65504.0f).x, 0x7bff);  // largest finite half value
  EXPECT_EQ(float16(65536.0f).x, 0x7c00);  // overflows to +infinity

  // Conversion from double

  // Conversion from int

  // Conversion from bool
}
73+
74+
// 2 + 2 is exactly representable in half precision, so the sum is exact.
TEST(float16, arithmetic_cpu) {
  const float sum = float(float16(2) + float16(2));
  EXPECT_EQ(sum, 4);
}
75+
76+
// Ordering of two exactly-representable half values.
TEST(float16, comparison_cpu) {
  const bool one_gt_half = float16(1.0f) > float16(0.5f);
  EXPECT_TRUE(one_gt_half);
}
77+
78+
} // namespace paddle

0 commit comments

Comments
 (0)