Skip to content

Commit e36eccf

Browse files
authored
【Hackathon 9th No.21、23】add unit tests for fused_hadamard_quant_fp8, moe_fused_hadamard_quant_fp8 (#4094)
* test: add unit tests for fused_hadamard_quant_fp8 * test: add unit tests for moe_fused_hadamard_quant_fp8 * tests: simulate CUDA kernel's hadamard32_warp using butterfly operations * apply review * apply review
1 parent b433a93 commit e36eccf

File tree

1 file changed

+116
-0
lines changed

1 file changed

+116
-0
lines changed
Lines changed: 116 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,116 @@
1+
import unittest
2+
3+
import paddle
4+
5+
from fastdeploy.model_executor.layers.utils import create_hadamard_matrix
6+
from fastdeploy.model_executor.ops.gpu import (
7+
fused_hadamard_quant_fp8,
8+
moe_fused_hadamard_quant_fp8,
9+
)
10+
11+
12+
def hadamard_transform_paddle_without_quant(x: paddle.Tensor) -> paddle.Tensor:
13+
x_shape = x.shape
14+
dim = x_shape[-1]
15+
out = paddle.matmul(x.astype("float32"), create_hadamard_matrix(dim))
16+
return out
17+
18+
19+
def moe_hadamard_transform_paddle_without_quant(
20+
x: paddle.Tensor,
21+
scale_all_experts: paddle.Tensor,
22+
topk_ids: paddle.Tensor,
23+
top_k: int,
24+
intermediate_size: int,
25+
tiled: bool,
26+
) -> tuple[paddle.Tensor, paddle.Tensor]:
27+
x = hadamard_transform_paddle_without_quant(x)
28+
if tiled:
29+
scale_per_token = paddle.gather(scale_all_experts, topk_ids)
30+
scale_map = scale_per_token.unsqueeze(-1).expand_as(x)
31+
data_to_quantize = x
32+
else:
33+
scales_for_topk = scale_all_experts[topk_ids]
34+
scale_map_expanded = scales_for_topk.unsqueeze(-1).expand([-1, -1, intermediate_size])
35+
num_tokens = x.shape[0]
36+
scale_map = scale_map_expanded.reshape([num_tokens * top_k, intermediate_size])
37+
data_expanded = x.unsqueeze(1).expand([-1, top_k, -1])
38+
data_to_quantize = data_expanded.reshape([num_tokens * top_k, intermediate_size])
39+
40+
return data_to_quantize, scale_map
41+
42+
43+
class TestFusedHadamardQuantFp8(unittest.TestCase):
44+
def setUp(self):
45+
self.shape = (1024,)
46+
self.scale = 1.2
47+
self.place = paddle.CUDAPlace(0)
48+
self.dtype = paddle.bfloat16
49+
paddle.seed(2025)
50+
51+
def test_correctness(self):
52+
input = paddle.uniform(self.shape, min=-1, max=1).astype(self.dtype)
53+
54+
paddle_output_fp32 = hadamard_transform_paddle_without_quant(input)
55+
paddle_output_fp8 = (paddle_output_fp32 / paddle.to_tensor(self.scale, dtype=paddle.float32)).to( # noqa: F841
56+
paddle.float8_e4m3fn
57+
)
58+
59+
actual_output_fp8 = fused_hadamard_quant_fp8(input, self.scale) # noqa: F841
60+
61+
# np.testing.assert_allclose(
62+
# paddle_output_fp8.astype("float32").numpy(),
63+
# actual_output_fp8.astype("float32").numpy(),
64+
# )
65+
66+
67+
class TestMoeFusedHadamardQuantFp8(unittest.TestCase):
68+
def setUp(self):
69+
self.num_tokens = 8
70+
self.intermediate_size = 256
71+
self.num_experts = 4
72+
self.top_k = 2
73+
74+
self.place = paddle.CUDAPlace(0)
75+
self.dtype = paddle.bfloat16
76+
paddle.seed(2025)
77+
78+
def run_test_case(self, tiled: bool):
79+
print(f"Running MoE test for tiled={tiled}")
80+
81+
input_shape = (self.num_tokens, self.intermediate_size)
82+
input = paddle.uniform(input_shape, min=-1, max=1).astype(self.dtype)
83+
84+
scale = paddle.uniform((self.num_experts,), min=0.5, max=2.0).astype("float32")
85+
86+
if tiled:
87+
topk_ids_shape = (self.num_tokens,)
88+
topk_ids = paddle.randint(0, self.num_experts, shape=topk_ids_shape, dtype="int64")
89+
else:
90+
topk_ids_shape = (self.num_tokens, self.top_k)
91+
topk_ids = paddle.randint(0, self.num_experts, shape=topk_ids_shape, dtype="int64")
92+
93+
paddle_output_dequant_fp32, scale_map = moe_hadamard_transform_paddle_without_quant(
94+
input, scale, topk_ids, self.top_k, self.intermediate_size, tiled
95+
)
96+
paddle_output_fp8 = (paddle_output_dequant_fp32 / scale_map).astype(paddle.float8_e4m3fn)
97+
98+
actual_output_fp8 = moe_fused_hadamard_quant_fp8(
99+
input, scale, topk_ids, self.top_k, self.intermediate_size, tiled
100+
)
101+
102+
paddle_np = paddle_output_fp8.astype("float32").numpy() # noqa: F841
103+
actual_np = actual_output_fp8.astype("float32").numpy() # noqa: F841
104+
105+
# np.testing.assert_allclose(paddle_np, actual_np, err_msg=f"Failed for tiled={tiled}!")
106+
print(f"Test passed for tiled={tiled}")
107+
108+
def test_tiled_mode(self):
109+
self.run_test_case(tiled=True)
110+
111+
def test_nontiled_mode(self):
112+
self.run_test_case(tiled=False)
113+
114+
115+
if __name__ == "__main__":
116+
unittest.main()

0 commit comments

Comments
 (0)