@@ -3,96 +3,95 @@
 
 import flashinfer
 import flashinfer.triton
-from flashinfer.utils import get_compute_capability
+from flashinfer.utils import GPUArchitectureError
 
 
 @pytest.mark.parametrize("seq_len", [2048])
 @pytest.mark.parametrize("num_heads", [32])
 @pytest.mark.parametrize("head_dim", [128])
 def test_merge_state(seq_len, num_heads, head_dim):
-    compute_capability = get_compute_capability(torch.device(device="cuda"))
-    if compute_capability[0] != 9:
-        pytest.skip("These tests are only guaranteed to work on Hopper GPUs.")
+    try:
+        va = torch.randn(seq_len, num_heads, head_dim).half().to("cuda:0")
+        sa = torch.randn(seq_len, num_heads, dtype=torch.float32).to("cuda:0")
+        vb = torch.randn(seq_len, num_heads, head_dim).half().to("cuda:0")
+        sb = torch.randn(seq_len, num_heads, dtype=torch.float32).to("cuda:0")
+        v_merged, s_merged = flashinfer.triton.cascade.merge_state(va, sa, vb, sb)
+        v_merged_std, s_merged_std = flashinfer.merge_state(va, sa, vb, sb)
 
-    va = torch.randn(seq_len, num_heads, head_dim).half().to("cuda:0")
-    sa = torch.randn(seq_len, num_heads, dtype=torch.float32).to("cuda:0")
-    vb = torch.randn(seq_len, num_heads, head_dim).half().to("cuda:0")
-    sb = torch.randn(seq_len, num_heads, dtype=torch.float32).to("cuda:0")
-    v_merged, s_merged = flashinfer.triton.cascade.merge_state(va, sa, vb, sb)
-    v_merged_std, s_merged_std = flashinfer.merge_state(va, sa, vb, sb)
-
-    assert torch.allclose(v_merged, v_merged_std, atol=1e-2)
-    assert torch.allclose(s_merged, s_merged_std, atol=1e-2)
+        assert torch.allclose(v_merged, v_merged_std, atol=1e-2)
+        assert torch.allclose(s_merged, s_merged_std, atol=1e-2)
+    except GPUArchitectureError as e:
+        pytest.skip(e.msg)
 
 
 @pytest.mark.parametrize("seq_len", [2048])
 @pytest.mark.parametrize("num_heads", [32])
 @pytest.mark.parametrize("head_dim", [128])
 def test_merge_state_in_place(seq_len, num_heads, head_dim):
-    compute_capability = get_compute_capability(torch.device(device="cuda"))
-    if compute_capability[0] != 9:
-        pytest.skip("These tests are only guaranteed to work on Hopper GPUs.")
+    try:
+        v = torch.randn(seq_len, num_heads, head_dim).half()
+        v_std = v.clone()
+        v, v_std = v.to("cuda:0"), v_std.to("cuda:0")
+        s = torch.randn(seq_len, num_heads, dtype=torch.float32)
+        s_std = s.clone()
+        s, s_std = s.to("cuda:0"), s_std.to("cuda:0")
+        v_other = torch.randn(seq_len, num_heads, head_dim).half().to("cuda:0")
+        s_other = torch.randn(seq_len, num_heads, dtype=torch.float32).to("cuda:0")
+        flashinfer.merge_state_in_place(v_std, s_std, v_other, s_other)
+        flashinfer.triton.cascade.merge_state_in_place(v, s, v_other, s_other)
 
-    v = torch.randn(seq_len, num_heads, head_dim).half()
-    v_std = v.clone()
-    v, v_std = v.to("cuda:0"), v_std.to("cuda:0")
-    s = torch.randn(seq_len, num_heads, dtype=torch.float32)
-    s_std = s.clone()
-    s, s_std = s.to("cuda:0"), s_std.to("cuda:0")
-    v_other = torch.randn(seq_len, num_heads, head_dim).half().to("cuda:0")
-    s_other = torch.randn(seq_len, num_heads, dtype=torch.float32).to("cuda:0")
-    flashinfer.merge_state_in_place(v_std, s_std, v_other, s_other)
-    flashinfer.triton.cascade.merge_state_in_place(v, s, v_other, s_other)
+        assert torch.allclose(v, v_std, atol=1e-2)
+        assert torch.allclose(s, s_std, atol=1e-2)
 
-    assert torch.allclose(v, v_std, atol=1e-2)
-    assert torch.allclose(s, s_std, atol=1e-2)
+    except GPUArchitectureError as e:
+        pytest.skip(e.msg)
 
 
 @pytest.mark.parametrize("seq_len", [2048])
 @pytest.mark.parametrize("num_heads", [32])
 @pytest.mark.parametrize("head_dim", [128])
 @pytest.mark.parametrize("num_states", [100])
 def test_merge_states(seq_len, num_states, num_heads, head_dim):
-    compute_capability = get_compute_capability(torch.device(device="cuda"))
-    if compute_capability[0] != 9:
-        pytest.skip("These tests are only guaranteed to work on Hopper GPUs.")
-
-    v = torch.randn(seq_len, num_states, num_heads, head_dim).half().to("cuda:0")
-    s = torch.randn(seq_len, num_states, num_heads, dtype=torch.float32).to("cuda:0")
-    v_merged_std, s_merged_std = flashinfer.merge_states(v, s)
-    v_merged, s_merged = flashinfer.triton.cascade.merge_states(v, s)
+    try:
+        v = torch.randn(seq_len, num_states, num_heads, head_dim).half().to("cuda:0")
+        s = torch.randn(seq_len, num_states, num_heads, dtype=torch.float32).to(
+            "cuda:0"
+        )
+        v_merged_std, s_merged_std = flashinfer.merge_states(v, s)
+        v_merged, s_merged = flashinfer.triton.cascade.merge_states(v, s)
 
-    assert torch.allclose(v_merged, v_merged_std, atol=1e-2)
-    assert torch.allclose(s_merged, s_merged_std, atol=1e-2)
+        assert torch.allclose(v_merged, v_merged_std, atol=1e-2)
+        assert torch.allclose(s_merged, s_merged_std, atol=1e-2)
+    except GPUArchitectureError as e:
+        pytest.skip(e.msg)
 
 
 @pytest.mark.parametrize("seq_len", [2048])
 @pytest.mark.parametrize("num_heads", [32])
 @pytest.mark.parametrize("head_dim", [128])
 def test_variable_length_merge_states(seq_len, num_heads, head_dim):
-    compute_capability = get_compute_capability(torch.device(device="cuda"))
-    if compute_capability[0] != 9:
-        pytest.skip("These tests are only guaranteed to work on Hopper GPUs.")
-
-    max_index_sets = 512
-    lengths = torch.randint(low=1, high=max_index_sets, size=(seq_len,))
-    indptr = [0]
-    for i in range(seq_len):
-        indptr.append(indptr[-1] + lengths[i])
-    v = torch.randn(indptr[-1], num_heads, head_dim).half().to("cuda:0")
-    s = torch.randn(indptr[-1], num_heads, dtype=torch.float32).to("cuda:0")
-    indptr = torch.tensor(indptr, dtype=torch.int32).to("cuda:0")
-    v_merged, s_merged = flashinfer.triton.cascade.variable_length_merge_states(
-        v, s, indptr
-    )
-    for i in range(seq_len):
-        sub_v = v[indptr[i] : indptr[i + 1]]
-        sub_s = s[indptr[i] : indptr[i + 1]]
-        sub_v = torch.unsqueeze(sub_v, 0)
-        sub_s = torch.unsqueeze(sub_s, 0)
-        v_merged_std, s_merged_std = flashinfer.merge_states(sub_v, sub_s)
-        v_merged_std = torch.squeeze(v_merged_std, 0)
-        s_merged_std = torch.squeeze(s_merged_std, 0)
-        assert v_merged[i].shape == v_merged_std.shape
-        assert torch.allclose(v_merged[i], v_merged_std, atol=1e-2)
-        assert torch.allclose(s_merged[i], s_merged_std, atol=1e-2)
+    try:
+        max_index_sets = 512
+        lengths = torch.randint(low=1, high=max_index_sets, size=(seq_len,))
+        indptr = [0]
+        for i in range(seq_len):
+            indptr.append(indptr[-1] + lengths[i])
+        v = torch.randn(indptr[-1], num_heads, head_dim).half().to("cuda:0")
+        s = torch.randn(indptr[-1], num_heads, dtype=torch.float32).to("cuda:0")
+        indptr = torch.tensor(indptr, dtype=torch.int32).to("cuda:0")
+        v_merged, s_merged = flashinfer.triton.cascade.variable_length_merge_states(
+            v, s, indptr
+        )
+        for i in range(seq_len):
+            sub_v = v[indptr[i] : indptr[i + 1]]
+            sub_s = s[indptr[i] : indptr[i + 1]]
+            sub_v = torch.unsqueeze(sub_v, 0)
+            sub_s = torch.unsqueeze(sub_s, 0)
+            v_merged_std, s_merged_std = flashinfer.merge_states(sub_v, sub_s)
+            v_merged_std = torch.squeeze(v_merged_std, 0)
+            s_merged_std = torch.squeeze(s_merged_std, 0)
+            assert v_merged[i].shape == v_merged_std.shape
+            assert torch.allclose(v_merged[i], v_merged_std, atol=1e-2)
+            assert torch.allclose(s_merged[i], s_merged_std, atol=1e-2)
+    except GPUArchitectureError as e:
+        pytest.skip(e.msg)
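
For reference, here is the skip pattern the updated tests rely on, distilled into a standalone sketch. It is not part of the diff: the shapes are arbitrary, and it assumes, as the diff does, that any architecture-gated flashinfer call raises GPUArchitectureError carrying a human-readable .msg attribute.

# Standalone sketch (illustrative, not from the diff): instead of probing
# compute capability up front, run the kernel and translate the library's
# GPUArchitectureError into a pytest skip.
import pytest
import torch

import flashinfer
from flashinfer.utils import GPUArchitectureError


def test_merge_state_sketch():
    try:
        va = torch.randn(16, 4, 64).half().to("cuda:0")
        sa = torch.randn(16, 4, dtype=torch.float32).to("cuda:0")
        vb = torch.randn(16, 4, 64).half().to("cuda:0")
        sb = torch.randn(16, 4, dtype=torch.float32).to("cuda:0")
        flashinfer.merge_state(va, sa, vb, sb)  # raises on unsupported GPUs
    except GPUArchitectureError as e:
        pytest.skip(e.msg)  # skip, rather than fail, on unsupported hardware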