# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

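"""Tests for FP8 collective-communication support: NCCL FP8 dtype plumbing,
the vllm_quantize_fp8 / vllm_all_gather_fp8 custom ops, and the FP8 AllGather
optimization pass flag. (Summary docstring added for readability.)"""
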
import pytest
import torch

from vllm.platforms import current_platform

from ..utils import multi_gpu_test

if not current_platform.is_cuda():
    pytest.skip("CUDA only test", allow_module_level=True)


def test_nccl_fp8_dtype_support():
    """Test that NCCL wrapper supports FP8 datatypes"""
    from vllm.distributed.device_communicators.pynccl_wrapper import (
        ncclDataTypeEnum)

    # Test FP8 E4M3
    assert hasattr(ncclDataTypeEnum, 'ncclFp8E4M3')
    assert ncclDataTypeEnum.ncclFp8E4M3 == 10

    # Test FP8 E5M2
    assert hasattr(ncclDataTypeEnum, 'ncclFp8E5M2')
    assert ncclDataTypeEnum.ncclFp8E5M2 == 11

    # Test from_torch mapping
    assert ncclDataTypeEnum.from_torch(
        torch.float8_e4m3fn) == ncclDataTypeEnum.ncclFp8E4M3
    assert ncclDataTypeEnum.from_torch(
        torch.float8_e5m2) == ncclDataTypeEnum.ncclFp8E5M2
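    # Note: the pynccl communicator resolves tensor dtypes through
    # ncclDataTypeEnum.from_torch when launching collectives, so these two
    # mappings are what allow FP8 tensors to be all-gathered over NCCL.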


def test_custom_ops_registered():
    """Test that custom FP8 ops are registered"""
    # Import to trigger registration
    import vllm.compilation.fp8_collective_ops  # noqa: F401

    # Check that ops are registered
    assert hasattr(torch.ops.vllm, 'vllm_quantize_fp8')
    assert hasattr(torch.ops.vllm, 'vllm_all_gather_fp8')

    # Check that default variants exist
    assert hasattr(torch.ops.vllm.vllm_quantize_fp8, 'default')
    assert hasattr(torch.ops.vllm.vllm_all_gather_fp8, 'default')
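    # (Graph-level rewrite passes generally match against the .default
    # overload, e.g. torch.ops.vllm.vllm_all_gather_fp8.default, which is
    # why its presence is checked above.)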


def test_fp8_quantization_op():
    """Test FP8 quantization custom op"""
    from vllm.compilation.fp8_collective_ops import vllm_quantize_fp8

    # Create test tensor
    x = torch.randn(16, 32, dtype=torch.bfloat16, device='cuda')

    # Quantize
    x_fp8, scale_inv = vllm_quantize_fp8(x)

    # Check output types
    assert x_fp8.dtype == torch.float8_e4m3fn
    assert scale_inv.dtype == torch.float32

    # Check shapes
    assert x_fp8.shape == x.shape
    assert scale_inv.numel() == 1  # per-tensor scale

    # Check dequantization (approximately recovers original)
    x_dequant = x_fp8.to(torch.bfloat16) * scale_inv
    torch.testing.assert_close(x_dequant, x, rtol=0.1, atol=0.1)


def fp8_allgather_worker(local_rank: int, world_size: int):
    """Worker function for multi-GPU FP8 AllGather test"""
    from vllm.compilation.fp8_collective_ops import vllm_all_gather_fp8
    from vllm.distributed import (get_tp_group, init_distributed_environment,
                                  initialize_model_parallel)
    from vllm.utils import update_environment_variables

    device = torch.device(f"cuda:{local_rank}")
    torch.cuda.set_device(device)

    update_environment_variables({
        'RANK': str(local_rank),
        'LOCAL_RANK': str(local_rank),
        'WORLD_SIZE': str(world_size),
        'MASTER_ADDR': 'localhost',
        'MASTER_PORT': '29501',
    })

    # Initialize distributed
    init_distributed_environment()
    initialize_model_parallel(tensor_model_parallel_size=world_size)

    # Create test tensor (generate as BF16 then convert to FP8)
    x = torch.randn(8, 16, dtype=torch.bfloat16,
                    device='cuda').to(torch.float8_e4m3fn)

    # All-gather
    tp_group = get_tp_group()
    gathered = vllm_all_gather_fp8(x,
                                   dim=0,
                                   world_size=tp_group.world_size,
                                   group_name=tp_group.unique_name)

    # Check shape
    expected_shape = (8 * tp_group.world_size, 16)
    assert gathered.shape == expected_shape
    print(
        f"Rank {local_rank}: ✅ FP8 AllGather op test passed! "
        f"Shape: {gathered.shape}")


@multi_gpu_test(num_gpus=2)
def test_fp8_allgather_op():
    """Test FP8 all-gather custom op (requires multi-GPU)"""

    def run_torch_spawn(fn, nprocs):
        torch.multiprocessing.spawn(fn, args=(nprocs, ), nprocs=nprocs)

    run_torch_spawn(fp8_allgather_worker, 2)


def test_fp8_allgather_pass_init():
    """Test FP8 AllGather pass initialization"""
    pytest.skip(
        "Requires distributed initialization - test manually with multi-GPU")


def test_fp8_allgather_pattern_fake():
    """Test pattern with fake mode (no actual distributed execution)"""
    pytest.skip(
        "Pattern registration requires valid TP group - test manually with "
        "multi-GPU")


def fp8_allgather_correctness_worker(local_rank: int, world_size: int):
    """Worker function for FP8 AllGather numerical correctness test"""
    from vllm.compilation.fp8_collective_ops import (vllm_all_gather_fp8,
                                                     vllm_quantize_fp8)
    from vllm.distributed import (get_tp_group, init_distributed_environment,
                                  initialize_model_parallel,
                                  tensor_model_parallel_all_gather)
    from vllm.utils import update_environment_variables

    device = torch.device(f"cuda:{local_rank}")
    torch.cuda.set_device(device)

    update_environment_variables({
        'RANK': str(local_rank),
        'LOCAL_RANK': str(local_rank),
        'WORLD_SIZE': str(world_size),
        'MASTER_ADDR': 'localhost',
        'MASTER_PORT': '29502',
    })

    # Initialize distributed
    init_distributed_environment()
    initialize_model_parallel(tensor_model_parallel_size=world_size)

    # Create test tensor
    x = torch.randn(16, 32, dtype=torch.bfloat16, device='cuda')

    # Method 1: Direct AllGather (baseline, default dim=-1)
    gathered_direct = tensor_model_parallel_all_gather(x)

    # Method 2: FP8 Optimized AllGather (use same dim=-1)
    x_fp8, scale_inv = vllm_quantize_fp8(x)
    tp_group = get_tp_group()
    gathered_fp8 = vllm_all_gather_fp8(x_fp8,
                                       dim=-1,
                                       world_size=tp_group.world_size,
                                       group_name=tp_group.unique_name)

    # All-gather scales (reshape scalar to 1D first)
    scale_inv_1d = scale_inv.view(1)
    scale_gathered = tensor_model_parallel_all_gather(scale_inv_1d, dim=0)

    # Dequantize: apply each rank's scale to its chunk.
    # gathered_fp8 has shape [16, 32*world_size], scale_gathered has shape
    # [world_size]; broadcast each scale to its chunk along dim=-1.
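    # (Worked example: with world_size=2, scale_gathered is [2]; after
    # repeat_interleave by chunk_size=32 and view(1, -1) it becomes [1, 64],
    # matching gathered_fp8's last dimension.)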
    chunk_size = x.shape[-1]
    scale_expanded = torch.repeat_interleave(scale_gathered, chunk_size).view(
        1, -1).to(torch.bfloat16)
    gathered_opt = gathered_fp8.to(torch.bfloat16) * scale_expanded

    # Check correctness (allow for FP8 quantization error)
    torch.testing.assert_close(gathered_opt,
                               gathered_direct,
                               rtol=0.05,
                               atol=0.05)
    print(
        f"Rank {local_rank}: ✅ FP8 AllGather numerical correctness test "
        "passed!")


@multi_gpu_test(num_gpus=2)
def test_fp8_allgather_numerical_correctness():
    """Test end-to-end numerical correctness of FP8 AllGather optimization"""

    def run_torch_spawn(fn, nprocs):
        torch.multiprocessing.spawn(fn, args=(nprocs, ), nprocs=nprocs)

    run_torch_spawn(fp8_allgather_correctness_worker, 2)


def test_pass_config_has_flag():
    """Test that PassConfig has enable_fp8_allgather_opt flag"""
    from vllm.config import PassConfig

    config = PassConfig(enable_fp8_allgather_opt=True)
    assert config.enable_fp8_allgather_opt is True

    config = PassConfig(enable_fp8_allgather_opt=False)
    assert config.enable_fp8_allgather_opt is False

    # Default should be False
    config = PassConfig()
    assert config.enable_fp8_allgather_opt is False
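    # Typical usage (assuming the usual vLLM config nesting) would set this
    # flag on the compilation config's pass config, e.g.
    # CompilationConfig(pass_config=PassConfig(enable_fp8_allgather_opt=True)).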