From 0ca6bdaea4c9144e4adf42809400b0aaeb27fb5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=88=98=E5=93=B2=E7=BB=AD?= Date: Tue, 25 Nov 2025 10:02:34 +0800 Subject: [PATCH 1/4] modify nz in bf16 --- vllm_ascend/attention/mla_v1.py | 2 +- vllm_ascend/models/qwen2_5_vl.py | 4 ++-- vllm_ascend/models/qwen2_vl.py | 4 ++-- vllm_ascend/ops/common_fused_moe.py | 2 +- vllm_ascend/ops/linear.py | 3 +-- vllm_ascend/torchair/quantization/torchair_w8a8_dynamic.py | 2 +- vllm_ascend/torchair/torchair_sfa.py | 4 ++-- vllm_ascend/utils.py | 4 +++- 8 files changed, 13 insertions(+), 12 deletions(-) diff --git a/vllm_ascend/attention/mla_v1.py b/vllm_ascend/attention/mla_v1.py index c0e175c246a..5dcac6c37ee 100644 --- a/vllm_ascend/attention/mla_v1.py +++ b/vllm_ascend/attention/mla_v1.py @@ -652,7 +652,7 @@ def get_and_maybe_dequant_weights(layer: LinearBase): # Function `get_and_maybe_dequant_weights` will cast the weights to # FRACTAL_AND. So we need to cast to FRACTAL_NZ again. - if is_enable_nz(): + if is_enable_nz(self.kv_b_proj.weight.data.dtype): self.kv_b_proj.weight.data = torch_npu.npu_format_cast( self.kv_b_proj.weight.data, ACL_FORMAT_FRACTAL_NZ) diff --git a/vllm_ascend/models/qwen2_5_vl.py b/vllm_ascend/models/qwen2_5_vl.py index 35ac58d0a9d..ec39b9648ca 100644 --- a/vllm_ascend/models/qwen2_5_vl.py +++ b/vllm_ascend/models/qwen2_5_vl.py @@ -284,7 +284,7 @@ def pad_qkv_weight(self, data): dim=2) qkv_weight_final = qkv_weight_padded.reshape(-1, self.hidden_size) - if is_enable_nz(): + if is_enable_nz(qkv_weight_final.dtype): qkv_weight_final_copy = torch.empty_like(qkv_weight_final).copy_( qkv_weight_final) qkv_weight_final_copy = torch_npu.npu_format_cast( @@ -300,7 +300,7 @@ def pad_proj_weight(self, data): (0, self.half_pad_hidden_size_per_attention_head, 0, 0)).reshape( self.hidden_size, -1) - if is_enable_nz(): + if is_enable_nz(out_weight.dtype): out_weight_copy = torch.empty_like(out_weight).copy_(out_weight) out_weight_copy = torch_npu.npu_format_cast( out_weight_copy, ACL_FORMAT_FRACTAL_ND) diff --git a/vllm_ascend/models/qwen2_vl.py b/vllm_ascend/models/qwen2_vl.py index ccd461613b5..bd4828351d1 100644 --- a/vllm_ascend/models/qwen2_vl.py +++ b/vllm_ascend/models/qwen2_vl.py @@ -268,7 +268,7 @@ def pad_qkv_weight(self, data): dim=2) qkv_weight_final = qkv_weight_padded.reshape(-1, self.hidden_size) - if is_enable_nz(): + if is_enable_nz(qkv_weight_final.dtype): qkv_weight_final_copy = torch.empty_like(qkv_weight_final).copy_( qkv_weight_final) qkv_weight_final_copy = torch_npu.npu_format_cast( @@ -284,7 +284,7 @@ def pad_proj_weight(self, data): (0, self.half_pad_hidden_size_per_attention_head, 0, 0)).reshape( self.hidden_size, -1) - if is_enable_nz(): + if is_enable_nz(out_weight.dtype): out_weight_copy = torch.empty_like(out_weight).copy_(out_weight) out_weight_copy = torch_npu.npu_format_cast( out_weight_copy, ACL_FORMAT_FRACTAL_ND) diff --git a/vllm_ascend/ops/common_fused_moe.py b/vllm_ascend/ops/common_fused_moe.py index 3145b92c746..296983f633c 100644 --- a/vllm_ascend/ops/common_fused_moe.py +++ b/vllm_ascend/ops/common_fused_moe.py @@ -89,7 +89,7 @@ def process_weights_after_loading(self, layer): w2_data = self._maybe_pad_weight(layer.w2_weight.data) layer.w2_weight = torch.nn.Parameter(w2_data, requires_grad=False) - if not is_310p() and is_enable_nz(): + if not is_310p() and is_enable_nz(layer.w13_weight.data.type): layer.w13_weight.data = torch_npu.npu_format_cast( layer.w13_weight.data, ACL_FORMAT_FRACTAL_NZ) layer.w2_weight.data = torch_npu.npu_format_cast( diff --git 
a/vllm_ascend/ops/linear.py b/vllm_ascend/ops/linear.py index eab312d5cf8..69889b700ee 100644 --- a/vllm_ascend/ops/linear.py +++ b/vllm_ascend/ops/linear.py @@ -45,8 +45,7 @@ class AscendUnquantizedLinearMethod(UnquantizedLinearMethod): def process_weights_after_loading(self, layer: torch.nn.Module) -> None: super().process_weights_after_loading(layer) - if (is_enable_nz() and layer.weight.data.dtype - in [torch.float16, torch.bfloat16]): + if (is_enable_nz(layer.weight.data.dtype)): layer.weight.data = torch_npu.npu_format_cast( layer.weight.data, ACL_FORMAT_FRACTAL_NZ) diff --git a/vllm_ascend/torchair/quantization/torchair_w8a8_dynamic.py b/vllm_ascend/torchair/quantization/torchair_w8a8_dynamic.py index bc0a8d35783..aeb281b7994 100644 --- a/vllm_ascend/torchair/quantization/torchair_w8a8_dynamic.py +++ b/vllm_ascend/torchair/quantization/torchair_w8a8_dynamic.py @@ -835,7 +835,7 @@ def process_weights_after_loading(self, layer): if self.transpose_weight: layer.weight.data = layer.weight.data.transpose(0, 1).contiguous() # cast quantized weight tensors in NZ format (29) for higher inference speed - if is_enable_nz(): + if is_enable_nz(layer.weight.data.dtype): layer.weight.data = torch_npu.npu_format_cast( layer.weight.data, 29) layer.weight_scale.data = layer.weight_scale.data.flatten() diff --git a/vllm_ascend/torchair/torchair_sfa.py b/vllm_ascend/torchair/torchair_sfa.py index 36c32247d6b..751cd2f935c 100644 --- a/vllm_ascend/torchair/torchair_sfa.py +++ b/vllm_ascend/torchair/torchair_sfa.py @@ -842,7 +842,7 @@ def _process_weights_for_fused_mlapo(self, act_dtype: torch.dtype): wd_qkv = wd_qkv.t().contiguous() wd_qkv = transdata(wd_qkv, block_size=(16, 32)).unsqueeze(0).contiguous() - if is_enable_nz(): + if is_enable_nz(wd_qkv.dtype): self.wd_qkv = torch_npu.npu_format_cast(wd_qkv, 29) kv_a_proj_deq_scl = self.kv_a_proj_with_mqa.deq_scale.clone() @@ -876,7 +876,7 @@ def _process_weights_for_fused_mlapo(self, act_dtype: torch.dtype): self.num_heads * (self.qk_nope_head_dim + self.qk_rope_head_dim), -1) wu_q = transdata(wu_q, block_size=(16, 32)).unsqueeze(0).contiguous() - if is_enable_nz(): + if is_enable_nz(wu_q.dtype): self.wu_q = torch_npu.npu_format_cast(wu_q, 29) qb_deq_scl = self.q_proj.deq_scale.data.clone() diff --git a/vllm_ascend/utils.py b/vllm_ascend/utils.py index 52a88ecabdf..a8c8e324602 100644 --- a/vllm_ascend/utils.py +++ b/vllm_ascend/utils.py @@ -71,13 +71,15 @@ def is_310p(): return _IS_310P -def is_enable_nz(vllm_config: Optional[VllmConfig] = None) -> bool: +def is_enable_nz(dtype: Optional[torch.dtype] = torch.int8, vllm_config: Optional[VllmConfig] = None) -> bool: global _ENABLE_NZ if _ENABLE_NZ is None: if not vllm_config: raise ValueError( "vllm_config must be provided when _ENABLE_NZ is None") _ENABLE_NZ = envs_ascend.VLLM_ASCEND_ENABLE_NZ and vllm_config.model_config.hf_config.model_type != "qwen3_next" + if dtype in [torch.float16, torch.bfloat16]: + _ENABLE_NZ = 0 return _ENABLE_NZ From 7d1db3449d816ab2250082b60c40506899cb6cb1 Mon Sep 17 00:00:00 2001 From: mazhixin000 <76465098+mazhixin000@users.noreply.github.com> Date: Mon, 24 Nov 2025 17:23:11 +0800 Subject: [PATCH 2/4] [Doc][v11.0-dev][cherry-pick]Add single node PD disaggregation instructions (#4370) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### What this PR does / why we need it? add single node PD disaggregation instructions for Qwen 2.5VL model. ### Does this PR introduce _any_ user-facing change? 
no

---------

Signed-off-by: mazhixin
Signed-off-by: mazhixin000
Co-authored-by: mazhixin
Signed-off-by: 刘哲续
---
 docs/source/tutorials/index.md                |   1 +
 ...ngle_node_pd_disaggregation_llmdatadist.md | 181 ++++++++++++++++++
 2 files changed, 182 insertions(+)
 create mode 100644 docs/source/tutorials/single_node_pd_disaggregation_llmdatadist.md

diff --git a/docs/source/tutorials/index.md b/docs/source/tutorials/index.md
index da8890616c1..89eb3fe81fa 100644
--- a/docs/source/tutorials/index.md
+++ b/docs/source/tutorials/index.md
@@ -8,6 +8,7 @@ single_npu_multimodal
 single_npu_audio
 single_npu_qwen3_embedding
 single_npu_qwen3_quantization
+single_node_pd_disaggregation_llmdatadist
 multi_npu_qwen3_next
 multi_npu
 multi_npu_moge
diff --git a/docs/source/tutorials/single_node_pd_disaggregation_llmdatadist.md b/docs/source/tutorials/single_node_pd_disaggregation_llmdatadist.md
new file mode 100644
index 00000000000..db1834e2a6d
--- /dev/null
+++ b/docs/source/tutorials/single_node_pd_disaggregation_llmdatadist.md
@@ -0,0 +1,181 @@
+# Prefill-Decode Disaggregation Llmdatadist Verification (Qwen2.5-VL)
+
+## Getting Started
+
+vLLM-Ascend now supports prefill-decode (PD) disaggregation. This guide walks through the steps needed to verify the feature with constrained resources.
+
+Using the Qwen2.5-VL-7B-Instruct model as an example, we deploy the "1P1D" architecture with vllm-ascend v0.11.0rc1 (with vLLM v0.11.0) on one Atlas 800T A2 server. Assume the node IP address is 192.0.0.1.
+
+## Verify Communication Environment
+
+### Verification Process
+
+1. Single Node Verification:
+
+Execute the following commands in sequence. The results must all be `success` and the status must be `UP`:
+
+```bash
+# Check the remote switch ports
+for i in {0..7}; do hccn_tool -i $i -lldp -g | grep Ifname; done
+# Get the link status of the Ethernet ports (UP or DOWN)
+for i in {0..7}; do hccn_tool -i $i -link -g ; done
+# Check the network health status
+for i in {0..7}; do hccn_tool -i $i -net_health -g ; done
+# View the network detected IP configuration
+for i in {0..7}; do hccn_tool -i $i -netdetect -g ; done
+# View gateway configuration
+for i in {0..7}; do hccn_tool -i $i -gateway -g ; done
+# View NPU network configuration
+cat /etc/hccn.conf
+```
+
+2. Get NPU IP Addresses
+
+```bash
+for i in {0..7}; do hccn_tool -i $i -ip -g;done
+```
+
+## Generate Ranktable
+
+The rank table is a JSON file that specifies the mapping of Ascend NPU ranks to nodes. For more details, please refer to the [vllm-ascend examples](https://github.com/vllm-project/vllm-ascend/blob/main/examples/disaggregated_prefill_v1/README.md). Execute the following commands for reference.
+
+```shell
+cd vllm-ascend/examples/disaggregate_prefill_v1/
+bash gen_ranktable.sh --ips 192.0.0.1 \
+    --npus-per-node 2 --network-card-name eth0 --prefill-device-cnt 1 --decode-device-cnt 1
+```
+
+If you want to run "2P1D", set npus-per-node to 3 and prefill-device-cnt to 2 (see the example after the parameter table below). The rank table will be generated at /vllm-workspace/vllm-ascend/examples/disaggregate_prefill_v1/ranktable.json.
+
+| Parameter | Meaning |
+| --- | --- |
+| --ips | Each node's local IP address (prefiller nodes must be listed before decoder nodes) |
+| --npus-per-node | Number of NPU chips on each node |
+| --network-card-name | The physical machine's NIC |
+| --prefill-device-cnt | Number of NPU chips used for prefill |
+| --decode-device-cnt | Number of NPU chips used for decode |
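+
+For example, a "2P1D" rank table on the same node could be generated like this (same IP and NIC as above; only the device counts change):
+
+```shell
+cd vllm-ascend/examples/disaggregate_prefill_v1/
+bash gen_ranktable.sh --ips 192.0.0.1 \
+    --npus-per-node 3 --network-card-name eth0 --prefill-device-cnt 2 --decode-device-cnt 1
+```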
+
+## Prefiller/Decoder Deployment
+
+Run the following scripts to launch a server on the prefiller and decoder NPUs, respectively.
+
+:::::{tab-set}
+
+::::{tab-item} Prefiller
+
+```shell
+export ASCEND_RT_VISIBLE_DEVICES=0
+export HCCL_IF_IP=192.0.0.1 # node ip
+export GLOO_SOCKET_IFNAME="eth0" # network card name
+export TP_SOCKET_IFNAME="eth0"
+export HCCL_SOCKET_IFNAME="eth0"
+export DISAGGREGATED_PREFILL_RANK_TABLE_PATH="/path/to/your/generated/ranktable.json"
+export OMP_PROC_BIND=false
+export OMP_NUM_THREADS=10
+export VLLM_ASCEND_LLMDD_RPC_PORT=5959
+
+vllm serve /model/Qwen2.5-VL-7B-Instruct \
+    --host 0.0.0.0 \
+    --port 13700 \
+    --tensor-parallel-size 1 \
+    --no-enable-prefix-caching \
+    --seed 1024 \
+    --served-model-name qwen25vl \
+    --max-model-len 40000 \
+    --max-num-batched-tokens 40000 \
+    --trust-remote-code \
+    --gpu-memory-utilization 0.9 \
+    --kv-transfer-config \
+    '{"kv_connector": "LLMDataDistCMgrConnector",
+      "kv_buffer_device": "npu",
+      "kv_role": "kv_producer",
+      "kv_parallel_size": 1,
+      "kv_port": "20001",
+      "engine_id": "0",
+      "kv_connector_module_path": "vllm_ascend.distributed.llmdatadist_c_mgr_connector"
+     }'
+```
+
+::::
+
+::::{tab-item} Decoder
+
+```shell
+export ASCEND_RT_VISIBLE_DEVICES=1
+export HCCL_IF_IP=192.0.0.1 # node ip
+export GLOO_SOCKET_IFNAME="eth0" # network card name
+export TP_SOCKET_IFNAME="eth0"
+export HCCL_SOCKET_IFNAME="eth0"
+export DISAGGREGATED_PREFILL_RANK_TABLE_PATH="/path/to/your/generated/ranktable.json"
+export OMP_PROC_BIND=false
+export OMP_NUM_THREADS=10
+export VLLM_ASCEND_LLMDD_RPC_PORT=5979
+
+vllm serve /model/Qwen2.5-VL-7B-Instruct \
+    --host 0.0.0.0 \
+    --port 13701 \
+    --no-enable-prefix-caching \
+    --tensor-parallel-size 1 \
+    --seed 1024 \
+    --served-model-name qwen25vl \
+    --max-model-len 40000 \
+    --max-num-batched-tokens 40000 \
+    --trust-remote-code \
+    --gpu-memory-utilization 0.9 \
+    --kv-transfer-config \
+    '{"kv_connector": "LLMDataDistCMgrConnector",
+      "kv_buffer_device": "npu",
+      "kv_role": "kv_consumer",
+      "kv_parallel_size": 1,
+      "kv_port": "20001",
+      "engine_id": "0",
+      "kv_connector_module_path": "vllm_ascend.distributed.llmdatadist_c_mgr_connector"
+     }'
+```
+
+::::
+
+:::::
+
+If you want to run "2P1D", set ASCEND_RT_VISIBLE_DEVICES, VLLM_ASCEND_LLMDD_RPC_PORT and the serve port to different values for each prefiller process.
+
+## Example Proxy for Deployment
+
+Run a proxy server on the same node as the prefiller service instance. You can get the proxy program from the repository's examples: [load\_balance\_proxy\_server\_example.py](https://github.com/vllm-project/vllm-ascend/blob/main/examples/disaggregated_prefill_v1/load_balance_proxy_server_example.py)
+
+```shell
+python load_balance_proxy_server_example.py \
+    --host 192.0.0.1 \
+    --port 8080 \
+    --prefiller-hosts 192.0.0.1 \
+    --prefiller-port 13700 \
+    --decoder-hosts 192.0.0.1 \
+    --decoder-ports 13701
+```
+
+| Parameter | Meaning |
+| --- | --- |
+| --port | Port of the proxy |
+| --prefiller-port | Ports of all prefiller instances |
+| --decoder-ports | Ports of all decoder instances |
+
+## Verification
+
+Check service health using the proxy server endpoint.
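+
+Before sending a full request, you can optionally confirm that each backend instance is alive; vLLM's OpenAI-compatible server exposes a `/health` endpoint, so a quick check against the prefiller and decoder ports configured above might look like this:
+
+```shell
+curl http://192.0.0.1:13700/health
+curl http://192.0.0.1:13701/health
+```
+
+Once both instances respond, send the chat completion request below through the proxy: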
+ +```shell +curl http://192.0.0.1:8080/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{ + "model": "qwen25vl", + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": [ + {"type": "image_url", "image_url": {"url": "https://modelscope.oss-cn-beijing.aliyuncs.com/resource/qwen.png"}}, + {"type": "text", "text": "What is the text in the illustrate?"} + ]} + ], + "max_tokens": 100, + "temperature": 0 + }' +``` From a462fcd66dd95e537e4ba0a3bde61e08d9f8d9a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=88=98=E5=93=B2=E7=BB=AD?= Date: Tue, 25 Nov 2025 10:02:34 +0800 Subject: [PATCH 3/4] modify nz in bf16 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 刘哲续 --- vllm_ascend/attention/mla_v1.py | 2 +- vllm_ascend/models/qwen2_5_vl.py | 4 ++-- vllm_ascend/models/qwen2_vl.py | 4 ++-- vllm_ascend/ops/common_fused_moe.py | 2 +- vllm_ascend/ops/linear.py | 3 +-- vllm_ascend/torchair/quantization/torchair_w8a8_dynamic.py | 2 +- vllm_ascend/torchair/torchair_sfa.py | 4 ++-- vllm_ascend/utils.py | 4 +++- 8 files changed, 13 insertions(+), 12 deletions(-) diff --git a/vllm_ascend/attention/mla_v1.py b/vllm_ascend/attention/mla_v1.py index c0e175c246a..5dcac6c37ee 100644 --- a/vllm_ascend/attention/mla_v1.py +++ b/vllm_ascend/attention/mla_v1.py @@ -652,7 +652,7 @@ def get_and_maybe_dequant_weights(layer: LinearBase): # Function `get_and_maybe_dequant_weights` will cast the weights to # FRACTAL_AND. So we need to cast to FRACTAL_NZ again. - if is_enable_nz(): + if is_enable_nz(self.kv_b_proj.weight.data.dtype): self.kv_b_proj.weight.data = torch_npu.npu_format_cast( self.kv_b_proj.weight.data, ACL_FORMAT_FRACTAL_NZ) diff --git a/vllm_ascend/models/qwen2_5_vl.py b/vllm_ascend/models/qwen2_5_vl.py index 35ac58d0a9d..ec39b9648ca 100644 --- a/vllm_ascend/models/qwen2_5_vl.py +++ b/vllm_ascend/models/qwen2_5_vl.py @@ -284,7 +284,7 @@ def pad_qkv_weight(self, data): dim=2) qkv_weight_final = qkv_weight_padded.reshape(-1, self.hidden_size) - if is_enable_nz(): + if is_enable_nz(qkv_weight_final.dtype): qkv_weight_final_copy = torch.empty_like(qkv_weight_final).copy_( qkv_weight_final) qkv_weight_final_copy = torch_npu.npu_format_cast( @@ -300,7 +300,7 @@ def pad_proj_weight(self, data): (0, self.half_pad_hidden_size_per_attention_head, 0, 0)).reshape( self.hidden_size, -1) - if is_enable_nz(): + if is_enable_nz(out_weight.dtype): out_weight_copy = torch.empty_like(out_weight).copy_(out_weight) out_weight_copy = torch_npu.npu_format_cast( out_weight_copy, ACL_FORMAT_FRACTAL_ND) diff --git a/vllm_ascend/models/qwen2_vl.py b/vllm_ascend/models/qwen2_vl.py index ccd461613b5..bd4828351d1 100644 --- a/vllm_ascend/models/qwen2_vl.py +++ b/vllm_ascend/models/qwen2_vl.py @@ -268,7 +268,7 @@ def pad_qkv_weight(self, data): dim=2) qkv_weight_final = qkv_weight_padded.reshape(-1, self.hidden_size) - if is_enable_nz(): + if is_enable_nz(qkv_weight_final.dtype): qkv_weight_final_copy = torch.empty_like(qkv_weight_final).copy_( qkv_weight_final) qkv_weight_final_copy = torch_npu.npu_format_cast( @@ -284,7 +284,7 @@ def pad_proj_weight(self, data): (0, self.half_pad_hidden_size_per_attention_head, 0, 0)).reshape( self.hidden_size, -1) - if is_enable_nz(): + if is_enable_nz(out_weight.dtype): out_weight_copy = torch.empty_like(out_weight).copy_(out_weight) out_weight_copy = torch_npu.npu_format_cast( out_weight_copy, ACL_FORMAT_FRACTAL_ND) diff --git a/vllm_ascend/ops/common_fused_moe.py 
b/vllm_ascend/ops/common_fused_moe.py index 3145b92c746..296983f633c 100644 --- a/vllm_ascend/ops/common_fused_moe.py +++ b/vllm_ascend/ops/common_fused_moe.py @@ -89,7 +89,7 @@ def process_weights_after_loading(self, layer): w2_data = self._maybe_pad_weight(layer.w2_weight.data) layer.w2_weight = torch.nn.Parameter(w2_data, requires_grad=False) - if not is_310p() and is_enable_nz(): + if not is_310p() and is_enable_nz(layer.w13_weight.data.type): layer.w13_weight.data = torch_npu.npu_format_cast( layer.w13_weight.data, ACL_FORMAT_FRACTAL_NZ) layer.w2_weight.data = torch_npu.npu_format_cast( diff --git a/vllm_ascend/ops/linear.py b/vllm_ascend/ops/linear.py index eab312d5cf8..69889b700ee 100644 --- a/vllm_ascend/ops/linear.py +++ b/vllm_ascend/ops/linear.py @@ -45,8 +45,7 @@ class AscendUnquantizedLinearMethod(UnquantizedLinearMethod): def process_weights_after_loading(self, layer: torch.nn.Module) -> None: super().process_weights_after_loading(layer) - if (is_enable_nz() and layer.weight.data.dtype - in [torch.float16, torch.bfloat16]): + if (is_enable_nz(layer.weight.data.dtype)): layer.weight.data = torch_npu.npu_format_cast( layer.weight.data, ACL_FORMAT_FRACTAL_NZ) diff --git a/vllm_ascend/torchair/quantization/torchair_w8a8_dynamic.py b/vllm_ascend/torchair/quantization/torchair_w8a8_dynamic.py index bc0a8d35783..aeb281b7994 100644 --- a/vllm_ascend/torchair/quantization/torchair_w8a8_dynamic.py +++ b/vllm_ascend/torchair/quantization/torchair_w8a8_dynamic.py @@ -835,7 +835,7 @@ def process_weights_after_loading(self, layer): if self.transpose_weight: layer.weight.data = layer.weight.data.transpose(0, 1).contiguous() # cast quantized weight tensors in NZ format (29) for higher inference speed - if is_enable_nz(): + if is_enable_nz(layer.weight.data.dtype): layer.weight.data = torch_npu.npu_format_cast( layer.weight.data, 29) layer.weight_scale.data = layer.weight_scale.data.flatten() diff --git a/vllm_ascend/torchair/torchair_sfa.py b/vllm_ascend/torchair/torchair_sfa.py index 36c32247d6b..751cd2f935c 100644 --- a/vllm_ascend/torchair/torchair_sfa.py +++ b/vllm_ascend/torchair/torchair_sfa.py @@ -842,7 +842,7 @@ def _process_weights_for_fused_mlapo(self, act_dtype: torch.dtype): wd_qkv = wd_qkv.t().contiguous() wd_qkv = transdata(wd_qkv, block_size=(16, 32)).unsqueeze(0).contiguous() - if is_enable_nz(): + if is_enable_nz(wd_qkv.dtype): self.wd_qkv = torch_npu.npu_format_cast(wd_qkv, 29) kv_a_proj_deq_scl = self.kv_a_proj_with_mqa.deq_scale.clone() @@ -876,7 +876,7 @@ def _process_weights_for_fused_mlapo(self, act_dtype: torch.dtype): self.num_heads * (self.qk_nope_head_dim + self.qk_rope_head_dim), -1) wu_q = transdata(wu_q, block_size=(16, 32)).unsqueeze(0).contiguous() - if is_enable_nz(): + if is_enable_nz(wu_q.dtype): self.wu_q = torch_npu.npu_format_cast(wu_q, 29) qb_deq_scl = self.q_proj.deq_scale.data.clone() diff --git a/vllm_ascend/utils.py b/vllm_ascend/utils.py index 52a88ecabdf..a8c8e324602 100644 --- a/vllm_ascend/utils.py +++ b/vllm_ascend/utils.py @@ -71,13 +71,15 @@ def is_310p(): return _IS_310P -def is_enable_nz(vllm_config: Optional[VllmConfig] = None) -> bool: +def is_enable_nz(dtype: Optional[torch.dtype] = torch.int8, vllm_config: Optional[VllmConfig] = None) -> bool: global _ENABLE_NZ if _ENABLE_NZ is None: if not vllm_config: raise ValueError( "vllm_config must be provided when _ENABLE_NZ is None") _ENABLE_NZ = envs_ascend.VLLM_ASCEND_ENABLE_NZ and vllm_config.model_config.hf_config.model_type != "qwen3_next" + if dtype in [torch.float16, torch.bfloat16]: + 
_ENABLE_NZ = 0 return _ENABLE_NZ From 70274398249c7b4cfa7e0dda94378ddcce90242c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=88=98=E5=93=B2=E7=BB=AD?= Date: Tue, 25 Nov 2025 10:42:27 +0800 Subject: [PATCH 4/4] modify nz in bf16 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 刘哲续 --- vllm_ascend/ops/common_fused_moe.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm_ascend/ops/common_fused_moe.py b/vllm_ascend/ops/common_fused_moe.py index 296983f633c..136d6a2fc9b 100644 --- a/vllm_ascend/ops/common_fused_moe.py +++ b/vllm_ascend/ops/common_fused_moe.py @@ -89,7 +89,7 @@ def process_weights_after_loading(self, layer): w2_data = self._maybe_pad_weight(layer.w2_weight.data) layer.w2_weight = torch.nn.Parameter(w2_data, requires_grad=False) - if not is_310p() and is_enable_nz(layer.w13_weight.data.type): + if not is_310p() and is_enable_nz(layer.w13_weight.data.dtype): layer.w13_weight.data = torch_npu.npu_format_cast( layer.w13_weight.data, ACL_FORMAT_FRACTAL_NZ) layer.w2_weight.data = torch_npu.npu_format_cast(
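
For reference, a sketch of what `is_enable_nz` in `vllm_ascend/utils.py` looks like after this series, assembled from the hunks above; the exact indentation of the new dtype check is inferred, and `Optional`, `torch`, `envs_ascend`, `VllmConfig`, and the `_ENABLE_NZ` module global are assumed to already exist in that file as before:

```python
def is_enable_nz(dtype: Optional[torch.dtype] = torch.int8,
                 vllm_config: Optional[VllmConfig] = None) -> bool:
    global _ENABLE_NZ
    if _ENABLE_NZ is None:
        if not vllm_config:
            raise ValueError(
                "vllm_config must be provided when _ENABLE_NZ is None")
        # NZ is only considered when the env switch is set and the model is not qwen3_next.
        _ENABLE_NZ = (envs_ascend.VLLM_ASCEND_ENABLE_NZ and
                      vllm_config.model_config.hf_config.model_type != "qwen3_next")
    # New in this series: float16/bfloat16 weights stay in ND format, so NZ is reported as disabled.
    if dtype in [torch.float16, torch.bfloat16]:
        _ENABLE_NZ = 0
    return _ENABLE_NZ
```

Callers such as `AscendUnquantizedLinearMethod.process_weights_after_loading` now pass the weight dtype (e.g. `is_enable_nz(layer.weight.data.dtype)`) and only cast to `ACL_FORMAT_FRACTAL_NZ` when it returns a truthy value.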