
Commit 1cffe62

author: root
committed: add test for prefix cache feature of deepseek
Signed-off-by: root <root@hostname-2pbfv.foreman.pxe>
1 parent 0637e8f · commit 1cffe62

File tree

1 file changed: 124 additions, 0 deletions

@@ -0,0 +1,124 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
import json

import pytest
from vllm.utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import get_TTFT, run_aisbench_cases

MODELS = [
    "vllm-ascend/DeepSeek-R1-0528-W8A8",
]

# Warm-up case: short outputs, large batch, no baseline comparison.
aisbench_warm_up = [{
    "case_type": "performance",
    "dataset_path": "vllm-ascend/GSM8K-in1024-bs210",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 210,
    "max_out_len": 2,
    "batch_size": 1000,
    "baseline": 0,
    "threshold": 0.97
}]

# Performance case on the prefix0 dataset (baseline without a shared prefix).
aisbench_cases0 = [{
    "case_type": "performance",
    "dataset_path": "vllm-ascend/prefix0-in3500-bs210",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 210,
    "max_out_len": 1500,
    "batch_size": 18,
    "baseline": 1,
    "threshold": 0.97
}]

# Performance case on the prefix75 dataset (requests share a common prefix,
# so the prefix cache should reduce TTFT).
aisbench_cases75 = [{
    "case_type": "performance",
    "dataset_path": "vllm-ascend/prefix75-in3500-bs210",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 210,
    "max_out_len": 1500,
    "batch_size": 18,
    "baseline": 1,
    "threshold": 0.97
}]


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
async def test_models(model: str) -> None:
    port = get_open_port()
    env_dict = {
        "OMP_NUM_THREADS": "10",
        "OMP_PROC_BIND": "false",
        "HCCL_BUFFSIZE": "1024",
        "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True",
        "VLLM_USE_V1": "1"
    }
    additional_config = {
        "ascend_scheduler_config": {
            "enabled": False
        },
        "torchair_graph_config": {
            "enabled": True,
            "enable_multistream_moe": False,
            "enable_multistream_mla": True,
            "graph_batch_size": [16],
            "use_cached_graph": True
        },
        "chunked_prefill_for_mla": True,
        "enable_weight_nz_layout": True
    }
    speculative_config = {
        "num_speculative_tokens": 1,
        "method": "deepseek_mtp"
    }
    server_args = [
        "--quantization", "ascend", "--data-parallel-size", "2",
        "--tensor-parallel-size", "8", "--enable-expert-parallel", "--port",
        str(port), "--seed", "1024", "--max-model-len", "5200",
        "--max-num-batched-tokens", "4096", "--max-num-seqs", "16",
        "--trust-remote-code", "--gpu-memory-utilization", "0.9",
        "--additional-config",
        json.dumps(additional_config), "--speculative-config",
        json.dumps(speculative_config)
    ]
    # First server: measure TTFT on the dataset without a shared prefix.
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False):
        run_aisbench_cases(model, port, aisbench_warm_up)
        result = run_aisbench_cases(model, port, aisbench_cases0)
    TTFT0 = get_TTFT(result)
    # Second, fresh server: measure TTFT on the shared-prefix dataset.
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False):
        run_aisbench_cases(model, port, aisbench_warm_up)
        result = run_aisbench_cases(model, port, aisbench_cases75)
    TTFT75 = get_TTFT(result)
    assert TTFT75 < 0.8 * TTFT0, (
        f"The TTFT for prefix75 {TTFT75} is not less than 0.8*TTFT for prefix0 {TTFT0}."
    )
    print(
        f"The TTFT for prefix75 {TTFT75} is less than 0.8*TTFT for prefix0 {TTFT0}."
    )
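
For context, the pass/fail criterion of this test can be sketched in isolation. The snippet below is a minimal, hypothetical illustration: it assumes get_TTFT from tools.aisbench returns the time-to-first-token as a single float (not confirmed by this diff), and the helper name check_prefix_cache_speedup is invented for the example.

def check_prefix_cache_speedup(ttft_prefix0: float, ttft_prefix75: float,
                               ratio: float = 0.8) -> None:
    # The test requires at least a 20% TTFT reduction on the shared-prefix
    # dataset relative to the no-prefix baseline (TTFT75 < 0.8 * TTFT0).
    assert ttft_prefix75 < ratio * ttft_prefix0, (
        f"TTFT with shared prefix ({ttft_prefix75:.3f}) is not below "
        f"{ratio} x baseline TTFT ({ttft_prefix0:.3f}).")

# Example values that would satisfy the check.
check_prefix_cache_speedup(ttft_prefix0=2.0, ttft_prefix75=1.2)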
