Skip to content

Commit c786fce

Browse files
authored
Merge pull request ROCm#580 from ROCm/upstream_merge_2025_06_16
Upstream merge 2025 06 16
2 parents 9b43d47 + bf5cec9 commit c786fce

File tree

225 files changed

+5900
-2867
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

225 files changed

+5900
-2867
lines changed

.buildkite/scripts/hardware_ci/run-cpu-test.sh

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,13 +24,22 @@ numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build --tag cpu-test-"$NUMA_NODE
2424
numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build --build-arg VLLM_CPU_DISABLE_AVX512="true" --tag cpu-test-"$NUMA_NODE"-avx2 --target vllm-test -f docker/Dockerfile.cpu .
2525

2626
# Run the image, setting --shm-size=4g for tensor parallel.
27-
docker run -itd --cpuset-cpus="$CORE_RANGE" --cpuset-mems="$NUMA_NODE" --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --privileged=true -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --env VLLM_CPU_OMP_THREADS_BIND="$OMP_CORE_RANGE" --shm-size=4g --name cpu-test-"$NUMA_NODE" cpu-test-"$NUMA_NODE"
28-
docker run -itd --cpuset-cpus="$CORE_RANGE" --cpuset-mems="$NUMA_NODE" --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --privileged=true -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --env VLLM_CPU_OMP_THREADS_BIND="$OMP_CORE_RANGE" --shm-size=4g --name cpu-test-"$NUMA_NODE"-avx2 cpu-test-"$NUMA_NODE"-avx2
27+
docker run -itd --cpuset-cpus="$CORE_RANGE" --cpuset-mems="$NUMA_NODE" --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --privileged=true -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --env VLLM_CPU_OMP_THREADS_BIND="$OMP_CORE_RANGE" --env VLLM_CPU_CI_ENV=1 --shm-size=4g --name cpu-test-"$NUMA_NODE" cpu-test-"$NUMA_NODE"
28+
docker run -itd --cpuset-cpus="$CORE_RANGE" --cpuset-mems="$NUMA_NODE" --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --privileged=true -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --env VLLM_CPU_OMP_THREADS_BIND="$OMP_CORE_RANGE" --env VLLM_CPU_CI_ENV=1 --shm-size=4g --name cpu-test-"$NUMA_NODE"-avx2 cpu-test-"$NUMA_NODE"-avx2
2929

3030
function cpu_tests() {
3131
set -e
3232
export NUMA_NODE=$2
3333

34+
# list packages
35+
docker exec cpu-test-"$NUMA_NODE"-avx2 bash -c "
36+
set -e
37+
pip list"
38+
39+
docker exec cpu-test-"$NUMA_NODE" bash -c "
40+
set -e
41+
pip list"
42+
3443
# offline inference
3544
docker exec cpu-test-"$NUMA_NODE"-avx2 bash -c "
3645
set -e
@@ -72,7 +81,7 @@ function cpu_tests() {
7281
set -e
7382
python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m --dtype half &
7483
timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1
75-
python3 benchmarks/benchmark_serving.py \
84+
VLLM_CPU_CI_ENV=0 python3 benchmarks/benchmark_serving.py \
7685
--backend vllm \
7786
--dataset-name random \
7887
--model facebook/opt-125m \

.buildkite/test-pipeline.yaml

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -177,6 +177,11 @@ steps:
177177
- tests/tracing
178178
commands:
179179
- pytest -v -s metrics
180+
- "pip install \
181+
'opentelemetry-sdk>=1.26.0' \
182+
'opentelemetry-api>=1.26.0' \
183+
'opentelemetry-exporter-otlp>=1.26.0' \
184+
'opentelemetry-semantic-conventions-ai>=0.4.1'"
180185
- pytest -v -s tracing
181186

182187
##### fast check tests #####
@@ -305,6 +310,7 @@ steps:
305310
commands:
306311
- pytest -v -s compile/test_pass_manager.py
307312
- pytest -v -s compile/test_fusion.py
313+
- pytest -v -s compile/test_fusion_attn.py
308314
- pytest -v -s compile/test_silu_mul_quant_fusion.py
309315
- pytest -v -s compile/test_sequence_parallelism.py
310316
- pytest -v -s compile/test_async_tp.py
@@ -669,7 +675,7 @@ steps:
669675
- pytest -v -s plugins/lora_resolvers # unit tests for in-tree lora resolver plugins
670676

671677
- label: Multi-step Tests (4 GPUs) # 36min
672-
mirror_hardwares: [amdexperimental]
678+
mirror_hardwares: [amdexperimental, amdproduction]
673679
working_dir: "/vllm-workspace/tests"
674680
num_gpus: 4
675681
source_file_dependencies:

CMakeLists.txt

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -542,10 +542,10 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
542542

543543
# CUTLASS MoE kernels
544544

545-
# The MoE kernel cutlass_moe_mm requires CUDA 12.3 or later (and only works
545+
# The MoE kernel cutlass_moe_mm requires CUDA 12.3 or later (and ONLY works
546546
# on Hopper). get_cutlass_(pplx_)moe_mm_data should only be compiled
547547
# if it's possible to compile MoE kernels that use its output.
548-
cuda_archs_loose_intersection(SCALED_MM_ARCHS "9.0a;10.0a" "${CUDA_ARCHS}")
548+
cuda_archs_loose_intersection(SCALED_MM_ARCHS "9.0a" "${CUDA_ARCHS}")
549549
if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.3 AND SCALED_MM_ARCHS)
550550
set(SRCS "csrc/quantization/cutlass_w8a8/moe/grouped_mm_c3x.cu"
551551
"csrc/quantization/cutlass_w8a8/moe/moe_data.cu")

benchmarks/benchmark_latency.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -123,7 +123,7 @@ def run_to_completion(profile_dir: Optional[str] = None):
123123
save_to_pytorch_benchmark_format(args, results)
124124

125125

126-
if __name__ == "__main__":
126+
def create_argument_parser():
127127
parser = FlexibleArgumentParser(
128128
description="Benchmark the latency of processing a single batch of "
129129
"requests till completion."
@@ -171,6 +171,12 @@ def run_to_completion(profile_dir: Optional[str] = None):
171171
# V1 enables prefix caching by default which skews the latency
172172
# numbers. We need to disable prefix caching by default.
173173
parser.set_defaults(enable_prefix_caching=False)
174+
175+
return parser
176+
177+
178+
if __name__ == "__main__":
179+
parser = create_argument_parser()
174180
args = parser.parse_args()
175181
if args.profile and not envs.VLLM_TORCH_PROFILER_DIR:
176182
raise OSError(

benchmarks/benchmark_long_document_qa_throughput.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -142,7 +142,7 @@ def main(args):
142142
)
143143

144144

145-
if __name__ == "__main__":
145+
def create_argument_parser():
146146
parser = FlexibleArgumentParser(
147147
description="Benchmark the performance with or "
148148
"without automatic prefix caching."
@@ -192,5 +192,11 @@ def main(args):
192192
)
193193

194194
parser = EngineArgs.add_cli_args(parser)
195+
196+
return parser
197+
198+
199+
if __name__ == "__main__":
200+
parser = create_argument_parser()
195201
args = parser.parse_args()
196202
main(args)

benchmarks/benchmark_prefix_caching.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -218,7 +218,7 @@ def main(args):
218218
)
219219

220220

221-
if __name__ == "__main__":
221+
def create_argument_parser():
222222
parser = FlexibleArgumentParser(
223223
description="Benchmark the performance with or without "
224224
"automatic prefix caching."
@@ -268,5 +268,11 @@ def main(args):
268268
)
269269

270270
parser = EngineArgs.add_cli_args(parser)
271+
272+
return parser
273+
274+
275+
if __name__ == "__main__":
276+
parser = create_argument_parser()
271277
args = parser.parse_args()
272278
main(args)

benchmarks/benchmark_prioritization.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -161,7 +161,7 @@ def main(args: argparse.Namespace):
161161
json.dump(results, f, indent=4)
162162

163163

164-
if __name__ == "__main__":
164+
def create_argument_parser():
165165
parser = FlexibleArgumentParser(description="Benchmark the throughput.")
166166
parser.add_argument(
167167
"--backend", type=str, choices=["vllm", "hf", "mii"], default="vllm"
@@ -204,6 +204,12 @@ def main(args: argparse.Namespace):
204204
)
205205

206206
parser = EngineArgs.add_cli_args(parser)
207+
208+
return parser
209+
210+
211+
if __name__ == "__main__":
212+
parser = create_argument_parser()
207213
args = parser.parse_args()
208214
if args.tokenizer is None:
209215
args.tokenizer = args.model

benchmarks/benchmark_serving.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -875,7 +875,7 @@ def main(args: argparse.Namespace):
875875
save_to_pytorch_benchmark_format(args, result_json, file_name)
876876

877877

878-
if __name__ == "__main__":
878+
def create_argument_parser():
879879
parser = FlexibleArgumentParser(
880880
description="Benchmark the online serving throughput."
881881
)
@@ -1225,6 +1225,10 @@ def main(args: argparse.Namespace):
12251225
"script chooses a LoRA module at random.",
12261226
)
12271227

1228-
args = parser.parse_args()
1228+
return parser
1229+
12291230

1231+
if __name__ == "__main__":
1232+
parser = create_argument_parser()
1233+
args = parser.parse_args()
12301234
main(args)

benchmarks/benchmark_serving_structured_output.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -850,7 +850,7 @@ def main(args: argparse.Namespace):
850850
json.dump(results, outfile, indent=4)
851851

852852

853-
if __name__ == "__main__":
853+
def create_argument_parser():
854854
parser = FlexibleArgumentParser(
855855
description="Benchmark the online serving throughput."
856856
)
@@ -1034,5 +1034,10 @@ def main(args: argparse.Namespace):
10341034
help="Ratio of Structured Outputs requests",
10351035
)
10361036

1037+
return parser
1038+
1039+
1040+
if __name__ == "__main__":
1041+
parser = create_argument_parser()
10371042
args = parser.parse_args()
10381043
main(args)

benchmarks/benchmark_throughput.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -595,7 +595,7 @@ def validate_args(args):
595595
)
596596

597597

598-
if __name__ == "__main__":
598+
def create_argument_parser():
599599
parser = FlexibleArgumentParser(description="Benchmark the throughput.")
600600
parser.add_argument(
601601
"--backend",
@@ -717,6 +717,12 @@ def validate_args(args):
717717
)
718718

719719
parser = AsyncEngineArgs.add_cli_args(parser)
720+
721+
return parser
722+
723+
724+
if __name__ == "__main__":
725+
parser = create_argument_parser()
720726
args = parser.parse_args()
721727
if args.tokenizer is None:
722728
args.tokenizer = args.model

0 commit comments

Comments (0)