Skip to content

Commit 6fe80de

Browse files
authored
Merge pull request #124 from red-hat-data-services/rhoai-220-pyarrow
Add temp pyarrow and pandas dependencies, sync with v0.8.4.0_downstream
2 parents a387231 + 81f16fe commit 6fe80de

File tree

4 files changed

+22
-17
lines changed

4 files changed

+22
-17
lines changed

requirements/common.txt

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -43,3 +43,7 @@ watchfiles # required for http server to monitor the updates of TLS files
4343
python-json-logger # Used by logging as per examples/other/logging_configuration.md
4444
scipy # Required for phi-4-multimodal-instruct
4545
ninja # Required for xgrammar, rocm, tpu, xpu
46+
opentelemetry-sdk>=1.26.0 # vllm.tracing
47+
opentelemetry-api>=1.26.0 # vllm.tracing
48+
opentelemetry-exporter-otlp>=1.26.0 # vllm.tracing
49+
opentelemetry-semantic-conventions-ai>=0.4.1 # vllm.tracing

requirements/cuda.txt

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -5,9 +5,11 @@ numba == 0.60.0; python_version == '3.9' # v0.61 doesn't support Python 3.9. Req
55
numba == 0.61.2; python_version > '3.9'
66

77
# Dependencies for NVIDIA GPUs
8+
pyarrow == 19.0.1 # temporary fix for missing pyarrow in ray until 2.44.2 ray release
89
ray[cgraph]>=2.43.0, !=2.44.* # Ray Compiled Graph, required for pipeline parallelism in V1.
910
torch==2.6.0
1011
torchaudio==2.6.0
1112
# These must be updated alongside torch
1213
torchvision==0.21.0 # Required for phi3v processor. See https://github.com/pytorch/vision?tab=readme-ov-file#installation for corresponding version
1314
xformers==0.0.29.post2; platform_system == 'Linux' and platform_machine == 'x86_64' # Requires PyTorch 2.6.0
15+
pandas==2.2.3

requirements/test.txt

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -416,7 +416,7 @@ psutil==6.1.0
416416
# tensorizer
417417
py==1.11.0
418418
# via pytest-forked
419-
pyarrow==18.0.0
419+
pyarrow==19.0.1
420420
# via
421421
# datasets
422422
# genai-perf

tests/models/decoder_only/vision_language/test_models.py

Lines changed: 15 additions & 16 deletions
Original file line number | Diff line number | Diff line change
@@ -318,6 +318,21 @@
318318
use_tokenizer_eos=True,
319319
patch_hf_runner=model_utils.internvl_patch_hf_runner,
320320
),
321+
"llama4": VLMTestInfo(
322+
models=["meta-llama/Llama-4-Scout-17B-16E-Instruct"],
323+
prompt_formatter=lambda img_prompt: f"<|begin_of_text|><|header_start|>user<|header_end|>\n\n{img_prompt}<|eot|><|header_start|>assistant<|header_end|>\n\n", # noqa: E501
324+
img_idx_to_prompt=lambda _: "<|image|>",
325+
test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE),
326+
distributed_executor_backend="mp",
327+
image_size_factors=[(.25, 0.5, 1.0)],
328+
hf_model_kwargs={"device_map": "auto"},
329+
max_model_len=8192,
330+
max_num_seqs=4,
331+
dtype="bfloat16",
332+
auto_cls=AutoModelForImageTextToText,
333+
tensor_parallel_size=4,
334+
marks=multi_gpu_marks(num_gpus=4),
335+
),
321336
"llava_next": VLMTestInfo(
322337
models=["llava-hf/llava-v1.6-mistral-7b-hf"],
323338
test_type=(VLMTestType.IMAGE, VLMTestType.CUSTOM_INPUTS),
@@ -558,22 +573,6 @@
558573
limit_mm_per_prompt={"image": 1},
559574
)],
560575
),
561-
"llama4": VLMTestInfo(
562-
models=["meta-llama/Llama-4-Scout-17B-16E-Instruct"],
563-
prompt_formatter=lambda img_prompt: f"<|begin_of_text|><|header_start|>user<|header_end|>\n\n{img_prompt}<|eot|><|header_start|>assistant<|header_end|>\n\n", # noqa: E501
564-
img_idx_to_prompt=lambda _: "<|image|>",
565-
test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE),
566-
distributed_executor_backend="mp",
567-
image_size_factors=[(.25, 0.5, 1.0)],
568-
hf_model_kwargs={"device_map": "auto"},
569-
max_model_len=8192,
570-
max_num_seqs=4,
571-
dtype="bfloat16",
572-
auto_cls=AutoModelForImageTextToText,
573-
tensor_parallel_size=8,
574-
vllm_runner_kwargs={"gpu_memory_utilization": 0.8},
575-
marks=[large_gpu_mark(min_gb=80), multi_gpu_marks(num_gpus=8)],
576-
),
577576
}
578577
# yapf: enable
579578

0 commit comments

Comments (0)