Skip to content

Commit 61de3ef

Browse files
authored
[Model] Remove image mm limit for LLaMa4 (#16365)
Signed-off-by: Ye (Charlotte) Qi <[email protected]>
1 parent ec1f9c8 commit 61de3ef

File tree

2 files changed

+26
-7
lines changed

2 files changed

+26
-7
lines changed

examples/offline_inference/vision_language_multi_image.py

Lines changed: 23 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,16 @@
2222
IMAGE_URLS = [
2323
"https://upload.wikimedia.org/wikipedia/commons/d/da/2015_Kaczka_krzy%C5%BCowka_w_wodzie_%28samiec%29.jpg",
2424
"https://upload.wikimedia.org/wikipedia/commons/7/77/002_The_lion_king_Snyggve_in_the_Serengeti_National_Park_Photo_by_Giles_Laurent.jpg",
25+
"https://upload.wikimedia.org/wikipedia/commons/2/26/Ultramarine_Flycatcher_%28Ficedula_superciliaris%29_Naggar%2C_Himachal_Pradesh%2C_2013_%28cropped%29.JPG",
26+
"https://upload.wikimedia.org/wikipedia/commons/thumb/e/e5/Anim1754_-_Flickr_-_NOAA_Photo_Library_%281%29.jpg/2560px-Anim1754_-_Flickr_-_NOAA_Photo_Library_%281%29.jpg",
27+
"https://upload.wikimedia.org/wikipedia/commons/d/d4/Starfish%2C_Caswell_Bay_-_geograph.org.uk_-_409413.jpg",
28+
"https://upload.wikimedia.org/wikipedia/commons/6/69/Grapevinesnail_01.jpg",
29+
"https://upload.wikimedia.org/wikipedia/commons/thumb/0/0b/Texas_invasive_Musk_Thistle_1.jpg/1920px-Texas_invasive_Musk_Thistle_1.jpg",
30+
"https://upload.wikimedia.org/wikipedia/commons/thumb/7/7a/Huskiesatrest.jpg/2880px-Huskiesatrest.jpg",
31+
"https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Orange_tabby_cat_sitting_on_fallen_leaves-Hisashi-01A.jpg/1920px-Orange_tabby_cat_sitting_on_fallen_leaves-Hisashi-01A.jpg",
32+
"https://upload.wikimedia.org/wikipedia/commons/3/30/George_the_amazing_guinea_pig.jpg",
33+
"https://upload.wikimedia.org/wikipedia/commons/thumb/1/1f/Oryctolagus_cuniculus_Rcdo.jpg/1920px-Oryctolagus_cuniculus_Rcdo.jpg",
34+
"https://upload.wikimedia.org/wikipedia/commons/9/98/Horse-and-pony.jpg",
2535
]
2636

2737

@@ -285,8 +295,7 @@ def load_llama4(question: str, image_urls: list[str]) -> ModelRequestData:
285295

286296
engine_args = EngineArgs(
287297
model=model_name,
288-
max_model_len=8192,
289-
max_num_seqs=4,
298+
max_model_len=131072,
290299
tensor_parallel_size=8,
291300
limit_mm_per_prompt={"image": len(image_urls)},
292301
)
@@ -660,7 +669,7 @@ def run_generate(model, question: str, image_urls: list[str],
660669
llm.llm_engine.add_lora(lora_request=lora_request)
661670

662671
sampling_params = SamplingParams(temperature=0.0,
663-
max_tokens=128,
672+
max_tokens=256,
664673
stop_token_ids=req_data.stop_token_ids)
665674

666675
outputs = llm.generate(
@@ -694,7 +703,7 @@ def run_chat(model: str, question: str, image_urls: list[str],
694703
llm.llm_engine.add_lora(lora_request=lora_request)
695704

696705
sampling_params = SamplingParams(temperature=0.0,
697-
max_tokens=128,
706+
max_tokens=256,
698707
stop_token_ids=req_data.stop_token_ids)
699708
outputs = llm.chat(
700709
[{
@@ -729,10 +738,12 @@ def main(args: Namespace):
729738
method = args.method
730739
seed = args.seed
731740

741+
image_urls = IMAGE_URLS[:args.num_images]
742+
732743
if method == "generate":
733-
run_generate(model, QUESTION, IMAGE_URLS, seed)
744+
run_generate(model, QUESTION, image_urls, seed)
734745
elif method == "chat":
735-
run_chat(model, QUESTION, IMAGE_URLS, seed)
746+
run_chat(model, QUESTION, image_urls, seed)
736747
else:
737748
raise ValueError(f"Invalid method: {method}")
738749

@@ -757,6 +768,12 @@ def main(args: Namespace):
757768
type=int,
758769
default=None,
759770
help="Set the seed when initializing `vllm.LLM`.")
771+
parser.add_argument(
772+
"--num-images",
773+
"-n",
774+
choices=list(range(1, 13)), # 12 is the max number of images
775+
default=2,
776+
help="Number of images to use for the demo.")
760777

761778
args = parser.parse_args()
762779
main(args)

vllm/model_executor/models/mllama4.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -477,7 +477,9 @@ def get_hf_processor(self, **kwargs: object) -> Llama4Processor:
477477
**kwargs)
478478

479479
def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
480-
return {"image": 10}
480+
# Although vLLM can support more images from an infra capability
481+
# perspective, we do not recommend using >10 images in practice.
482+
return {"image": None}
481483

482484
@staticmethod
483485
def get_patch_per_chunk(vision_config: Llama4VisionConfig) -> int:

0 commit comments

Comments (0)