
Commit f76bc85

Read number of vision tower layers from config for Pixtral (fix Pixtral-Large)
1 parent 4061c24 commit f76bc85

File tree

2 files changed: +4 −1 lines changed

examples/multimodal.py

Lines changed: 3 additions & 0 deletions
@@ -18,6 +18,9 @@
 from PIL import Image
 import requests
 
+import torch
+torch.set_printoptions(precision = 5, sci_mode = False, linewidth=200)
+
 # Models used:
 #
 # Pixtral:
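For context, torch.set_printoptions is a standard PyTorch call that only changes how tensors are formatted when printed; a minimal illustration of the options added above, using a made-up tensor:

import torch

# Same options as added to the example script: fixed-point notation with
# 5 decimal places and a 200-character line width before wrapping.
torch.set_printoptions(precision=5, sci_mode=False, linewidth=200)

x = torch.tensor([1.23456789e-4, 2.0, 3.14159265])
print(x)  # tensor([0.00012, 2.00000, 3.14159]) rather than scientific notation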

exllamav2/config.py

Lines changed: 1 addition & 1 deletion
@@ -482,7 +482,7 @@ def check_keys(archparams, prefix):
 patch_size = read(read_config, int, ["vision_config->patch_size"], no_default)
 self.vision_rope_theta = read(read_config, int, ["vision_config->rope_theta"], no_default)
 self.vision_feature_layer = read(read_config, int, ["vision_feature_layer"], no_default)
-self.vision_num_layers = 24
+self.vision_num_layers = read(read_config, int, ["vision_config->num_hidden_layers"], 24)
 self.vision_intermediate_size = read(read_config, int, ["vision_config->intermediate_size"], self.hidden_size)
 
 image_processor_type = read(read_prep_config, str, ["image_processor_type"], no_default)
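The substantive change: the vision tower depth was previously hard-coded to 24, a value that does not hold for Pixtral-Large, and is now read from vision_config->num_hidden_layers in the model's config.json, with 24 kept as the fallback. A minimal sketch of that lookup pattern, using a hypothetical read_nested helper rather than the repo's actual read function:

import json

# Hypothetical helper sketching the "outer->inner" nested-key lookup used in
# config.py; the repo's actual read() function has a different signature.
def read_nested(cfg: dict, key: str, default):
    node = cfg
    for part in key.split("->"):
        if not isinstance(node, dict) or part not in node:
            return default
        node = node[part]
    return node

# Assumed path to a downloaded model's config.json.
with open("config.json") as f:
    cfg = json.load(f)

# Mirrors the change above: take the layer count from the checkpoint's
# vision_config, falling back to 24 (the previously hard-coded value).
vision_num_layers = read_nested(cfg, "vision_config->num_hidden_layers", 24)
print(vision_num_layers)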

0 commit comments
