Skip to content

Commit 34ef394

Browse files
committed
update examples
Signed-off-by: Kyle Sayers <[email protected]>
1 parent b336fa2 commit 34ef394

File tree

8 files changed

+8
-8
lines changed

8 files changed

+8
-8
lines changed

examples/multimodal_vision/idefics3_example.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -31,7 +31,6 @@ def data_collator(batch):
3131
GPTQModifier(
3232
targets="Linear",
3333
scheme="W4A16",
34-
sequential_targets=["LlamaDecoderLayer"],
3534
ignore=["re:.*lm_head", "re:model.vision_model.*", "re:model.connector.*"],
3635
),
3736
]
@@ -91,6 +90,7 @@ def tokenize(sample):
9190
num_calibration_samples=NUM_CALIBRATION_SAMPLES,
9291
trust_remote_code_model=True,
9392
data_collator=data_collator,
93+
sequential_targets=["LlamaDecoderLayer"],
9494
)
9595

9696
# Confirm generations of the quantized model look sane.

examples/multimodal_vision/llava_example.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -30,7 +30,6 @@ def data_collator(batch):
3030
GPTQModifier(
3131
targets="Linear",
3232
scheme="W4A16",
33-
sequential_targets=["LlamaDecoderLayer"],
3433
ignore=["re:.*lm_head", "re:vision_tower.*", "re:multi_modal_projector.*"],
3534
),
3635
]
@@ -46,6 +45,7 @@ def data_collator(batch):
4645
num_calibration_samples=NUM_CALIBRATION_SAMPLES,
4746
trust_remote_code_model=True,
4847
data_collator=data_collator,
48+
sequential_targets=["LlamaDecoderLayer"],
4949
)
5050

5151
# Confirm generations of the quantized model look sane.

examples/multimodal_vision/mistral3_example.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -43,7 +43,6 @@ def data_collator(batch):
4343
GPTQModifier(
4444
targets="Linear",
4545
scheme="W4A16",
46-
sequential_targets=["MistralDecoderLayer"],
4746
ignore=["re:.*lm_head", "re:vision_tower.*", "re:multi_modal_projector.*"],
4847
),
4948
]
@@ -59,6 +58,7 @@ def data_collator(batch):
5958
num_calibration_samples=NUM_CALIBRATION_SAMPLES,
6059
trust_remote_code_model=True,
6160
data_collator=data_collator,
61+
sequential_targets=["MistralDecoderLayer"],
6262
)
6363

6464
# Confirm generations of the quantized model look sane.

examples/multimodal_vision/mllama_example.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -30,7 +30,6 @@ def data_collator(batch):
3030
GPTQModifier(
3131
targets="Linear",
3232
scheme="W4A16",
33-
sequential_targets=["MllamaSelfAttentionDecoderLayer"],
3433
ignore=["re:.*lm_head", "re:multi_modal_projector.*", "re:vision_model.*"],
3534
),
3635
]
@@ -46,6 +45,7 @@ def data_collator(batch):
4645
num_calibration_samples=NUM_CALIBRATION_SAMPLES,
4746
trust_remote_code_model=True,
4847
data_collator=data_collator,
48+
sequential_targets=["MllamaSelfAttentionDecoderLayer"],
4949
)
5050

5151
# Confirm generations of the quantized model look sane.

examples/multimodal_vision/phi3_vision_example.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -75,7 +75,6 @@ def data_collator(batch):
7575
recipe = GPTQModifier(
7676
targets="Linear",
7777
scheme="W4A16",
78-
sequential_targets=["Phi3DecoderLayer"],
7978
ignore=["lm_head", "re:model.vision_embed_tokens.*"],
8079
)
8180

@@ -88,6 +87,7 @@ def data_collator(batch):
8887
num_calibration_samples=NUM_CALIBRATION_SAMPLES,
8988
trust_remote_code_model=True,
9089
data_collator=data_collator,
90+
sequential_targets=["Phi3DecoderLayer"],
9191
)
9292

9393
# Confirm generations of the quantized model look sane.

examples/multimodal_vision/pixtral_example.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -36,7 +36,6 @@ def data_collator(batch):
3636
GPTQModifier(
3737
targets="Linear",
3838
scheme="W4A16",
39-
sequential_targets=["MistralDecoderLayer"],
4039
ignore=["re:.*lm_head", "re:vision_tower.*", "re:multi_modal_projector.*"],
4140
),
4241
]
@@ -52,6 +51,7 @@ def data_collator(batch):
5251
num_calibration_samples=NUM_CALIBRATION_SAMPLES,
5352
trust_remote_code_model=True,
5453
data_collator=data_collator,
54+
sequential_targets=["MistralDecoderLayer"],
5555
)
5656

5757
# Confirm generations of the quantized model look sane.

examples/multimodal_vision/qwen2_vl_example.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -79,7 +79,6 @@ def data_collator(batch):
7979
GPTQModifier(
8080
targets="Linear",
8181
scheme="W4A16",
82-
sequential_targets=["Qwen2VLDecoderLayer"],
8382
ignore=["lm_head", "re:visual.*", "re:model.visual.*"],
8483
),
8584
]
@@ -94,6 +93,7 @@ def data_collator(batch):
9493
num_calibration_samples=NUM_CALIBRATION_SAMPLES,
9594
trust_remote_code_model=True,
9695
data_collator=data_collator,
96+
sequential_targets=["Qwen2VLDecoderLayer"],
9797
)
9898

9999
# Confirm generations of the quantized model look sane.

examples/multimodal_vision/qwen_2_5_vl_example.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -73,7 +73,6 @@ def data_collator(batch):
7373
GPTQModifier(
7474
targets="Linear",
7575
scheme="W4A16",
76-
sequential_targets=["Qwen2_5_VLDecoderLayer"],
7776
ignore=["lm_head", "re:visual.*", "re:model.visual.*"],
7877
),
7978
]
@@ -88,6 +87,7 @@ def data_collator(batch):
8887
num_calibration_samples=NUM_CALIBRATION_SAMPLES,
8988
trust_remote_code_model=True,
9089
data_collator=data_collator,
90+
sequential_targets=["Qwen2_5_VLDecoderLayer"],
9191
)
9292

9393
# Confirm generations of the quantized model look sane.

0 commit comments

Comments (0)