Commit 4f40c6d

[Examples] Use more robust splits (#1544)
## Purpose ##

* Make the examples more robust to custom model ids that don't have a "/" in the name
* Many users replace the model ids with local paths. These local paths do not necessarily have "/"s in them, so the split fails at the very end of compression, which is decidedly a bad time
* Also account for cases where users pass paths to directories whose names end with "/"

## Changes ##

* `model_id.split("/")[1]` -> `model_id.rstrip("/").split("/")[-1]`

## Testing ##

* `grep -r -i 'MODEL_ID.split("/")\[1\]' src examples/ tests`

---------

Signed-off-by: Kyle Sayers <[email protected]>
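A minimal sketch of the failure modes this commit fixes; the local-path model ids below are hypothetical, for illustration only:

```python
model_ids = [
    "meta-llama/Meta-Llama-3-70B-Instruct",  # Hugging Face id: old split happens to work
    "my-local-model",                        # no "/": old split raises IndexError
    "/models/my-local-model/",               # trailing "/": old split picks the wrong part
]

for model_id in model_ids:
    # Old behavior: take the second "/"-separated component.
    try:
        old = model_id.split("/")[1]
    except IndexError:
        old = "<IndexError>"
    # New behavior: drop any trailing "/", then take the last path component.
    new = model_id.rstrip("/").split("/")[-1]
    print(f"{model_id!r}: old={old!r}, new={new!r}")
```

For the first id both forms return `Meta-Llama-3-70B-Instruct`; for the second the old form raises, and for the third it silently returns `models` instead of `my-local-model`.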
1 parent: c8de4ba

39 files changed: +42 −40 lines

examples/awq/llama_example.py

Lines changed: 1 addition & 1 deletion
@@ -72,6 +72,6 @@ def tokenize(sample):
 print("==========================================\n\n")
 
 # Save to disk compressed.
-SAVE_DIR = MODEL_ID.split("/")[-1] + "-awq-asym"
+SAVE_DIR = MODEL_ID.rstrip("/").split("/")[-1] + "-awq-asym"
 model.save_pretrained(SAVE_DIR, save_compressed=True)
 tokenizer.save_pretrained(SAVE_DIR)

examples/awq/qwen3_moe_example.py

Lines changed: 1 addition & 1 deletion
@@ -77,6 +77,6 @@ def tokenize(sample):
 print("==========================================\n\n")
 
 # Save to disk compressed.
-SAVE_DIR = MODEL_ID.split("/")[-1] + "-awq-sym"
+SAVE_DIR = MODEL_ID.rstrip("/").split("/")[-1] + "-awq-sym"
 model.save_pretrained(SAVE_DIR, save_compressed=True)
 tokenizer.save_pretrained(SAVE_DIR)

examples/big_models_with_accelerate/cpu_offloading_fp8.py

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
 from llmcompressor.modifiers.quantization import QuantizationModifier
 
 MODEL_ID = "meta-llama/Meta-Llama-3-70B-Instruct"
-OUTPUT_DIR = MODEL_ID.split("/")[1] + "-FP8-Dynamic"
+OUTPUT_DIR = MODEL_ID.rstrip("/").split("/")[-1] + "-FP8-Dynamic"
 
 # Load model
 # Note: device_map="auto" will offload to CPU if not enough space on GPU.

examples/big_models_with_accelerate/mult_gpus_int8_device_map.py

Lines changed: 1 addition & 1 deletion
@@ -68,7 +68,7 @@ def tokenize(sample):
     ),
 ]
 
-SAVE_DIR = MODEL_ID.split("/")[1] + "-INT8"
+SAVE_DIR = MODEL_ID.rstrip("/").split("/")[-1] + "-INT8"
 
 oneshot(
     model=model,

examples/big_models_with_accelerate/multi_gpu_int8.py

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 from llmcompressor.modifiers.quantization import GPTQModifier
 
 MODEL_ID = "meta-llama/Meta-Llama-3-70B-Instruct"
-SAVE_DIR = MODEL_ID.split("/")[1] + "-W8A8-Dynamic"
+SAVE_DIR = MODEL_ID.rstrip("/").split("/")[-1] + "-W8A8-Dynamic"
 
 # 1) Load model (device_map="auto" will shard the model over multiple GPUs!).
 model = AutoModelForCausalLM.from_pretrained(

examples/multimodal_audio/whisper_example.py

Lines changed: 1 addition & 1 deletion
@@ -106,6 +106,6 @@ def data_collator(batch):
 # and it was a great thing for what it was at the time but it's not a passive house
 
 # Save to disk compressed.
-SAVE_DIR = MODEL_ID.split("/")[1] + "-W4A16-G128"
+SAVE_DIR = MODEL_ID.rstrip("/").split("/")[-1] + "-W4A16-G128"
 model.save_pretrained(SAVE_DIR, save_compressed=True)
 processor.save_pretrained(SAVE_DIR)

examples/multimodal_vision/gemma3_example.py

Lines changed: 1 addition & 1 deletion
@@ -74,6 +74,6 @@ def data_collator(batch):
 print("==========================================")
 
 # Save to disk compressed.
-SAVE_DIR = model_id.split("/")[1] + "-W4A16-G128"
+SAVE_DIR = model_id.rstrip("/").split("/")[-1] + "-W4A16-G128"
 model.save_pretrained(SAVE_DIR, save_compressed=True)
 processor.save_pretrained(SAVE_DIR)

examples/multimodal_vision/idefics3_example.py

Lines changed: 1 addition & 1 deletion
@@ -115,6 +115,6 @@ def tokenize(sample):
 print("==========================================")
 
 # Save to disk compressed.
-SAVE_DIR = model_id.split("/")[1] + "-W4A16-G128"
+SAVE_DIR = model_id.rstrip("/").split("/")[-1] + "-W4A16-G128"
 model.save_pretrained(SAVE_DIR, save_compressed=True)
 processor.save_pretrained(SAVE_DIR)

examples/multimodal_vision/llava_example.py

Lines changed: 1 addition & 1 deletion
@@ -70,6 +70,6 @@ def data_collator(batch):
 print("==========================================")
 
 # Save to disk compressed.
-SAVE_DIR = model_id.split("/")[1] + "-W4A16-G128"
+SAVE_DIR = model_id.rstrip("/").split("/")[-1] + "-W4A16-G128"
 model.save_pretrained(SAVE_DIR, save_compressed=True)
 processor.save_pretrained(SAVE_DIR)

examples/multimodal_vision/mistral3_example.py

Lines changed: 1 addition & 1 deletion
@@ -84,6 +84,6 @@ def data_collator(batch):
 print("==========================================")
 
 # Save to disk compressed.
-SAVE_DIR = model_id.split("/")[1] + "-W4A16-G128"
+SAVE_DIR = model_id.rstrip("/").split("/")[-1] + "-W4A16-G128"
 model.save_pretrained(SAVE_DIR, save_compressed=True)
 processor.save_pretrained(SAVE_DIR)
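As an aside (not part of this commit), `pathlib` would give the same result for these inputs; the `save_dir_name` helper below is hypothetical, shown only to illustrate the equivalence:

```python
from pathlib import PurePosixPath

def save_dir_name(model_id: str, suffix: str) -> str:
    # PurePosixPath.name ignores a trailing "/" and handles ids with no "/" at all,
    # matching model_id.rstrip("/").split("/")[-1] for the cases above.
    return PurePosixPath(model_id).name + suffix

assert save_dir_name("meta-llama/Meta-Llama-3-70B-Instruct", "-W4A16-G128") == (
    "Meta-Llama-3-70B-Instruct-W4A16-G128"
)
assert save_dir_name("/models/my-local-model/", "-W4A16-G128") == (
    "my-local-model-W4A16-G128"
)
```

The `rstrip`/`split` one-liner was presumably kept in the commit because it requires no import and matches the pattern already used across the examples.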
