Skip to content

Commit 3756f10

Browse files
committed
Remove trust_remote_code and move save to after generate
Signed-off-by: Fynn Schmitt-Ulms <fschmitt@redhat.com>
1 parent 33d6677 commit 3756f10

File tree

1 file changed

+7
-9
lines changed

1 file changed

+7
-9
lines changed

examples/awq/qwen3_coder_moe_example.py

Lines changed: 7 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -51,10 +51,8 @@ def preprocess(example):
5151

5252

5353
if __name__ == "__main__":
54-
model = AutoModelForCausalLM.from_pretrained(
55-
MODEL_ID, torch_dtype="auto", trust_remote_code=True
56-
)
57-
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
54+
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype="auto")
55+
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
5856

5957
###
6058
### Apply algorithms.
@@ -66,18 +64,18 @@ def preprocess(example):
6664
max_seq_length=MAX_SEQUENCE_LENGTH,
6765
num_calibration_samples=NUM_CALIBRATION_SAMPLES,
6866
log_dir=None,
69-
trust_remote_code_model=True,
7067
)
7168

72-
model.save_pretrained(SAVE_DIR)
73-
tokenizer.save_pretrained(SAVE_DIR)
74-
7569
# Confirm generations of the quantized model look sane.
7670
print("========== SAMPLE GENERATION ==============")
7771
dispatch_for_generation(model)
7872
input_ids = tokenizer(
7973
"Write a binary search function", return_tensors="pt"
8074
).input_ids.to(model.device)
81-
output = model.generate(input_ids, max_new_tokens=100)
75+
output = model.generate(input_ids, max_new_tokens=150)
8276
print(tokenizer.decode(output[0]))
8377
print("==========================================\n\n")
78+
79+
# Save model to disk
80+
model.save_pretrained(SAVE_DIR)
81+
tokenizer.save_pretrained(SAVE_DIR)

0 commit comments

Comments (0)