We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 0c81fa1 commit 79ef31b — Copy full SHA for 79ef31b
examples/llm_qat/export.py
@@ -34,7 +34,7 @@
34
mto.enable_huggingface_checkpointing()
35
36
37
-def get_lora_model(
+def get_model(
38
ckpt_path: str,
39
device="cuda",
40
):
@@ -66,7 +66,7 @@ def get_lora_model(
66
67
def main(args):
68
# Load model
69
- model = get_lora_model(args.pyt_ckpt_path, args.device)
+ model = get_model(args.pyt_ckpt_path, args.device)
70
tokenizer = AutoTokenizer.from_pretrained(args.pyt_ckpt_path)
71
is_qlora = hasattr(model, "peft_config")
72
0 commit comments