# predict.py
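# Batch inference script: loads a causal LM (optionally with a LoRA adapter)
# and generates one response per instruction in a JSON file.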
import torch
import json
import argparse
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
from utils import get_prompt, get_bnb_config


@torch.inference_mode()
def generate_predictions(model, tokenizer, data, max_length=2048, gen_max_len=256):
    """Generate one model response per instruction in `data`."""
    results = []
    for item in tqdm(data, desc="Generating"):
        prompt = get_prompt(item["instruction"])
        # Truncate long prompts so prompt + generated tokens fit the context window.
        inputs = tokenizer(
            prompt, return_tensors="pt", truncation=True, max_length=max_length
        ).to(model.device)
        output = model.generate(
            **inputs,
            max_new_tokens=gen_max_len,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
        # Decode only the newly generated tokens, skipping the echoed prompt.
        decoded = tokenizer.decode(
            output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
        )
        results.append({"instruction": item["instruction"], "output": decoded.strip()})
    return results


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_path",
        type=str,
        default="Qwen/Qwen3-4B",
        help="Path to base model. Default: Qwen/Qwen3-4B",
    )
    parser.add_argument(
        "--peft_path",
        type=str,
        default=None,
        help="Path to LoRA adapter. If omitted, the base model is used as-is.",
    )
    parser.add_argument(
        "--input_file",
        type=str,
        required=True,
        help="Input .json file path",
    )
    parser.add_argument(
        "--output_file",
        type=str,
        required=True,
        help="Output .json file path",
    )
    args = parser.parse_args()
    # ===== Load model =====
    print(f"[INFO] Loading base model: {args.base_model_path}")
    # get_bnb_config() (from utils) is expected to return a transformers
    # BitsAndBytesConfig for quantized loading.
    bnb_config = get_bnb_config()
    model = AutoModelForCausalLM.from_pretrained(
        args.base_model_path,
        device_map="auto",
        torch_dtype=torch.bfloat16,
        quantization_config=bnb_config,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        args.base_model_path, trust_remote_code=True
    )
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token_id = tokenizer.eos_token_id

    # ===== Apply LoRA =====
    if args.peft_path:
        print(f"[INFO] Loading LoRA adapter from: {args.peft_path}")
        model = PeftModel.from_pretrained(model, args.peft_path)
    else:
        print("[INFO] No LoRA adapter provided, using base model only.")
    model.eval()
    # ===== Load test data =====
    with open(args.input_file, "r", encoding="utf-8") as f:
        data = json.load(f)
    print(f"[INFO] Loaded {len(data)} samples from {args.input_file}")

    # ===== Generate predictions =====
    results = generate_predictions(model, tokenizer, data)

    # ===== Save output =====
    with open(args.output_file, "w", encoding="utf-8") as f:
        json.dump(results, f, ensure_ascii=False, indent=2)
    print(f"[INFO] Predictions saved to {args.output_file}")


if __name__ == "__main__":
    main()
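
# Example invocation (a sketch; the adapter and data paths below are
# placeholders, not paths from this repo):
#
#   python predict.py \
#       --base_model_path Qwen/Qwen3-4B \
#       --peft_path ./adapter_checkpoint \
#       --input_file ./test.json \
#       --output_file ./prediction.json
#
# The input file is assumed to be a JSON list of objects with an
# "instruction" field, e.g. [{"instruction": "..."}, ...], matching how
# generate_predictions() indexes each item.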