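"""LoRA fine-tuning entry point for plancraft.

Loads a base model (Llama 3.1 8B Instruct or Idefics2-8b-chatty), attaches a
LoRA adapter, and fine-tunes it with the Hugging Face Trainer. All settings
come from the hydra config in configs/train.yaml; a possible invocation
(override keys assumed from the config fields read below):

    python train.py training.base_model=llama3 training.lora_r=64
"""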
import warnings
import hydra
import torch
import wandb
from loguru import logger
from peft import LoraConfig, get_peft_model
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
EarlyStoppingCallback,
Idefics2ForConditionalGeneration,
Trainer,
TrainingArguments,
)
from plancraft.config import TrainConfig
from plancraft.environment.actions import convert_from_slot_index
from plancraft.train.dataset import get_dataset_and_collate

warnings.filterwarnings("ignore")


def flatten_cfg(cfg):
    # hydra can wrap the config in an extra single-key layer when it is
    # loaded via a config path; unwrap until the actual config remains
    if len(cfg) == 1:
        return flatten_cfg(cfg[list(cfg.keys())[0]])
    return cfg


@hydra.main(config_path="configs", config_name="train", version_base=None)
def main(cfg):
    logger.info(cfg)
cfg = TrainConfig(**flatten_cfg(dict(cfg)))
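    # allow float32 matmuls to use faster, lower-precision (bfloat16) internal
    # math; harmless here since the model weights are loaded in bfloat16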
torch.set_float32_matmul_precision("medium")
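
    # pick the base checkpoint and the modules that will receive LoRA adapters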
if cfg.training.base_model == "llama3":
model_name = "/nfs/public/hf/models/meta-llama/Meta-Llama-3.1-8B-Instruct"
model = AutoModelForCausalLM.from_pretrained(
model_name,
device_map="auto",
torch_dtype=torch.bfloat16,
attn_implementation="flash_attention_2",
)
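        # apply LoRA to the attention projections (q/k/v) of each decoder layer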
target_modules = [
"q_proj",
"v_proj",
"k_proj",
]
elif cfg.training.base_model == "idefics2":
model_name = "/nfs/public/hf/models/HuggingFaceM4/idefics2-8b-chatty"
model = Idefics2ForConditionalGeneration.from_pretrained(
model_name,
device_map="auto",
torch_dtype=torch.bfloat16,
)
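        # regex over module names: adapt attention/MLP projections in the text
        # model, modality projection, and perceiver resampler; the vision
        # encoder itself is not targeted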
target_modules = ".*(text_model|modality_projection|perceiver_resampler).*(down_proj|gate_proj|up_proj|k_proj|q_proj|v_proj|o_proj).*$"
else:
raise ValueError(f"Model {cfg.training.base_model} not supported")
tokenizer = AutoTokenizer.from_pretrained(model_name)
    # register the 46 inventory slot names as additional special tokens so
    # each slot maps to a single token id
    slots = [convert_from_slot_index(i) for i in range(46)]
tokenizer.add_special_tokens({"additional_special_tokens": slots})
    # resize the model's token embeddings to match the extended tokenizer vocabulary
model.resize_token_embeddings(len(tokenizer))
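
    # build train/val datasets and a collator matched to the model's chat
    # template and context-window limits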
train_dataset, val_dataset, collate_fn = get_dataset_and_collate(
tokenizer,
template_name=cfg.training.base_model,
max_length=cfg.training.max_seq_length,
max_message_window=cfg.training.max_message_window,
trace_mode=cfg.training.trace_mode,
)
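
    # embed_tokens and lm_head are trained in full (modules_to_save below)
    # because the vocabulary was just extended with the slot tokens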
lora_config = LoraConfig(
r=cfg.training.lora_r,
lora_alpha=cfg.training.lora_alpha,
lora_dropout=cfg.training.lora_dropout,
target_modules=target_modules,
init_lora_weights="gaussian",
bias="none",
task_type="CAUSAL_LM",
modules_to_save=[
"embed_tokens",
"lm_head",
], # train and save embeddings/output layer
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
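
    # run name encodes trace mode, base model, and LoRA hyperparameters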
name = f"{cfg.training.trace_mode}-{cfg.training.base_model}-r{cfg.training.lora_r}-a{cfg.training.lora_alpha}"
wandb.init(
project=cfg.wandb.project,
entity=cfg.wandb.entity,
mode=cfg.wandb.mode,
config=cfg.model_dump(),
name=name,
)
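
    # evaluate and checkpoint once per epoch; keep only the best checkpoint
    # by eval_loss and reload it when training ends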
training_args = TrainingArguments(
output_dir=f"outputs/{name}",
evaluation_strategy="epoch",
save_strategy="epoch",
per_device_train_batch_size=cfg.training.batch_size,
per_device_eval_batch_size=cfg.training.batch_size,
num_train_epochs=cfg.training.num_train_epochs,
gradient_accumulation_steps=cfg.training.gradient_accumulation_steps,
max_grad_norm=cfg.training.max_grad_norm,
learning_rate=cfg.training.learning_rate,
optim="adamw_hf",
lr_scheduler_type="cosine",
warmup_ratio=cfg.training.warmup_ratio,
dataloader_num_workers=cfg.training.num_workers,
dataloader_pin_memory=True,
logging_dir=f"outputs/logs/{name}",
logging_steps=1,
save_total_limit=1,
load_best_model_at_end=True,
metric_for_best_model="eval_loss",
gradient_checkpointing=False,
        bf16=torch.cuda.is_bf16_supported(),  # enable bf16 only when the GPU supports it
report_to="wandb",
)
    # Initialize the Hugging Face Trainer; stop early after one epoch without
    # improvement in eval_loss
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=val_dataset,
data_collator=collate_fn,
callbacks=[EarlyStoppingCallback(early_stopping_patience=1)],
)
trainer.train()
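
    # optionally publish the fine-tuned adapter and tokenizer to the Hub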
    if cfg.training.push_to_hub:
        model.push_to_hub(name)
        tokenizer.push_to_hub(name)


if __name__ == "__main__":
    main()