import os
import json

import torch
import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from huggingface_hub import login
from datasets import Dataset
from peft import LoraConfig, PeftModel, prepare_model_for_kbit_training, get_peft_model
from trl import SFTTrainer

from data_selection import Translation, create_datasets

hf_access_token = os.getenv("HF_ACCESS_TOKEN")
assert hf_access_token is not None, "You need to set the Hugging Face access token environment variable: export HF_ACCESS_TOKEN=hf_TODO"

login(token=hf_access_token)

def training(translation):

    create_datasets(translation)

    if translation == Translation.TextToGloss:
        translation_dir = "t2g_llama"
    elif translation == Translation.GlossToText:
        translation_dir = "g2t_llama"
    else:
        raise ValueError("Invalid translation")

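    # create_datasets(translation) is assumed to have written these JSON splits:
    # lists of examples carrying a "text" field, as consumed by SFTTrainer below.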
    with open(f"train_data_{translation_dir}.json", "r") as f:
        train_data = json.load(f)

    with open(f"val_data_{translation_dir}.json", "r") as f:
        val_data = json.load(f)

    train_dataset = Dataset.from_list(train_data)
    val_dataset = Dataset.from_list(val_data)

    torch.cuda.empty_cache()
    cache_dir = "/ds/videos/AVASAG/cache"
    model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"

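    # Llama models ship without a dedicated padding token, so the EOS token is
    # reused for padding and sequences are padded on the right.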
    tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_access_token, cache_dir=cache_dir, add_eos_token=True)
    # Set padding token
    tokenizer.pad_token = tokenizer.eos_token
    tokenizer.padding_side = "right"

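    # QLoRA-style quantization: load the base model in 4-bit NF4 with double
    # quantization, running compute in bfloat16 to reduce GPU memory usage.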
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_use_double_quant=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
    )

    save_folder = os.path.join("/ds/videos/AVASAG/llama_finetune/", translation_dir)
    sft_model_name = os.path.join(save_folder, "llama-31-it-8b-sft")
    merged_model_name = os.path.join(save_folder, "llama-31-it-8b-sft-merged")

    model = AutoModelForCausalLM.from_pretrained(
        model_id, device_map="auto", torch_dtype=torch.bfloat16,
        quantization_config=bnb_config, token=hf_access_token, cache_dir=cache_dir)

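    # Prepare the 4-bit model for LoRA training: enables gradient checkpointing
    # by default and casts selected layers to fp32 for numerical stability.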
    model = prepare_model_for_kbit_training(model)

    modules = ["down_proj", "up_proj", "gate_proj"]

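    # LoRA configuration: rank-64 adapters (alpha 32, dropout 0.05) are injected
    # only into the MLP projections listed above; attention projections stay frozen.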
    lora_config = LoraConfig(
        r=64,
        lora_alpha=32,
        target_modules=modules,
        lora_dropout=0.05,
        bias="none",
        task_type="CAUSAL_LM",
    )

    model = get_peft_model(model, lora_config)

    trainable, total = model.get_nb_trainable_parameters()
    print(f"Trainable: {trainable} | total: {total} | Percentage: {trainable/total*100:.4f}%")

    torch.cuda.empty_cache()

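    # Supervised fine-tuning on the "text" field. The causal-LM collator (mlm=False)
    # computes the loss over the full sequence; with per-device batch size 1 and
    # gradient accumulation of 4, the effective batch size is 4.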
    trainer = SFTTrainer(
        model=model,
        train_dataset=train_dataset,
        eval_dataset=val_dataset,
        dataset_text_field="text",
        peft_config=lora_config,
        args=transformers.TrainingArguments(
            report_to=[],  # Disable logging integrations
            per_device_train_batch_size=1,
            gradient_accumulation_steps=4,
            warmup_ratio=0.03,
            max_steps=1000,
            learning_rate=2e-5,
            logging_steps=1,
            output_dir=f"/ds/videos/AVASAG/llama_finetune/outputs_{translation_dir}",
            optim="paged_adamw_8bit",
            save_strategy="epoch",
            ddp_find_unused_parameters=False,
        ),
        data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),
    )
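    # Disable the KV cache during training; it is only useful at inference time
    # and conflicts with gradient checkpointing.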
    model.config.use_cache = False
    trainer.train()

    trainer.model.save_pretrained(sft_model_name)

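    # Reload the base model in fp16 (without quantization), apply the saved LoRA
    # adapters, and merge them into the base weights so the result can be used
    # without PEFT at inference time.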
    base_model = AutoModelForCausalLM.from_pretrained(
        model_id,
        low_cpu_mem_usage=True,
        return_dict=True,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    merged_model = PeftModel.from_pretrained(base_model, sft_model_name)
    merged_model = merged_model.merge_and_unload()

    merged_model.save_pretrained(merged_model_name, safe_serialization=True)
    tokenizer.save_pretrained(merged_model_name)


if __name__ == "__main__":
    import sys

    if len(sys.argv) != 2:
        print("Usage: python k_fold.py [--textTogloss|--glossTotext]")
        sys.exit(1)

    if sys.argv[1] == "--textTogloss":
        print("Translating from Text to Gloss")
        translation = Translation.TextToGloss
    elif sys.argv[1] == "--glossTotext":
        print("Translating from Gloss to Text")
        translation = Translation.GlossToText
    else:
        print("You have to specify either --textTogloss or --glossTotext as an argument.")
        sys.exit(1)

    training(translation)