-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathroberta-train.py
More file actions
88 lines (70 loc) · 2.46 KB
/
roberta-train.py
File metadata and controls
88 lines (70 loc) · 2.46 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
from transformers import RobertaConfig
from transformers import RobertaForMaskedLM
from tokenizers import Tokenizer
from transformers import DataCollatorForLanguageModeling
from transformers import Trainer, TrainingArguments
from transformers import AutoTokenizer
import torch
from torch.utils.data import Dataset
from pathlib import Path
import random
import math
from datasets import load_dataset
# Configuration for a RoBERTa model trained from scratch.
# NOTE(review): vocab_size should match the trained tokenizer's vocabulary
# size -- confirm against the tokenizer files saved under "Robert".
config = RobertaConfig(vocab_size=50000)

# Initialize the model from the configuration (random weights, no pretraining).
model = RobertaForMaskedLM(config=config)
print('Num parameters: ', model.num_parameters())
print(f"\n\nLoading Tokenizer....\n\n")

# Load the trained tokenizer as a HF PreTrainedTokenizer.
# Fix: the original used `Tokenizer.from_file("Robert/config.json")`, which
# (a) points at a model config rather than a tokenizer serialization, and
# (b) yields a raw `tokenizers.Tokenizer` that lacks the pad/mask special
# tokens and the `__len__` required by DataCollatorForLanguageModeling and
# `model.resize_token_embeddings(len(tokenizer))` later in the script.
tokenizer = AutoTokenizer.from_pretrained("Robert")

def tokenize_function(examples):
    """Tokenize a batch of raw text lines into model inputs.

    With `batched=True`, `examples["text"]` is a list of strings. Calling
    the HF tokenizer on that list returns a dict of lists (e.g.
    {"input_ids": [...]}), which is the shape `Dataset.map` expects.
    Fix: the original returned `tokenizer.encode(examples["text"])` -- a
    single `Encoding` object built from one string -- which `map` cannot
    merge back into the dataset in batched mode.
    """
    return tokenizer(examples["text"], truncation=True, max_length=512)

# Collect every training text file under nepali-text/ (recursively).
file_paths = [str(x) for x in Path("nepali-text").glob("**/*.txt")]
print(f"\n\nLoading Dataset....\n\n")

# Load all the text files as a line-per-example "text" dataset, then
# tokenize in parallel, dropping the raw text column afterwards.
dataset = load_dataset('text', data_files=file_paths, cache_dir="cache")
dataset = dataset.map(tokenize_function, batched=True, num_proc=16, remove_columns=["text"])

# Data collator applies dynamic masking for the MLM objective
# (15% of tokens masked per batch).
data_collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer, mlm=True, mlm_probability=0.15)
# Run-level defaults: output location, epoch count, and logging verbosity.
default_args = {
    "output_dir": "Robert",
    "num_train_epochs": 1,
    "log_level": "error",
    "report_to": "none",
}

# Assemble the full training configuration in one mapping, then build the
# TrainingArguments from it. Small per-device batches with gradient
# accumulation give an effective batch of 8 * 8 = 64; gradient
# checkpointing and fp16 reduce memory; only the latest checkpoint is
# kept; evaluation is disabled entirely.
trainer_config = dict(
    default_args,
    overwrite_output_dir=True,
    per_device_train_batch_size=8,
    gradient_accumulation_steps=8,
    gradient_checkpointing=True,
    fp16=True,
    save_steps=10_000,
    save_total_limit=1,
    do_eval=False,
    evaluation_strategy="no",
)
training_args = TrainingArguments(**trainer_config)
# Grow/shrink the model's embedding table to match the tokenizer vocabulary.
model.resize_token_embeddings(len(tokenizer))

# Wire model, hyperparameters, MLM collator, and tokenized data into the
# Trainer. No eval_dataset is supplied, consistent with evaluation being
# disabled in the training arguments.
trainer_parts = {
    "model": model,
    "args": training_args,
    "data_collator": data_collator,
    "train_dataset": dataset['train'],
}
trainer = Trainer(**trainer_parts)
print(f"\n\nTraining Started....\n\n")
# Run the training loop defined by `training_args`; checkpoints are written
# to the output directory every `save_steps` steps (only the latest kept).
trainer.train()
# Persist the final model weights and config to the "Robert" directory.
# NOTE(review): the tokenizer is not saved here -- the commented-out
# pipeline below (tokenizer="Robert") assumes tokenizer files already exist
# in that directory; confirm before relying on it.
trainer.save_model("Robert")
# # eval_results = trainer.evaluate()
# # print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}")
# Example usage (kept commented out): load the trained model in a
# fill-mask pipeline and probe it with Nepali sentences containing <mask>.
# fill_mask = pipeline(
# "fill-mask",
# model="Robert",
# tokenizer="Robert"
# )
# fill_mask("हामीले यसलाई कसरी <mask> गर्न सक्छ?")
# # The test text: Round neck sweater with long sleeves
# fill_mask("तपाईंलाई कस्तो <mask> चाहिएको छ?")