-
Notifications
You must be signed in to change notification settings - Fork 14
Expand file tree
/
Copy pathtrain_tokenizer.py
More file actions
38 lines (29 loc) · 1.14 KB
/
train_tokenizer.py
File metadata and controls
38 lines (29 loc) · 1.14 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
# imports
from datasets import load_dataset, Dataset, concatenate_datasets
import pandas as pd
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.trainers import BpeTrainer
from tokenizers.pre_tokenizers import Whitespace
# -----------------------------
# Fetch the TinyStories corpus and flatten every split (validation first,
# then train) into a single CSV file on disk for the next stage.
dataset = load_dataset("roneneldan/TinyStories")
combined = concatenate_datasets([dataset["validation"], dataset['train']])
combined.to_csv("data/stories.csv")
# convert csv file to txt file: one story per line, ready for tokenizer training
input_file_path = "data/stories.csv"
df = pd.read_csv(input_file_path)
output_file_path = "data/stories.txt"
# Fix: pass encoding explicitly — the default for open() is platform-dependent
# (e.g. cp1252 on Windows) and can raise UnicodeEncodeError on story text.
with open(output_file_path, 'w', encoding='utf-8') as file:
    # Fix: dropna() — rows pandas parsed as missing would otherwise be
    # emitted as the literal line "nan" and pollute the training corpus.
    file.writelines(str(row) + '\n' for row in df['text'].dropna())
# -----------------------------
# Build a byte-pair-encoding tokenizer and configure its training run.
tokenizer = Tokenizer(BPE(unk_token="<|unknown|>"))
tokenizer.pre_tokenizer = Whitespace()  # split on whitespace before BPE merges
# Small vocabulary (2048) for the tiny corpus; reserve chat-style special tokens.
trainer = BpeTrainer(
    special_tokens=["<|unknown|>", "<|im_start|>", "<|im_end|>"],
    vocab_size=2048,
    min_frequency=1,
)
# -----------------------------
# Train on the plain-text corpus and persist the tokenizer definition as JSON.
tokenizer.train(["data/stories.txt"], trainer)
tokenizer.save("data/TinyStories-tokenizer.json")
# -----------------------------