-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtokenizer.py
More file actions
68 lines (54 loc) · 2.78 KB
/
tokenizer.py
File metadata and controls
68 lines (54 loc) · 2.78 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
from datasets import load_dataset
from tokenizers import decoders, models, normalizers, pre_tokenizers, processors, trainers, Tokenizer
from transformers import BertTokenizer
# #download pretrained------------------------------------------------------------------
# tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
# #encode-------------------------------------------------------------------------------
# sequence = "Using a Transformer network is simple"
# tokens = tokenizer.tokenize(sequence)
# print(tokens) #display
# ids = tokenizer.convert_tokens_to_ids(tokens)
# print(ids)#display
# #decode-------------------------------------------------------------------------------
# decoded_string = tokenizer.decode([7993, 170, 13809, 23763, 2443, 1110, 3014])
# print(decoded_string)
#settings---------------------------------------------------------------------------
batch_size = 1000  # rows of the dataset fed to the trainer per yielded batch
vocab_size = 10000  # target vocabulary size for the WordPiece trainer
#acquire dataset--------------------------------------------------------------------
# Wikitext-2 (raw) training split; slices of it expose a "text" field (see batch_iterator).
dataset = load_dataset("wikitext", name="wikitext-2-raw-v1", split="train")
#display
print(dataset)
print(dataset[1])
print(dataset[4])
#segment into batch-----------------------------------------------------------------
def batch_iterator(data=None, size=None):
    """Yield successive batches of the "text" field for tokenizer training.

    Generalized so the iterator can be reused on any sliceable corpus,
    while remaining backward compatible: calling it with no arguments
    behaves exactly as before, using the module-level globals.

    Args:
        data: sliceable dataset whose slices expose a ``"text"`` field;
            defaults to the module-level ``dataset``.
        size: number of rows per batch; defaults to the module-level
            ``batch_size``.

    Yields:
        The ``"text"`` values of one batch (the final batch may be short).
    """
    source = dataset if data is None else data
    step = batch_size if size is None else size
    for start in range(0, len(source), step):
        yield source[start : start + step]["text"]
#train------------------------------------------------------------------------------
# Build a WordPiece tokenizer with BERT-style normalization and pre-tokenization,
# then train it on the batched corpus.
custom_tokenizer = Tokenizer(models.WordPiece(unk_token="[UNK]"))
custom_tokenizer.normalizer = normalizers.BertNormalizer(lowercase=True)  # lowercase all text
custom_tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()  # split on whitespace/punctuation
#display
print(custom_tokenizer.pre_tokenizer.pre_tokenize_str("This is an example!"))
# Reserve the standard BERT special tokens so they get fixed vocabulary ids.
special_tokens = ["[UNK]", "[PAD]", "[CLS]", "[SEP]", "[MASK]"]
trainer = trainers.WordPieceTrainer(
    vocab_size=vocab_size,
    special_tokens=special_tokens,
)  # create trainer
custom_tokenizer.train_from_iterator(batch_iterator(), trainer=trainer)  # train
#post-processing------------------------------------------------------------------
# Look up the ids assigned to the special tokens during training so the
# template processor can insert them into every encoding.
cls_token_id = custom_tokenizer.token_to_id("[CLS]")
sep_token_id = custom_tokenizer.token_to_id("[SEP]")
print(cls_token_id, sep_token_id)
# Add BERT-style framing: [CLS] A [SEP] for single inputs, with a second
# segment B [SEP] (type id 1) for pairs. NOTE: $A/$B are TemplateProcessing
# placeholders, not Python interpolation — the former f-prefix on these
# literals was a no-op (ruff F541) and has been dropped; values are unchanged.
custom_tokenizer.post_processor = processors.TemplateProcessing(
    single="[CLS]:0 $A:0 [SEP]:0",
    pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1",
    special_tokens=[
        ("[CLS]", cls_token_id),
        ("[SEP]", sep_token_id),
    ],
)
#check encoding-------------------------------------------------------------------
# Encode a sentence pair to confirm the special-token template and the
# segment (type) ids are applied as expected.
sample = custom_tokenizer.encode("This is one sentence.", "With this one we have a pair.")
print(sample.tokens)
print(sample.type_ids)
#save pre-trained-------------------------------------------------------------------
custom_tokenizer.save("my-new-tokenizer.json")