Skip to content

Commit e4a3652

Browse files
committed
Merge branch 'main' into torch-version
2 parents e2f63a2 + cef316b commit e4a3652

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

rxnmapper/tokenization_smiles.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ def __init__(
49 49
self.ids_to_tokens = collections.OrderedDict(
50 50
[(ids, tok) for tok, ids in self.vocab.items()]
51 51
)
52-
self.basic_tokenizer = BasicSmilesTokenizer()
52+
self.basic_tokenizer = BasicSmilesTokenizer() # type: ignore[assignment]
53 53
self.init_kwargs["model_max_length"] = self.model_max_length
54 54

55 55
@property
@@ -60,7 +60,7 @@ def vocab_size(self):
60 60
def vocab_list(self):
61 61
return list(self.vocab.keys())
62 62

63-
def _tokenize(self, text):
63+
def _tokenize(self, text): # type: ignore[override]
64 64
split_tokens = [token for token in self.basic_tokenizer.tokenize(text)]
65 65
return split_tokens
66 66

@@ -120,7 +120,7 @@ def add_padding_tokens(self, token_ids, length, right=True):
120 120
else:
121 121
return padding + token_ids
122 122

123-
def save_vocabulary(self, vocab_path):
123+
def save_vocabulary(self, vocab_path): # type: ignore[override]
124 124
"""Save the tokenizer vocabulary to a file."""
125 125
index = 0
126 126
vocab_file = vocab_path

0 commit comments

Comments (0)