Skip to content

Commit 91f14f1

Browse files
Sai-Suraj-27 and molbap authored
Removed some duplicated code (#35637)
* Removed duplicate class field definition. * Removed duplicate code in try-except block. --------- Co-authored-by: Pablo Montalvo <[email protected]>
1 parent b8c34d9 commit 91f14f1

File tree

2 files changed

+0
-8
lines changed

2 files changed

+0
-8
lines changed

src/transformers/models/paligemma/modeling_paligemma.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -196,7 +196,6 @@ class PaliGemmaPreTrainedModel(PreTrainedModel):
196196
_supports_cache_class = True
197197
_supports_quantized_cache = True
198198
_supports_static_cache = True
199-
_supports_cache_class = True
200199
_supports_flash_attn_2 = True
201200
_supports_sdpa = True
202201

src/transformers/tokenization_utils_base.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -2292,13 +2292,6 @@ def _from_pretrained(
22922292
"Unable to load vocabulary from file. "
22932293
"Please check that the provided vocabulary is accessible and not corrupted."
22942294
)
2295-
except RuntimeError as e:
2296-
if "sentencepiece_processor.cc" in str(e):
2297-
logger.info(
2298-
"Unable to load tokenizer model from SPM, loading from TikToken will be attempted instead."
2299-
"(SentencePiece RuntimeError: Tried to load SPM model with non-SPM vocab file).",
2300-
)
2301-
return False
23022295

23032296
if added_tokens_decoder != {} and max(list(added_tokens_decoder.keys())[-1], 0) > tokenizer.vocab_size:
23042297
logger.info(

0 commit comments

Comments (0)