
Commit 952fac1

Enable SIM rules (#39806)
* Enable SIM rules
* More fixes

Signed-off-by: cyy <[email protected]>
1 parent 41d1717 commit 952fac1

33 files changed (+60 -89 lines)

pyproject.toml

Lines changed: 5 additions & 2 deletions
@@ -19,10 +19,13 @@ line-length = 119

 [tool.ruff.lint]
 # Never enforce `E501` (line length violations).
-ignore = ["C901", "E501", "E741", "F402", "F823"]
+# SIM300: Yoda condition detected
+# SIM212: Checks for if expressions that check against a negated condition.
+# SIM905: Consider using a list literal instead of `str.split`
+ignore = ["C901", "E501", "E741", "F402", "F823", "SIM1", "SIM300", "SIM212", "SIM905"]
 # RUF013: Checks for the use of implicit Optional
 # in type annotations when the default parameter value is None.
-select = ["C", "E", "F", "I", "W", "RUF013", "UP006", "PERF102", "PLC1802", "PLC0208"]
+select = ["C", "E", "F", "I", "W", "RUF013", "UP006", "PERF102", "PLC1802", "PLC0208","SIM"]
 extend-safe-fixes = ["UP006"]

 # Ignore import violations in all `__init__.py` files.
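
For context (not part of the commit): the newly selected SIM rules come from ruff's flake8-simplify plugin. A minimal sketch of the kind of patterns they flag — the function names here are invented for illustration:

    # Hypothetical examples of code the SIM rules would flag.

    def load_tools(req: dict):
        # SIM910: a None default passed to dict.get is redundant.
        return req.get("tools", None)  # simplifies to: req.get("tools")

    def is_sequence(x):
        # SIM101: repeated isinstance calls merge into one tuple check.
        return isinstance(x, list) or isinstance(x, tuple)  # -> isinstance(x, (list, tuple))

To see which SIM codes fire locally, something like `ruff check --select SIM src/` should work with the pinned ruff version.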

src/transformers/commands/serving.py

Lines changed: 1 addition & 1 deletion
@@ -901,7 +901,7 @@ def generate_chat_completion(self, req: dict) -> Generator[str, None, None]:
         inputs = processor.apply_chat_template(
             processor_inputs,
             add_generation_prompt=True,
-            tools=req.get("tools", None),
+            tools=req.get("tools"),
             return_tensors="pt",
             return_dict=True,
             tokenize=True,
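
This is the SIM910-style cleanup, which also recurs in generation/utils.py and modeling_clvp.py below: `dict.get` already returns None when the key is missing, so the explicit None default is redundant. A standalone equivalence check (not repo code):

    req = {"messages": []}

    # Both forms behave identically when the key is absent.
    assert req.get("tools", None) is None
    assert req.get("tools") is None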

src/transformers/data/data_collator.py

Lines changed: 1 addition & 1 deletion
@@ -183,7 +183,7 @@ def tf_default_data_collator(features: list[InputDataClass]) -> dict[str, Any]:
     if label_col_name is not None:
         if isinstance(first[label_col_name], tf.Tensor):
             dtype = tf.int64 if first[label_col_name].dtype.is_integer else tf.float32
-        elif isinstance(first[label_col_name], np.ndarray) or isinstance(first[label_col_name], np.generic):
+        elif isinstance(first[label_col_name], (np.ndarray, np.generic)):
             dtype = tf.int64 if np.issubdtype(first[label_col_name].dtype, np.integer) else tf.float32
         elif isinstance(first[label_col_name], (tuple, list)):
             dtype = tf.int64 if isinstance(first[label_col_name][0], int) else tf.float32
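
The SIM101 merge, which also appears in keras_callbacks.py below: `isinstance` accepts a tuple of types, so two chained checks collapse into a single call with identical behavior. A standalone sketch:

    import numpy as np

    x = np.float32(1.0)  # an np.generic scalar, not an ndarray

    # The merged tuple form is equivalent to the chained `or`.
    assert isinstance(x, np.ndarray) or isinstance(x, np.generic)
    assert isinstance(x, (np.ndarray, np.generic))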

src/transformers/generation/utils.py

Lines changed: 2 additions & 2 deletions
@@ -647,8 +647,8 @@ def prepare_inputs_for_generation(

         # If it's not defined, it means the model uses the new general mask API
         if causal_mask_creation_function is None:  # can't be found
-            token_type_ids = model_inputs.get("token_type_ids", None)
-            position_ids = model_inputs.get(position_ids_key, None)
+            token_type_ids = model_inputs.get("token_type_ids")
+            position_ids = model_inputs.get(position_ids_key)
             # Some models may overwrite the general one
             causal_mask_creation_function = getattr(self, "create_masks_for_generate", create_masks_for_generate)
             attention_mask = causal_mask_creation_function(

src/transformers/integrations/flex_attention.py

Lines changed: 1 addition & 1 deletion
@@ -284,7 +284,7 @@ def score_mod(score, batch_idx, head_idx, q_idx, kv_idx):
     num_local_query_heads = query.shape[1]

     # When running TP this helps:
-    if not ((num_local_query_heads & (num_local_query_heads - 1)) == 0):
+    if (num_local_query_heads & (num_local_query_heads - 1)) != 0:
         key = repeat_kv(key, query.shape[1] // key.shape[1])
         value = repeat_kv(value, query.shape[1] // value.shape[1])
         enable_gqa = False
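
The rewrite is the SIM201 fix (`not x == y` becomes `x != y`). The condition itself is the standard power-of-two bit trick: `n & (n - 1)` clears the lowest set bit, so it is zero exactly when `n` has a single bit set. A quick standalone check:

    def is_power_of_two(n: int) -> bool:
        # n & (n - 1) drops the lowest set bit; zero means one bit was set.
        return n > 0 and (n & (n - 1)) == 0

    assert [n for n in range(1, 17) if is_power_of_two(n)] == [1, 2, 4, 8, 16]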

src/transformers/integrations/vptq.py

Lines changed: 1 addition & 1 deletion
@@ -45,7 +45,7 @@ def replace_with_vptq_linear(
         should not be passed by the user.
     """

-    modules_to_not_convert = ["lm_head"] if not modules_to_not_convert else modules_to_not_convert
+    modules_to_not_convert = modules_to_not_convert if modules_to_not_convert else ["lm_head"]

     for name, module in model.named_children():
         if current_key_name is None:
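
This reorder looks like the SIM212 pattern named in the pyproject comments (an if-expression testing a negated condition); behavior is unchanged, the positive branch just comes first. A standalone equivalence check over falsy and truthy values:

    # Stand-in values for modules_to_not_convert (hypothetical).
    for value in (None, [], ["lm_head", "classifier"]):
        old = ["lm_head"] if not value else value
        new = value if value else ["lm_head"]
        assert old == new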

src/transformers/keras_callbacks.py

Lines changed: 1 addition & 1 deletion
@@ -167,7 +167,7 @@ def _postprocess_predictions_or_labels(self, inputs):
             # If it's a dict with only one key, just return the array
             if len(outputs) == 1:
                 outputs = list(outputs.values())[0]
-        elif isinstance(inputs[0], list) or isinstance(inputs[0], tuple):
+        elif isinstance(inputs[0], (tuple, list)):
             outputs = []
             for input_list in zip(*inputs):
                 outputs.append(self._concatenate_batches(input_list))

src/transformers/models/aya_vision/configuration_aya_vision.py

Lines changed: 2 additions & 4 deletions
@@ -81,9 +81,7 @@ def __init__(
         self.vision_feature_layer = vision_feature_layer

         if isinstance(vision_config, dict):
-            vision_config["model_type"] = (
-                vision_config["model_type"] if "model_type" in vision_config else "siglip_vision_model"
-            )
+            vision_config["model_type"] = vision_config.get("model_type", "siglip_vision_model")
             vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
         elif vision_config is None:
             vision_config = CONFIG_MAPPING["siglip_vision_model"](
@@ -99,7 +97,7 @@ def __init__(
         self.vision_config = vision_config

         if isinstance(text_config, dict):
-            text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "cohere2"
+            text_config["model_type"] = text_config.get("model_type", "cohere2")
             text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
         elif text_config is None:
             text_config = CONFIG_MAPPING["cohere2"]()
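
Both hunks are the SIM401 rewrite: an `x[k] if k in x else default` expression computes exactly what `dict.get(k, default)` does. Standalone check:

    for cfg in ({}, {"model_type": "siglip_vision_model"}):
        old = cfg["model_type"] if "model_type" in cfg else "siglip_vision_model"
        new = cfg.get("model_type", "siglip_vision_model")
        assert old == new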

src/transformers/models/biogpt/convert_biogpt_original_pytorch_checkpoint_to_pytorch.py

Lines changed: 1 addition & 1 deletion
@@ -148,7 +148,7 @@ def rewrite_dict_keys(d):
     # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
     # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
     d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
-    keep_keys = "<s> <pad> </s> <unk>".split()
+    keep_keys = ["<s>", "<pad>", "</s>", "<unk>"]
     # restore the special tokens
     for k in keep_keys:
         del d2[f"{k}</w>"]
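
This matches SIM905 (`str.split` on a constant string can be a list literal), per the rule description quoted in pyproject.toml above. The two forms produce the same list:

    assert "<s> <pad> </s> <unk>".split() == ["<s>", "<pad>", "</s>", "<unk>"]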

src/transformers/models/clvp/modeling_clvp.py

Lines changed: 1 addition & 1 deletion
@@ -1303,7 +1303,7 @@ def _prepare_model_inputs(

         # Check if conditioning_embeds are provided or not, if yes then concatenate the bos_token_id at the end of the conditioning_embeds.
         # Then we must subtract the positional_ids because during the forward pass it will be added anyways, so we must cancel them out here.
-        conditioning_embeds = model_kwargs.get("conditioning_embeds", None)
+        conditioning_embeds = model_kwargs.get("conditioning_embeds")

         if conditioning_embeds is not None:
             mel_start_token_embedding = self.model.decoder.input_embeds_layer(
