
Commit fa36c97

Remove unnecessary list comprehension (#41305)
Remove unnecessary comprehension

Signed-off-by: Yuanyuan Chen <[email protected]>
1 parent 7a1aeec commit fa36c97

22 files changed, +25 -27 lines changed
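
The commit applies two equivalent rewrites throughout: dropping the redundant None default from dict.get(), and passing a generator expression instead of a list comprehension to builtins such as sum(), max(), and tuple(). A minimal sketch of both patterns, using toy data rather than anything from the diff:

# dict.get() already defaults to None, so the explicit second argument is redundant.
mapping = {"cnn_dailymail": ("article", "highlights")}
assert mapping.get("missing") is None
assert mapping.get("missing") == mapping.get("missing", None)

# sum(), max(), and tuple() accept any iterable, so a generator expression
# gives the same result without building an intermediate list.
rows = [[1, 2, 3], [4, 5]]
assert sum(len(r) for r in rows) == sum([len(r) for r in rows]) == 5
assert max(len(r) for r in rows) == 3
assert tuple(len(r) for r in rows) == (3, 2)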

examples/pytorch/contrastive-image-text/run_clip.py

Lines changed: 1 addition & 1 deletion
@@ -387,7 +387,7 @@ def _freeze_params(module):
return

# 6. Get the column names for input/target.
- dataset_columns = dataset_name_mapping.get(data_args.dataset_name, None)
+ dataset_columns = dataset_name_mapping.get(data_args.dataset_name)
if data_args.image_column is None:
image_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
else:

examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py

Lines changed: 2 additions & 2 deletions
@@ -933,7 +933,7 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
all_end_top_index.append(accelerator.gather_for_metrics(end_top_index).cpu().numpy())
all_cls_logits.append(accelerator.gather_for_metrics(cls_logits).cpu().numpy())

- max_len = max([x.shape[1] for x in all_end_top_log_probs]) # Get the max_length of the tensor
+ max_len = max(x.shape[1] for x in all_end_top_log_probs) # Get the max_length of the tensor

# concatenate all numpy arrays collected above
start_top_log_probs_concat = create_and_fill_np_array(all_start_top_log_probs, eval_dataset, max_len)

@@ -993,7 +993,7 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
all_end_top_index.append(accelerator.gather_for_metrics(end_top_index).cpu().numpy())
all_cls_logits.append(accelerator.gather_for_metrics(cls_logits).cpu().numpy())

- max_len = max([x.shape[1] for x in all_end_top_log_probs]) # Get the max_length of the tensor
+ max_len = max(x.shape[1] for x in all_end_top_log_probs) # Get the max_length of the tensor

# concatenate all numpy arrays collected above
start_top_log_probs_concat = create_and_fill_np_array(all_start_top_log_probs, predict_dataset, max_len)
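
For the max_len computation above, max() consumes the generator lazily and returns the same value as the list form. A small sketch with hypothetical stand-in arrays (plain numpy, not the script's accelerator setup):

import numpy as np

# Stand-ins for all_end_top_log_probs: per-batch arrays whose second
# dimension varies; max() consumes the generator one item at a time.
all_end_top_log_probs = [np.zeros((2, 4)), np.zeros((2, 7)), np.zeros((2, 5))]
max_len = max(x.shape[1] for x in all_end_top_log_probs)
assert max_len == 7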

examples/pytorch/question-answering/run_seq2seq_qa.py

Lines changed: 1 addition & 1 deletion
@@ -416,7 +416,7 @@ def main():
return

# Get the column names for input/target.
- dataset_columns = question_answering_column_name_mapping.get(data_args.dataset_name, None)
+ dataset_columns = question_answering_column_name_mapping.get(data_args.dataset_name)
if data_args.question_column is None:
question_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
else:

examples/pytorch/summarization/run_summarization.py

Lines changed: 1 addition & 1 deletion
@@ -531,7 +531,7 @@ def main():
model.config.forced_bos_token_id = forced_bos_token_id

# Get the column names for input/target.
- dataset_columns = summarization_name_mapping.get(data_args.dataset_name, None)
+ dataset_columns = summarization_name_mapping.get(data_args.dataset_name)
if data_args.text_column is None:
text_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
else:

examples/pytorch/summarization/run_summarization_no_trainer.py

Lines changed: 1 addition & 1 deletion
@@ -476,7 +476,7 @@ def main():
column_names = raw_datasets["train"].column_names

# Get the column names for input/target.
- dataset_columns = summarization_name_mapping.get(args.dataset_name, None)
+ dataset_columns = summarization_name_mapping.get(args.dataset_name)
if args.text_column is None:
text_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
else:

src/transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid_fast.py

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@ def __init__(self, **kwargs: Unpack[DeepseekVLHybridImageProcessorKwargs]):
if kwargs.get("image_mean") is None:
background_color = (127, 127, 127)
else:
- background_color = tuple([int(x * 255) for x in kwargs.get("image_mean")])
+ background_color = tuple(int(x * 255) for x in kwargs.get("image_mean"))
if kwargs.get("high_res_image_mean") is None:
high_res_background_color = (127, 127, 127)
else:

src/transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py

Lines changed: 1 addition & 1 deletion
@@ -764,7 +764,7 @@ def __init__(self, **kwargs: Unpack[DeepseekVLHybridImageProcessorKwargs]):
if kwargs.get("image_mean") is None:
background_color = (127, 127, 127)
else:
- background_color = tuple([int(x * 255) for x in kwargs.get("image_mean")])
+ background_color = tuple(int(x * 255) for x in kwargs.get("image_mean"))
if kwargs.get("high_res_image_mean") is None:
high_res_background_color = (127, 127, 127)
else:

src/transformers/models/musicgen/modeling_musicgen.py

Lines changed: 1 addition & 1 deletion
@@ -551,7 +551,7 @@ def forward(
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0

if inputs_embeds is None:
- inputs_embeds = sum([self.embed_tokens[codebook](input[:, codebook]) for codebook in range(num_codebooks)])
+ inputs_embeds = sum(self.embed_tokens[codebook](input[:, codebook]) for codebook in range(num_codebooks))

attention_mask = self._update_causal_mask(
attention_mask,
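
The generator passed to sum() behaves the same as the list form here: sum() starts from 0 and adds each per-codebook embedding tensor in turn. A rough sketch with toy shapes standing in for the real embeddings:

import torch

# Toy stand-ins for the per-codebook embedding outputs.
num_codebooks, batch_size, hidden_size = 3, 2, 4
per_codebook = [torch.randn(batch_size, hidden_size) for _ in range(num_codebooks)]

# 0 + tensor works, so summing a generator of tensors gives the same result
# as summing a materialized list of them.
inputs_embeds = sum(per_codebook[codebook] for codebook in range(num_codebooks))
assert inputs_embeds.shape == (batch_size, hidden_size)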

src/transformers/models/oneformer/modeling_oneformer.py

Lines changed: 1 addition & 1 deletion
@@ -718,7 +718,7 @@ def get_num_masks(self, class_labels: torch.Tensor, device: torch.device) -> tor
"""
Computes the average number of target masks across the batch, for normalization purposes.
"""
- num_masks = sum([len(classes) for classes in class_labels])
+ num_masks = sum(len(classes) for classes in class_labels)
num_masks = torch.as_tensor([num_masks], dtype=torch.float, device=device)
world_size = 1
if is_accelerate_available():

src/transformers/models/ovis2/image_processing_ovis2.py

Lines changed: 1 addition & 1 deletion
@@ -184,7 +184,7 @@ def get_min_tile_covering_grid(
for tile_grid in candidate_tile_grids:
tile_regions = split_image_into_grid(image_height, image_width, tile_grid)
tile_covering_ratio = (
- sum([compute_patch_covering_area(*region, target_patch_size) for region in tile_regions]) / image_area
+ sum(compute_patch_covering_area(*region, target_patch_size) for region in tile_regions) / image_area
)

evaluated_grids.append((tile_grid, tile_covering_ratio))
