Skip to content

Commit 93464a0

Browse files
authored
Prefer raising TypeError exception for invalid type (#41346)
* Fixed raising of TypeError exception for invalid type
* Fixed failing tests.
1 parent 0c9a72e commit 93464a0

18 files changed

+19
-19
lines changed

src/transformers/cache_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1313,7 +1313,7 @@ def check_dynamic_cache(self, method: str):
13131313
isinstance(self.self_attention_cache, DynamicCache)
13141314
and isinstance(self.cross_attention_cache, DynamicCache)
13151315
):
1316-
raise ValueError(
1316+
raise TypeError(
13171317
f"`{method}` is only defined for dynamic cache, got {self.self_attention_cache.__str__()} for the self "
13181318
f"attention cache and {self.cross_attention_cache.__str__()} for the cross attention cache."
13191319
)

src/transformers/commands/serving.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1186,7 +1186,7 @@ def generate_response(self, req: dict) -> Generator[str, None, None]:
11861186
inputs = [{"role": "system", "content": req["instructions"]}] if "instructions" in req else []
11871187
inputs.append(req["input"])
11881188
else:
1189-
raise ValueError("inputs should be a list, dict, or str")
1189+
raise TypeError("inputs should be a list, dict, or str")
11901190

11911191
inputs = processor.apply_chat_template(inputs, add_generation_prompt=True, return_tensors="pt")
11921192
inputs = inputs.to(model.device)

src/transformers/models/evolla/processing_evolla.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -165,7 +165,7 @@ def __call__(
165165
if isinstance(messages_list, (list, tuple)):
166166
for messages in messages_list:
167167
if not isinstance(messages, (list, tuple)):
168-
raise ValueError(f"Each messages in messages_list should be a list instead of {type(messages)}.")
168+
raise TypeError(f"Each messages in messages_list should be a list instead of {type(messages)}.")
169169
if not all(isinstance(m, dict) for m in messages):
170170
raise ValueError(
171171
"Each message in messages_list should be a list of dictionaries, but not all elements are dictionaries."

src/transformers/models/gemma3n/processing_gemma3n.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -107,7 +107,7 @@ def __call__(
107107
if isinstance(text, str):
108108
text = [text]
109109
elif not isinstance(text, list) and not isinstance(text[0], str):
110-
raise ValueError("Invalid input text. Please provide a string, or a list of strings")
110+
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
111111

112112
if audio is not None:
113113
audio_inputs = self.feature_extractor(audio, **output_kwargs["audio_kwargs"])

src/transformers/models/lfm2_vl/processing_lfm2_vl.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -135,7 +135,7 @@ def __call__(
135135
if isinstance(text, str):
136136
text = [text]
137137
elif not isinstance(text, list) and not isinstance(text[0], str):
138-
raise ValueError("Invalid input text. Please provide a string, or a list of strings")
138+
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
139139

140140
n_images_in_text = [sample.count(self.image_token) for sample in text]
141141
if sum(n_images_in_text) > 0 and images is None:

src/transformers/models/ovis2/processing_ovis2.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,7 @@ def __call__(
118118
if isinstance(text, str):
119119
text = [text]
120120
elif not isinstance(text, list) and not isinstance(text[0], str):
121-
raise ValueError("Invalid input text. Please provide a string, or a list of strings")
121+
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
122122

123123
image_inputs = {}
124124

src/transformers/models/perception_lm/processing_perception_lm.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -144,7 +144,7 @@ def __call__(
144144
if isinstance(text, str):
145145
text = [text]
146146
elif not isinstance(text, list) and not isinstance(text[0], str):
147-
raise ValueError("Invalid input text. Please provide a string, or a list of strings")
147+
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
148148

149149
# try to expand inputs in processing if we have the necessary parts
150150
prompt_strings = []

src/transformers/models/sam/image_processing_sam_fast.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -390,7 +390,7 @@ def post_process_masks(
390390
if isinstance(masks[i], np.ndarray):
391391
masks[i] = torch.from_numpy(masks[i])
392392
elif not isinstance(masks[i], torch.Tensor):
393-
raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
393+
raise TypeError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
394394
interpolated_mask = F.interpolate(masks[i], target_image_size, mode="bilinear", align_corners=False)
395395
interpolated_mask = interpolated_mask[..., : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1]]
396396
interpolated_mask = F.interpolate(interpolated_mask, original_size, mode="bilinear", align_corners=False)

src/transformers/models/sam2/image_processing_sam2_fast.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -669,7 +669,7 @@ def post_process_masks(
669669
if isinstance(masks[i], np.ndarray):
670670
masks[i] = torch.from_numpy(masks[i])
671671
elif not isinstance(masks[i], torch.Tensor):
672-
raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
672+
raise TypeError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
673673
interpolated_mask = F.interpolate(masks[i], original_size, mode="bilinear", align_corners=False)
674674
if apply_non_overlapping_constraints:
675675
interpolated_mask = self._apply_non_overlapping_constraints(interpolated_mask)

src/transformers/models/sam2/modular_sam2.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -287,7 +287,7 @@ def post_process_masks(
287287
if isinstance(masks[i], np.ndarray):
288288
masks[i] = torch.from_numpy(masks[i])
289289
elif not isinstance(masks[i], torch.Tensor):
290-
raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
290+
raise TypeError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
291291
interpolated_mask = F.interpolate(masks[i], original_size, mode="bilinear", align_corners=False)
292292
if apply_non_overlapping_constraints:
293293
interpolated_mask = self._apply_non_overlapping_constraints(interpolated_mask)

0 commit comments

Comments (0)