
Commit d386ab1

Authored by maang-h, DarkLight1337, and yewentao256
[Docs] Improve malformed exception caused by backslash line continuations (vllm-project#31694)
Signed-off-by: maang <[email protected]>
Co-authored-by: Cyrus Leung <[email protected]>
Co-authored-by: Wentao Ye <[email protected]>
1 parent ccb309a commit d386ab1

File tree

7 files changed (+14 / -17 lines)
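The pattern fixed across all seven files is the same: a backslash line continuation inside a string literal folds the next source line's indentation into the runtime message, so the rendered exception contains a long run of spaces. A minimal sketch of the before/after behavior (the variable names here are illustrative, not taken from the diff):

    # Illustrative only: how a backslash continuation corrupts a message.
    broken = "No config file specified! \
        Please check your command-line arguments."
    fixed = (
        "No config file specified! "
        "Please check your command-line arguments."
    )

    print(repr(broken))  # the continuation line's indentation is embedded in the string
    print(repr(fixed))   # adjacent literals concatenate into one clean message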

vllm/config/compilation.py

Lines changed: 3 additions & 3 deletions
@@ -839,9 +839,9 @@ def init_backend(self, vllm_config: "VllmConfig") -> str | Callable:
         """
         if self.mode is None:
             raise ValueError(
-                "No compilation mode is set. This method should only be \
-                called via vllm config where the level is set if none is \
-                provided."
+                "No compilation mode is set. This method should only be "
+                "called via vllm config where the level is set if none is "
+                "provided."
             )
         if self.mode == CompilationMode.NONE:
             raise ValueError("No compilation mode is set.")
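The replacement relies on Python's implicit concatenation of adjacent string literals, which happens at compile time, so the parenthesized multi-line form costs nothing at runtime. A quick way to confirm this (a sketch, not part of the commit):

    import dis

    def f():
        return (
            "No compilation mode is set. This method should only be "
            "called via vllm config where the level is set if none is "
            "provided."
        )

    dis.dis(f)  # a single LOAD_CONST: the parts were joined at compile time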

vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py

Lines changed: 1 addition & 2 deletions
@@ -231,8 +231,7 @@ def rocm_aiter_fused_experts(
     # w8a8 block-scaled
     if quant_config.block_shape is not None and quant_config.use_fp8_w8a8:
         assert not apply_router_weight_on_input, (
-            "apply_router_weight_on_input is\
-            not supported for block scaled moe"
+            "apply_router_weight_on_input is not supported for block scaled moe"
         )
         assert quant_config.w1_scale is not None
         assert quant_config.w2_scale is not None

vllm/model_executor/model_loader/weight_utils.py

Lines changed: 2 additions & 2 deletions
@@ -681,8 +681,8 @@ def safetensors_weights_iterator(
     # instead we reconstruct the subclasses here before returning
     if not torchao_version_at_least("0.15.0"):
         raise ValueError(
-            "Please use torchao version >= 0.15.0 \
-            to load torchao safetensors checkpoint"
+            "Please use torchao version >= 0.15.0 "
+            "to load torchao safetensors checkpoint"
         )
     from torchao.prototype.safetensors.safetensors_support import (
         unflatten_tensor_state_dict,

vllm/model_executor/models/llama.py

Lines changed: 2 additions & 2 deletions
@@ -201,8 +201,8 @@ def __init__(
             # This is a target model, use layer_idx directly
             effective_layer_idx = layer_idx
         assert effective_layer_idx < len(layer_types), (
-            f"effective_layer_idx: {effective_layer_idx} \
-            is out of bounds for layer_types: {layer_types}"
+            f"effective_layer_idx: {effective_layer_idx} "
+            f"is out of bounds for layer_types: {layer_types}"
         )

         is_sliding = layer_types[effective_layer_idx] == "sliding_attention"

vllm/model_executor/models/phi4mm.py

Lines changed: 3 additions & 4 deletions
@@ -428,14 +428,13 @@ def forward(
             output_imgs.append(torch.cat([sub_img, self.glb_GN, glb_img], dim=1))
         else:
             raise NotImplementedError(
-                f'hd_transform_order = {self.hd_transform_order}, "\
-                "not implemented'
+                f"hd_transform_order = {self.hd_transform_order}, not implemented"
             )

         # temp_len = int((h*w+1)*144 + 1 + (h+1)*12)
         assert temp_len == output_imgs[-1].shape[1], (
-            f'temp_len: {temp_len}, output_imgs[-1].shape[1]: "\
-            "{output_imgs[-1].shape[1]}'
+            f"temp_len: {temp_len}, output_imgs[-1].shape[1]: "
+            f"{output_imgs[-1].shape[1]}"
         )

         output_len.append(temp_len)
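Worth noting for this file: the old literals were doubly malformed, because the backslash continuation also trapped stray double quotes inside the single-quoted f-strings. A sketch of what the old message actually rendered as (the value assigned here is a placeholder for demonstration, not from the diff):

    # Illustrative: the old single-quoted f-string kept a stray '"' plus
    # the continuation line's indentation in the rendered message.
    hd_transform_order = "glb_sub"  # placeholder value
    msg = f'hd_transform_order = {hd_transform_order}, "\
        "not implemented'
    print(repr(msg))  # note the stray quotes and the leaked indentation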

vllm/multimodal/registry.py

Lines changed: 2 additions & 2 deletions
@@ -367,8 +367,8 @@ def get_encdec_max_encoder_len(self, model_config: "ModelConfig") -> int:
             # than whisper.
             return 0
         assert len(max_tokens) == 1, (
-            "Encoder-decoder models are expected \
-            to implement the multimodal interface with at most one modality."
+            "Encoder-decoder models are expected "
+            "to implement the multimodal interface with at most one modality."
         )

         first_modality = next(iter(max_tokens))

vllm/utils/argparse_utils.py

Lines changed: 1 addition & 2 deletions
@@ -399,8 +399,7 @@ def _pull_args_from_config(self, args: list[str]) -> list[str]:
         index = args.index("--config")
         if index == len(args) - 1:
             raise ValueError(
-                "No config file specified! \
-                Please check your command-line arguments."
+                "No config file specified! Please check your command-line arguments."
             )

         file_path = args[index + 1]
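To keep this class of bug from reappearing, a rough heuristic scan is easy to write. The helper below is hypothetical, not part of this commit or of vllm's tooling, and it will over-flag legitimate line continuations that merely follow a closed string, so treat hits as candidates for manual review:

    import re
    from pathlib import Path

    # Rough heuristic: a quote followed by non-quote characters and a
    # trailing backslash usually means a continuation inside a literal.
    SUSPECT = re.compile(r'["\'][^"\']*\\\s*$')

    def find_suspect_continuations(root: str) -> list[tuple[str, int, str]]:
        """Return (path, line number, line) for lines that look like
        string literals continued with a backslash."""
        hits = []
        for path in Path(root).rglob("*.py"):
            lines = path.read_text(encoding="utf-8").splitlines()
            for lineno, line in enumerate(lines, start=1):
                if SUSPECT.search(line):
                    hits.append((str(path), lineno, line.strip()))
        return hits

    if __name__ == "__main__":
        for path, lineno, line in find_suspect_continuations("vllm"):
            print(f"{path}:{lineno}: {line}")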
