Skip to content

Commit 9b08735

Browse files
authored
fix vllm eos_token (#973)
1 parent 8a794ff commit 9b08735

File tree

2 files changed

+3
-0
lines changed

2 files changed

+3
-0
lines changed

swift/llm/deploy.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
7 7
from typing import List, Optional, Union
8 8

9 9
import json
10+
import torch
10 11
from fastapi import FastAPI, Request
11 12
from fastapi.responses import JSONResponse, StreamingResponse
12 13
from modelscope import GenerationConfig

swift/llm/utils/vllm_utils.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -134,6 +134,7 @@ def __init__(
134 134
n: int = 1,
135 135
length_penalty: float = 1.,
136 136
stop: Optional[List[str]] = None,
137+
skip_special_tokens: bool = False,
137 138
**kwargs,
138 139
) -> None:
139 140
# The parameter design is similar to transformers.GenerationConfig.
@@ -162,6 +163,7 @@ def __init__(
162 163
kwargs['n'] = n
163 164
kwargs['length_penalty'] = length_penalty
164 165
kwargs['stop'] = stop
166+
kwargs['skip_special_tokens'] = skip_special_tokens
165 167
parameters = inspect.signature(SamplingParams.__init__).parameters
166 168
for k in kwargs.copy().keys():
167 169
if k not in parameters:

0 commit comments

Comments (0)