Skip to content

Commit 9d71d48

Browse files
authored
fix lmdeploy bug (#1550)
1 parent b5925b3 commit 9d71d48

File tree

2 files changed: +4 additions, -4 deletions

swift/llm/utils/lmdeploy_utils.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
from copy import deepcopy
88
from queue import Queue
99
from threading import Thread
10-
from typing import Any, Dict, List, Optional, Tuple, Union
10+
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
1111

1212
import torch
1313
from lmdeploy import EngineGenerationConfig as _LmdeployGenerationConfig
@@ -203,9 +203,9 @@ def inference_stream_lmdeploy(lmdeploy_engine: Union[AsyncEngine, VLAsyncEngine]
203203
generation_config: Optional[LmdeployGenerationConfig] = None,
204204
generation_info: Optional[Dict[str, Any]] = None,
205205
use_tqdm: bool = False,
206-
**kwargs) -> List[Dict[str, Any]]:
206+
**kwargs) -> Iterator[List[Dict[str, Any]]]:
207207
if len(request_list) == 0:
208-
return []
208+
return
209209
start_runtime = time.perf_counter()
210210
if generation_config is None:
211211
generation_config = getattr(lmdeploy_engine, 'generation_config', LmdeployGenerationConfig())

swift/llm/utils/vllm_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -377,7 +377,7 @@ def inference_stream_vllm(
377377
The keys to be included will be: 'response', 'history'.
378378
"""
379379
if len(request_list) == 0:
380-
return []
380+
return
381381
start_runtime = time.perf_counter()
382382
if generation_config is None:
383383
generation_config = getattr(llm_engine, 'generation_config', VllmGenerationConfig())

0 commit comments

Comments (0)