Skip to content

Commit fbeec3c

Browse files
committed
update python env
1 parent 64f4b84 commit fbeec3c

File tree

6 files changed

+148
-127
lines changed

6 files changed

+148
-127
lines changed

docker-compose.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ services:
1010
# image: docker.1ms.run/506610466/gpt_server:latest
1111
image: gpt_server:latest_
1212
container_name: gpt_server
13-
shm_size: '8g' # 设置共享内存为4GB
13+
shm_size: '32g' # 设置共享内存为32GB
1414
restart: always
1515
# network_mode: host
1616
ports:

gpt_server/model_backend/lmdeploy_backend.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,10 +25,11 @@
2525
"lmdeploy-pytorch": "pytorch", # pytorch后端
2626
"lmdeploy-turbomind": "turbomind", # turbomind后端
2727
}
28+
log_level = os.getenv("log_level", "WARNING")
2829
from lmdeploy.utils import get_logger
2930

30-
get_logger("lmdeploy").setLevel("WARNING")
31-
os.environ["TM_LOG_LEVEL"] = "ERROR"
31+
get_logger("lmdeploy").setLevel(log_level) # 默认WARNING
32+
os.environ["TM_LOG_LEVEL"] = "WARNING"
3233

3334

3435
def is_stop(output: str, stop_str: str):

gpt_server/serving/openai_api_server.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -308,7 +308,6 @@ def get_gen_params(
308308
# ------- TODO add messages tools -------
309309
gen_params["response_format"] = response_format
310310
gen_params["reasoning_parser"] = reasoning_parser
311-
logger.debug(f"==== request ====\n{gen_params}")
312311
return gen_params
313312

314313

pyproject.toml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,14 +12,14 @@ dependencies = [
1212
"ffmpy",
1313
"fschat==0.2.36",
1414
"infinity-emb[all]==0.0.76",
15-
"lmdeploy==0.8.0",
15+
"lmdeploy==0.9.0",
1616
"loguru>=0.7.2",
1717
"openai==1.86.0",
1818
"setuptools==75.2.0",
1919
"streamlit==1.39.0",
2020
"torch==2.6.0",
2121
"torchvision==0.20.1",
22-
"vllm==0.9.0.1",
22+
"vllm==0.9.1",
2323
"qwen_vl_utils",
2424
"evalscope[perf,rag]==0.16.1",
2525
"modelscope==1.26.0",
@@ -35,7 +35,7 @@ dependencies = [
3535
default-groups = [] # 默认只安装dependencies中的库
3636
override-dependencies = [
3737
"setuptools==75.2.0",
38-
"torchvision==0.22.0",
38+
"torchvision==0.22.1",
3939
"torch==2.7.0",
4040
"triton",
4141
"outlines==0.1.11",

requirements.txt

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ aiohttp==3.12.13
2727
# vllm
2828
aiosignal==1.3.2
2929
# via aiohttp
30-
airportsdata==20250523
30+
airportsdata==20250622
3131
# via outlines
3232
aliyun-python-sdk-core==2.16.0
3333
# via
@@ -39,7 +39,7 @@ altair==5.5.0
3939
# via streamlit
4040
annotated-types==0.7.0
4141
# via pydantic
42-
anthropic==0.54.0
42+
anthropic==0.55.0
4343
# via sglang
4444
antlr4-python3-runtime==4.9.3
4545
# via
@@ -122,7 +122,7 @@ coloredlogs==15.0.1
122122
# via onnxruntime
123123
colpali-engine==0.3.10
124124
# via infinity-emb
125-
compressed-tensors==0.9.4
125+
compressed-tensors==0.10.1
126126
# via
127127
# sglang
128128
# vllm
@@ -160,7 +160,7 @@ decord==0.6.0
160160
# via sglang
161161
depyf==0.18.0
162162
# via vllm
163-
diffusers==0.33.1
163+
diffusers==0.34.0
164164
# via gpt-server (pyproject.toml)
165165
dill==0.3.8
166166
# via
@@ -298,7 +298,7 @@ httpx==0.28.1
298298
# langsmith
299299
# litellm
300300
# openai
301-
httpx-sse==0.4.0
301+
httpx-sse==0.4.1
302302
# via langchain-community
303303
huggingface-hub==0.33.0
304304
# via
@@ -412,7 +412,7 @@ langchain-core==0.3.66
412412
# langchain-openai
413413
# langchain-text-splitters
414414
# ragas
415-
langchain-openai==0.3.24
415+
langchain-openai==0.3.25
416416
# via
417417
# evalscope
418418
# ragas
@@ -439,9 +439,9 @@ librosa==0.11.0
439439
# via
440440
# flashtts
441441
# funasr
442-
litellm==1.72.7
442+
litellm==1.73.1
443443
# via sglang
444-
llguidance==0.7.29
444+
llguidance==0.7.30
445445
# via
446446
# sglang
447447
# vllm
@@ -451,7 +451,7 @@ llvmlite==0.44.0
451451
# pynndescent
452452
lm-format-enforcer==0.10.11
453453
# via vllm
454-
lmdeploy==0.8.0
454+
lmdeploy==0.9.0
455455
# via gpt-server (pyproject.toml)
456456
loguru==0.7.3
457457
# via gpt-server (pyproject.toml)
@@ -499,15 +499,15 @@ msgspec==0.19.0
499499
# vllm
500500
mteb==1.38.20
501501
# via evalscope
502-
multidict==6.5.0
502+
multidict==6.5.1
503503
# via
504504
# aiohttp
505505
# yarl
506506
multiprocess==0.70.16
507507
# via datasets
508508
mypy-extensions==1.1.0
509509
# via typing-inspect
510-
narwhals==1.43.1
510+
narwhals==1.44.0
511511
# via altair
512512
nest-asyncio==1.6.0
513513
# via
@@ -737,7 +737,7 @@ pandas==2.3.0
737737
# streamlit
738738
parso==0.8.4
739739
# via jedi
740-
partial-json-parser==0.2.1.1.post5
740+
partial-json-parser==0.2.1.1.post6
741741
# via
742742
# lmdeploy
743743
# sglang
@@ -854,11 +854,11 @@ pydantic==2.11.7
854854
# xgrammar
855855
pydantic-core==2.33.2
856856
# via pydantic
857-
pydantic-settings==2.9.1
857+
pydantic-settings==2.10.1
858858
# via langchain-community
859859
pydeck==0.9.1
860860
# via streamlit
861-
pygments==2.19.1
861+
pygments==2.19.2
862862
# via
863863
# ipython
864864
# markdown2
@@ -881,7 +881,7 @@ python-dateutil==2.9.0.post0
881881
# modelscope
882882
# pandas
883883
# posthog
884-
python-dotenv==1.1.0
884+
python-dotenv==1.1.1
885885
# via
886886
# litellm
887887
# pydantic-settings
@@ -1153,7 +1153,7 @@ tiktoken==0.9.0
11531153
# xgrammar
11541154
timm==1.0.15
11551155
# via infinity-emb
1156-
tokenizers==0.21.1
1156+
tokenizers==0.21.2
11571157
# via
11581158
# litellm
11591159
# transformers
@@ -1195,7 +1195,7 @@ torchao==0.9.0
11951195
# via sglang
11961196
torchaudio==2.7.0
11971197
# via vllm
1198-
torchvision==0.22.0
1198+
torchvision==0.22.1
11991199
# via
12001200
# --override (workspace)
12011201
# gpt-server (pyproject.toml)
@@ -1321,7 +1321,7 @@ uvloop==0.21.0
13211321
# via
13221322
# sglang
13231323
# uvicorn
1324-
vllm==0.9.0.1
1324+
vllm==0.9.1
13251325
# via gpt-server (pyproject.toml)
13261326
watchdog==5.0.3
13271327
# via streamlit

0 commit comments

Comments
 (0)