@@ -53,7 +53,8 @@ def __free_port(port):
 @dataclass
 class ServerArgument:
     port: int = field(default=8011, metadata={"help": "The port of ui service"})
-    base_port: int = field(default=8010, metadata={"help": "The port of flask service"})
+    base_port: int = field(default=None, metadata={"help": "The port of flask service"})
+    flask_port: int = field(default=None, metadata={"help": "The port of flask service"})
     title: str = field(default="LLM", metadata={"help": "The title of gradio"})
     sub_title: str = field(default="LLM-subtitle", metadata={"help": "The sub-title of gradio"})
 
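Both port fields now default to None rather than a concrete port, so the launcher can tell whether a flag was actually passed on the command line (with a default of 8010, "not provided" and "explicitly 8010" would look identical). A minimal sketch of that sentinel pattern, using argparse as a stand-in for the PdArgumentParser used later in this file; the flag names mirror the dataclass fields above, everything else is illustrative:

    # Stand-in for PdArgumentParser: a None default distinguishes
    # "flag omitted" from "flag explicitly set to some port".
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--base_port", type=int, default=None)   # deprecated alias
    parser.add_argument("--flask_port", type=int, default=None)  # preferred flag

    args = parser.parse_args([])                       # neither flag passed
    assert args.base_port is None and args.flask_port is None

    args = parser.parse_args(["--base_port", "8010"])  # old flag still accepted
    assert args.base_port == 8010 and args.flask_port is None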
@@ -64,8 +65,8 @@ def __init__(self, args: ServerArgument, predictor: BasePredictor):
         self.predictor = predictor
         self.args = args
         scan_l, scan_u = (
-            self.args.base_port + port_interval * predictor.tensor_parallel_rank,
-            self.args.base_port + port_interval * (predictor.tensor_parallel_rank + 1),
+            self.args.flask_port + port_interval * predictor.tensor_parallel_rank,
+            self.args.flask_port + port_interval * (predictor.tensor_parallel_rank + 1),
         )
 
         if self.predictor.tensor_parallel_rank == 0:
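Each tensor-parallel rank scans its own window of candidate ports, offset from flask_port by port_interval. A small worked example of that arithmetic; port_interval is defined elsewhere in the file, so the value of 10 here (and flask_port = 8010) is purely illustrative:

    # Worked example of the per-rank scan window (illustrative values only).
    flask_port = 8010
    port_interval = 10  # assumed; the real value comes from the surrounding module

    for tensor_parallel_rank in range(4):
        scan_l = flask_port + port_interval * tensor_parallel_rank
        scan_u = flask_port + port_interval * (tensor_parallel_rank + 1)
        print(f"rank {tensor_parallel_rank}: scan_l={scan_l}, scan_u={scan_u}")
    # rank 0: scan_l=8010, scan_u=8020
    # rank 1: scan_l=8020, scan_u=8030
    # rank 2: scan_l=8030, scan_u=8040
    # rank 3: scan_l=8040, scan_u=8050

Consecutive ranks get consecutive, non-overlapping windows, so renaming base_port to flask_port does not change the spacing, only which argument seeds it.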
@@ -174,6 +175,14 @@ def start_ui_service(self, args):
 
     parser = PdArgumentParser((PredictorArgument, ModelArgument, ServerArgument))
    predictor_args, model_args, server_args = parser.parse_args_into_dataclasses()
+    # check port
+    if server_args.base_port is not None:
+        logger.warning("`--base_port` is deprecated, please use `--flask_port` instead after 2023.12.30.")
+
+        if server_args.flask_port is None:
+            server_args.flask_port = server_args.base_port
+        else:
+            logger.warning("`--base_port` and `--flask_port` are both set, `--base_port` will be ignored.")
 
     log_dir = os.getenv("PADDLE_LOG_DIR", "./")
     PORT_FILE = os.path.join(log_dir, PORT_FILE)
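The nesting above means the "both set" warning only fires when the deprecated flag was actually given. A sketch of the same resolution rule as a pure function, just to make the three cases explicit (the function name and signature are illustrative, not part of the file):

    # Illustrative restatement of the port-resolution rule added above.
    from typing import Optional

    def resolve_flask_port(base_port: Optional[int], flask_port: Optional[int]) -> Optional[int]:
        if base_port is None:
            return flask_port   # normal path: only --flask_port (or nothing) was given
        if flask_port is None:
            return base_port    # deprecated --base_port used as a fallback
        return flask_port       # both given: --base_port is ignored

    assert resolve_flask_port(None, 8012) == 8012
    assert resolve_flask_port(8010, None) == 8010
    assert resolve_flask_port(8010, 8012) == 8012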