
Commit 6d4cb06

fix

1 parent 866cd79

2 files changed: +1 −7 lines

lightllm/models/qwen3/layer_weights/transformer_layer_weight.py

Lines changed: 0 additions & 6 deletions
@@ -19,12 +19,6 @@
 
 class Qwen3TransformerLayerWeight(LlamaTransformerLayerWeight):
     def __init__(self, layer_num, data_type, network_config, mode=[], quant_cfg=None):
-        self.n_routed_experts = network_config["num_experts"]
-        self.is_moe = (
-            network_config["num_experts"] > 0
-            and layer_num not in network_config["mlp_only_layers"]
-            and (layer_num + 1) % network_config["decoder_sparse_step"] == 0
-        )
         super().__init__(layer_num, data_type, network_config, mode, quant_cfg)
         return
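This hunk drops the MoE bookkeeping (`n_routed_experts`, `is_moe`) from `Qwen3TransformerLayerWeight.__init__`. One plausible motivation, an assumption rather than anything stated in the commit message: dense Qwen3 configs do not carry MoE keys such as "num_experts", so the deleted lookups would raise KeyError before `super().__init__` ever ran. A minimal sketch under that assumption:

# Hypothetical dense-model config: MoE keys like "num_experts",
# "mlp_only_layers", and "decoder_sparse_step" are simply absent.
dense_network_config = {"hidden_size": 4096, "num_attention_heads": 32}

# The deleted lines indexed the config directly, e.g.:
#     self.n_routed_experts = network_config["num_experts"]
# On a config like the one above, that raises KeyError:
try:
    n_routed_experts = dense_network_config["num_experts"]
except KeyError as missing:
    print(f"MoE key not present in dense config: {missing}")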

lightllm/server/api_models.py

Lines changed: 1 addition & 1 deletion
@@ -76,7 +76,7 @@ class ChatCompletionRequest(BaseModel):
     ignore_eos: Optional[bool] = False
     role_settings: Optional[Dict[str, str]] = None
     character_settings: Optional[List[Dict[str, str]]] = None
-    chat_template_kwargs: Optional[Dict[str, str]] = None
+    chat_template_kwargs: Optional[Dict[str, bool]] = None
 
 
 class FunctionResponse(BaseModel):
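The only functional change here is the value type of `chat_template_kwargs`, from `Dict[str, str]` to `Dict[str, bool]`, so boolean template flags validate as booleans rather than having to be sent as strings. A minimal, self-contained sketch of how the field behaves after the change; the trimmed-down model and the "enable_thinking" key are illustrative assumptions (a common boolean chat-template flag for Qwen3-style models), not taken from this commit:

from typing import Dict, Optional

from pydantic import BaseModel, ValidationError


# Trimmed stand-in for ChatCompletionRequest; the real class in
# lightllm/server/api_models.py carries many more fields.
class ChatCompletionRequest(BaseModel):
    ignore_eos: Optional[bool] = False
    chat_template_kwargs: Optional[Dict[str, bool]] = None


# Boolean flags are now accepted directly; "enable_thinking" is a
# hypothetical key used for illustration.
req = ChatCompletionRequest(chat_template_kwargs={"enable_thinking": True})
print(req.chat_template_kwargs)  # {'enable_thinking': True}

# A value that cannot be read as a boolean is rejected at validation time.
try:
    ChatCompletionRequest(chat_template_kwargs={"enable_thinking": "maybe"})
except ValidationError:
    print("non-boolean value rejected")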
