1 parent 75b87fa commit 594b080
lightllm/server/detokenization/decode_req.py
@@ -32,7 +32,7 @@ def __init__(
         self.input_len = self.req.input_len
         self.prefix_str = ""
         self.stop_strs: List[str] = self.req.sample_params.stop_sequences.to_strings()
-        self.stop_str_max_len = max([len(e) for e in self.stop_strs])
+        self.stop_str_max_len = max([len(e) for e in self.stop_strs] + [0])

     def init_token_healing_prefix_str(self, token_id_to_token: Dict[int, str], tokenizer):
         tokens = [token_id_to_token[token_id] for token_id in self.req.prefix_token_ids.get_token_ids()]
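For context: Python's built-in max() raises ValueError when given an empty sequence, so the old line would fail whenever a request had no stop sequences configured. Appending [0] guarantees the argument is non-empty and makes the max length default to 0 in that case. A minimal standalone sketch of the before/after behavior (illustrative only, not the repository's code):

    # Illustration of the fix: an empty stop-string list must not crash.
    stop_strs = []  # e.g. a request with no stop sequences

    # Old behavior: max() over an empty list raises ValueError.
    try:
        stop_str_max_len = max([len(e) for e in stop_strs])
    except ValueError:
        pass  # this is the crash path the patch removes

    # Patched behavior: the added [0] keeps the argument non-empty,
    # so an empty stop list simply yields a max length of 0.
    stop_str_max_len = max([len(e) for e in stop_strs] + [0])
    assert stop_str_max_len == 0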