Skip to content

Commit 713fda7

Browse files
akaitsuki-ii authored and Glaceon-Hyy committed
fix qwen2 tokenizer
1 parent 10bff22 commit 713fda7

File tree

2 files changed

+8
-6
lines changed

2 files changed

+8
-6
lines changed

diffsynth_engine/tokenizers/qwen2.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -197,8 +197,8 @@ def __call__(
197197
encoded.fill_(self.pad_token_id)
198198
attention_mask = torch.zeros(len(texts), max_length, dtype=torch.long)
199199
for i, ids in enumerate(batch_ids):
200-
if len(ids) > self.model_max_length:
201-
ids = ids[: self.model_max_length]
200+
if len(ids) > max_length:
201+
ids = ids[:max_length]
202202
ids[-1] = self.eos_token_id
203203
if padding_side == "right":
204204
encoded[i, : len(ids)] = torch.tensor(ids)

diffsynth_engine/utils/parallel.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -304,12 +304,14 @@ def wrap_for_parallel(module: PreTrainedModel):
304304
if rank == 0:
305305
queue_out.put(res)
306306
dist.barrier()
307-
except Exception as e:
307+
except Exception:
308308
import traceback
309309

310-
traceback.print_exc()
311-
logger.error(f"Error in worker loop (rank {rank}): {e}")
312-
queue_out.put(e) # any exception caught in the worker will be raised to the main process
310+
msg = traceback.format_exc()
311+
err = RuntimeError(msg)
312+
logger.error(f"Error in worker loop (rank {rank}): {msg}")
313+
if rank == 0:
314+
queue_out.put(err) # any exception caught in the worker will be raised to the main process
313315
finally:
314316
del module
315317
torch.cuda.synchronize()

0 commit comments

Comments (0)