This repository was archived by the owner on Sep 10, 2025. It is now read-only.

Commit dc3d35e

[Easy] Remove unused variable and comments (#1256)

1 parent fc96127

File tree

1 file changed: +0 −9 lines changed

torchchat/generate.py

Lines changed: 0 additions & 9 deletions
@@ -262,7 +262,6 @@ def __init__(
             """
         ))
         # fmt: on
-        # raise RuntimeError("You need to use --is-chat-model to indicate model has chat support.")
         self.system_prompt = generator_args.prompt
         self.tokenizer = _initialize_tokenizer(self.tokenizer_args)
 
@@ -493,7 +492,6 @@ def decode_n_tokens(
                     next_prob.clone() if next_prob is not None else None
                 )
 
-        # return new_tokens, new_probs
 
     def model_forward(self, model, x, input_pos):
         return model(x, input_pos)
@@ -593,8 +591,6 @@ def generate(
         is_speculative = draft_model is not None
         device, dtype = prompt.device, prompt.dtype
 
-        # create an empty tensor of the expected final shape and
-        # fill in the current tokens
         if len(prompt.shape) > 1:
             prompt = prompt.squeeze(0)
         T = prompt.size(0)
@@ -620,11 +616,6 @@ def generate(
         if model.config.model_type == ModelType.Flamingo:
             model.reset_caches()
 
-        # create an empty tensor of the expected final shape and
-        # fill in the current tokens
-        empty = torch.empty(T_new, dtype=dtype, device=device)
-        empty[:T] = prompt
-
         input_pos = torch.arange(
             start_pos, T + start_pos, device=device, dtype=torch.int
         )
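
For context on the largest removal: the deleted `empty` tensor was a leftover preallocation that nothing downstream read, making it a dead store. The following minimal sketch, with hypothetical names and a simplified signature (not the actual torchchat `generate()`), illustrates why those lines were safe to delete:

import torch

def sketch_generate(prompt: torch.Tensor, max_new_tokens: int) -> torch.Tensor:
    # Hypothetical, simplified stand-in for torchchat's generate().
    T = prompt.size(0)
    T_new = T + max_new_tokens

    # The removed pattern: preallocate the full output buffer and seed it
    # with the prompt. Nothing below ever reads `empty`, so this is a
    # dead store -- an allocation and copy with no effect on the result.
    empty = torch.empty(T_new, dtype=prompt.dtype, device=prompt.device)
    empty[:T] = prompt

    # The live path assembles its output from the tokens it actually
    # decodes (zeros here stand in for decoded token ids).
    new_tokens = torch.zeros(max_new_tokens, dtype=prompt.dtype, device=prompt.device)
    return torch.cat([prompt, new_tokens])

# Usage: behavior is identical with the two `empty` lines deleted.
out = sketch_generate(torch.tensor([1, 2, 3]), max_new_tokens=4)
assert out.size(0) == 7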

0 commit comments