
Commit 9140561

[Minor] Fix typo and remove unused code (#2305)
1 parent 77af974 commit 9140561

2 files changed: 1 addition & 22 deletions

vllm/model_executor/layers/sampler.py

Lines changed: 0 additions & 21 deletions
@@ -112,27 +112,6 @@ def _prune_hidden_states(
                                       sampling_metadata.selected_token_indices)
 
 
-def _get_prompt_and_output_tokens(
-    sampling_metadata: SamplingMetadata,
-) -> Tuple[List[List[int]], List[List[int]]]:
-    prompt_tokens: List[List[int]] = []
-    output_tokens: List[List[int]] = []
-    for i, seq_group in enumerate(sampling_metadata.seq_groups):
-        seq_ids, sampling_params = seq_group
-        if (i < sampling_metadata.num_prompts
-                and sampling_params.prompt_logprobs is not None):
-            # NOTE: prompt token positions do not need output tokens to
-            # compute penalties.
-            prompt_len = sampling_metadata.prompt_lens[i]
-            prompt_tokens.extend([] for _ in range(prompt_len - 1))
-            output_tokens.extend([] for _ in range(prompt_len - 1))
-        for seq_id in seq_ids:
-            seq_data = sampling_metadata.seq_data[seq_id]
-            prompt_tokens.append(seq_data.prompt_token_ids)
-            output_tokens.append(seq_data.output_token_ids)
-    return prompt_tokens, output_tokens
-
-
 def _get_bin_counts_and_mask(
     tokens: torch.Tensor,
     vocab_size: int,
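The deleted _get_prompt_and_output_tokens built per-sequence Python lists of prompt and output token ids for penalty computation; per the commit title it had no remaining callers, since the sampler works from token-id tensors through the retained _get_bin_counts_and_mask. As a rough, self-contained sketch of that bin-count idea (illustrative only, not vLLM's actual implementation; the function name and shapes here are assumptions):

import torch

def bin_counts_and_mask(tokens: torch.Tensor, vocab_size: int):
    # tokens: (num_seqs, seq_len) tensor of token ids.
    # Count how many times each vocab id occurs in each sequence.
    bin_counts = torch.zeros(tokens.shape[0], vocab_size, dtype=torch.long)
    bin_counts.scatter_add_(1, tokens, torch.ones_like(tokens))
    # Presence mask: True where a token id occurred at least once.
    mask = bin_counts > 0
    return bin_counts, mask

counts, mask = bin_counts_and_mask(
    torch.tensor([[2, 2, 5], [1, 3, 3]]), vocab_size=8)
# counts[0] -> [0, 0, 2, 0, 0, 1, 0, 0]; mask[0, 2] is True.

Tensors like these let repetition/frequency-style penalties be applied to the whole logits batch at once, which is why the list-building helper was redundant.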

vllm/sampling_params.py

Lines changed: 1 addition & 1 deletion
@@ -100,7 +100,7 @@ def __init__(
         temperature: float = 1.0,
         top_p: float = 1.0,
         top_k: int = -1,
-        min_p: int = 0.0,
+        min_p: float = 0.0,
         use_beam_search: bool = False,
         length_penalty: float = 1.0,
         early_stopping: Union[bool, str] = False,
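This change is annotation-only: min_p has always taken a float in [0, 1], the minimum probability a token may have, relative to the most likely token, to stay in the sampling pool (0.0 disables the filter). A minimal usage sketch, assuming vLLM's public SamplingParams entry point and using only the parameters visible in this signature:

from vllm import SamplingParams

# min_p is a float, matching the corrected annotation.
params = SamplingParams(
    temperature=1.0,
    top_p=1.0,
    top_k=-1,
    min_p=0.05,  # keep tokens with prob >= 0.05 * prob of the top token
)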
