File tree — 2 files changed: +1, −22 lines.

@@ -112,27 +112,6 @@ def _prune_hidden_states(
         sampling_metadata.selected_token_indices)

-    def _get_prompt_and_output_tokens(
-        sampling_metadata: SamplingMetadata,
-    ) -> Tuple[List[List[int]], List[List[int]]]:
-        prompt_tokens: List[List[int]] = []
-        output_tokens: List[List[int]] = []
-        for i, seq_group in enumerate(sampling_metadata.seq_groups):
-            seq_ids, sampling_params = seq_group
-            if (i < sampling_metadata.num_prompts
-                    and sampling_params.prompt_logprobs is not None):
-                # NOTE: prompt token positions do not need output tokens to
-                # compute penalties.
-                prompt_len = sampling_metadata.prompt_lens[i]
-                prompt_tokens.extend([] for _ in range(prompt_len - 1))
-                output_tokens.extend([] for _ in range(prompt_len - 1))
-            for seq_id in seq_ids:
-                seq_data = sampling_metadata.seq_data[seq_id]
-                prompt_tokens.append(seq_data.prompt_token_ids)
-                output_tokens.append(seq_data.output_token_ids)
-        return prompt_tokens, output_tokens
-
-
 def _get_bin_counts_and_mask(
     tokens: torch.Tensor,
     vocab_size: int,

@@ -100,7 +100,7 @@ def __init__(
     temperature: float = 1.0,
     top_p: float = 1.0,
     top_k: int = -1,
-    min_p: int = 0.0,
+    min_p: float = 0.0,
     use_beam_search: bool = False,
     length_penalty: float = 1.0,
     early_stopping: Union[bool, str] = False,
You can’t perform that action at this time.
0 commit comments