@@ -41,13 +41,11 @@ static constexpr auto kUseSDPAWithKVCache = "use_sdpa_with_kv_cache";
 Runner::Runner(
     const std::string& model_path,
     const std::string& tokenizer_path,
-    const float temperature,
     std::optional<const std::string> data_path)
     // NOTE: we observed ~2x loading performance increase on iPhone 15
     // and a ~5% improvement on Galaxy S22 by switching to
     // FileDataLoader instead of MmapDataLoader + UseMlockIgnoreErrors.
-    : temperature_(temperature),
-      tokenizer_path_(tokenizer_path),
+    : tokenizer_path_(tokenizer_path),
       metadata_({
           {kEnableDynamicShape, false},
           {kMaxSeqLen, 128},
@@ -133,11 +131,9 @@ Error Runner::load() {
       ET_LOG(Info, "eos_id = %" PRId64, value);
     }
   }
+  // @lint-ignore CLANGTIDY facebook-hte-Deprecated
   text_decoder_runner_ = std::make_unique<llm::TextDecoderRunner>(
-      module_.get(),
-      metadata_.at(kUseKVCache),
-      metadata_.at(kVocabSize),
-      temperature_);
+      module_.get(), metadata_.at(kUseKVCache));
   text_prefiller_ = std::make_unique<llm::TextPrefiller>(
       text_decoder_runner_.get(),
       metadata_.at(kUseKVCache),
@@ -164,11 +160,9 @@ Error Runner::load() {
 
 Error Runner::generate(
     const std::string& prompt,
-    int32_t seq_len,
+    const ::executorch::extension::llm::GenerationConfig& config,
     std::function<void(const std::string&)> token_callback,
-    std::function<void(const llm::Stats&)> stats_callback,
-    bool echo,
-    bool warmup) {
+    std::function<void(const llm::Stats&)> stats_callback) {
   // Prepare the inputs.
   // Use ones-initialized inputs.
   ET_CHECK_MSG(!prompt.empty(), "Prompt cannot be null");
@@ -178,19 +172,19 @@ Error Runner::generate(
     stats_.model_load_end_ms = llm::time_in_ms();
   }
 
-  if (warmup) {
+  if (config.warming) {
     ET_LOG(Info, "Doing a warmup run...");
   }
 
   RUNNER_ET_LOG(
-      warmup,
+      config.warming,
       "RSS after loading model: %f MiB (0 if unsupported)",
       llm::get_rss_bytes() / 1024.0 / 1024.0);
 
   // Wrap the token_callback with print function
   std::function<void(const std::string&)> wrapped_callback =
-      [token_callback, warmup](const std::string& piece) {
-        if (!warmup) {
+      [token_callback, config](const std::string& piece) {
+        if (!config.warming) {
           llm::safe_printf(piece.c_str());
           fflush(stdout);
         }
@@ -204,11 +198,6 @@ Error Runner::generate(
   stats_.inference_start_ms = llm::time_in_ms();
   shouldStop_ = false;
 
-  // Set the sequence length to the max seq length if not provided
-  seq_len = (seq_len > 0 && seq_len <= metadata_.at(kMaxContextLen))
-      ? seq_len
-      : metadata_.at(kMaxContextLen);
-
   ::tokenizers::Result<std::vector<uint64_t>> encode_res = tokenizer_->encode(
       prompt,
       /* bos */ 0,
@@ -225,21 +214,22 @@ Error Runner::generate(
   ET_CHECK_MSG(
       num_prompt_tokens < metadata_.at(kMaxContextLen),
       "num_prompt_tokens %d >= max_seq_len_ %" PRId64
-      ", Max seq length exceeded - please increase max seq len value in .../llama2/model.py",
+      ", Max seq length exceeded - please increase max seq len value in your export script",
       num_prompt_tokens,
       metadata_.at(kMaxContextLen));
-  ET_CHECK_MSG(
-      num_prompt_tokens < seq_len,
-      "num_prompt_tokens %d >= seq_len %d, Sequence length exceeded - please increase the seq_len value passed to generate()",
-      num_prompt_tokens,
-      seq_len);
+
+  // Determine max_new_tokens using the GenerationConfig's resolve method
+  int max_new_tokens = config.resolve_max_new_tokens(
+      metadata_.at(kMaxContextLen), num_prompt_tokens);
+
+  ET_LOG(Info, "Max new tokens resolved: %d", max_new_tokens);
 
   // Prefill first
   // Here feed all tokens to the model and get the next predicted token
   // after the prompt. After that we will enter generate loop.
 
   // print prompts
-  if (echo) {
+  if (config.echo) {
     wrapped_callback(prompt);
   }
   int64_t pos = 0;
@@ -253,32 +243,38 @@ Error Runner::generate(
   wrapped_callback(
       ET_UNWRAP_TOKENIZER(tokenizer_->decode(cur_token, cur_token)));
   RUNNER_ET_LOG(
-      warmup,
+      config.warming,
       "RSS after prompt prefill: %f MiB (0 if unsupported)",
       llm::get_rss_bytes() / 1024.0 / 1024.0);
 
   // start the main loop
   prompt_tokens.push_back(cur_token);
+
+  // Generate max_new_tokens - 1 because prefill already generated 1 token.
   int64_t num_generated_tokens = ET_UNWRAP(text_token_generator_->generate(
-      prompt_tokens, num_prompt_tokens, seq_len, wrapped_callback));
+      prompt_tokens,
+      num_prompt_tokens,
+      max_new_tokens - 1,
+      config.temperature,
+      wrapped_callback));
 
   stats_.inference_end_ms = llm::time_in_ms();
-  if (!warmup) {
+  if (!config.warming) {
     printf("\n");
   }
   RUNNER_ET_LOG(
-      warmup,
+      config.warming,
       "RSS after finishing text generation: %f MiB (0 if unsupported)",
       llm::get_rss_bytes() / 1024.0 / 1024.0);
 
-  if (num_prompt_tokens + num_generated_tokens == seq_len) {
-    RUNNER_ET_LOG(warmup, "Sequence length (%i tokens) reached!", seq_len);
+  if (num_generated_tokens == max_new_tokens) {
+    RUNNER_ET_LOG(config.warming, "Max new tokens %i reached!", max_new_tokens);
   }
 
   stats_.num_prompt_tokens = num_prompt_tokens;
   stats_.num_generated_tokens = num_generated_tokens;
 
-  if (warmup) {
+  if (config.warming) {
     ET_LOG(Info, "Warmup run finished!");
   } else {
     // Do not print report during warmup
@@ -291,14 +287,15 @@ Error Runner::generate(
   return Error::Ok;
 }
 
-Error Runner::warmup(const std::string& prompt, int32_t seq_len) {
-  Error err = generate(
-      prompt,
-      seq_len,
-      /*token_callback=*/nullptr,
-      /*stats_callbak=*/nullptr,
-      /*echo=*/false,
-      /*warmup=*/true);
+Error Runner::warmup(const std::string& prompt, int32_t max_new_tokens) {
+  // Create a GenerationConfig for warmup
+  llm::GenerationConfig config{
+      .echo = false, .max_new_tokens = max_new_tokens, .warming = true};
+
+  // Call generate with the warmup config
+  Error err = generate(prompt, config);
+
+  // Reset stats after warmup
   stats_.reset();
   return err;
 }
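
For context, a caller that previously passed seq_len/echo/warmup flags to generate() would now build a GenerationConfig and hand it over per call. Below is a minimal sketch of that caller-side change, assuming only the field names (echo, max_new_tokens, warming, temperature) and the generate() signature shown in this diff; the header include, the runner's concrete type, and the prompt are illustrative, not part of the change.

// Hypothetical usage sketch of the new GenerationConfig-based generate() API.
// Assumes the relevant runner header is included, e.g.:
// #include "runner.h"  // assumed: declares Runner and llm::GenerationConfig
#include <functional>
#include <string>

template <typename RunnerT>
void run_example(RunnerT& runner) {
  // Build a per-call config instead of passing seq_len/echo/warmup flags.
  ::executorch::extension::llm::GenerationConfig config;
  config.echo = true;          // print the prompt back before streaming tokens
  config.max_new_tokens = 64;  // resolved against kMaxContextLen inside generate()
  config.temperature = 0.8f;   // per-call now, instead of a constructor argument
  config.warming = false;      // only warmup() sets this to true

  runner.generate(
      "Tell me a story",
      config,
      /*token_callback=*/[](const std::string& piece) { (void)piece; },
      /*stats_callback=*/nullptr);
}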