File tree — 1 file changed: +6 −2 lines changed

examples/models/llama2/runner (1 file changed: +6 −2 lines changed)

Original file line number | Diff line number | Diff line change
@@ -152,11 +152,15 @@ Error Runner::generate(
152 152    ET_CHECK_MSG(num_prompt_tokens >= 1, "Expected at least 1 prompt token");
153 153    ET_CHECK_MSG(
154 154        num_prompt_tokens < max_seq_len_,
155     -      "Max seq length exceeded - please increase max seq len value in .../llama2/model.py");
    155 +      "num_prompt_tokens %d >= max_seq_len_ %d, Max seq length exceeded - please increase max seq len value in .../llama2/model.py",
    156 +      num_prompt_tokens,
    157 +      max_seq_len_);
156 158
157 159    ET_CHECK_MSG(
158 160        num_prompt_tokens < seq_len,
159     -      "Sequence length exceeded - please increase the seq_len value passed to generate()");
    161 +      "num_prompt_tokens %d >= seq_len %d, Sequence length exceeded - please increase the seq_len value passed to generate()",
    162 +      num_prompt_tokens,
    163 +      seq_len);
160 164
161 165    // Prefill first
162 166    // Here feed all tokens to the model and get the next predicted token
You can’t perform that action at this time.
0 commit comments