@@ -372,14 +372,32 @@ bool llama_batch_allocr::init(const llama_batch & batch_inp, const llama_vocab &
     LLAMA_LOG_DEBUG("%s: n_outputs = %d\n", __func__, n_outputs);
 
     if (debug > 1) {
+        int seq_id_max = 0;
+        for (int32_t i = 0; i < batch.n_tokens; ++i) {
+            for (int s = 0; s < batch.n_seq_id[i]; ++s) {
+                seq_id_max = std::max(seq_id_max, batch.seq_id[i][s]);
+            }
+        }
+        ++seq_id_max;
+
         LLAMA_LOG_DEBUG("%s: token = [\n", __func__);
         for (int32_t i = 0; i < batch.n_tokens; ++i) {
-            std::stringstream ss;
+            std::vector<int8_t> seq_id(seq_id_max);
+
             for (int s = 0; s < batch.n_seq_id[i]; ++s) {
-                ss << batch.seq_id[i][s] << " ";
+                seq_id[batch.seq_id[i][s]] = 1;
+            }
+
+            std::stringstream ss;
+            for (int s = 0; s < seq_id_max; ++s) {
+                if (seq_id[s]) {
+                    ss << s%10;
+                } else {
+                    ss << ".";
+                }
             }
 
-            LLAMA_LOG_DEBUG("%s: %4d: id = %6d (%8s), pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n",
+            LLAMA_LOG_DEBUG("%s: %4d: id = %6d (%16s), pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n",
                     __func__, i, batch.token[i], vocab.token_to_piece(batch.token[i]).c_str(),
                     batch.pos[i], batch.n_seq_id[i], ss.str().c_str(), batch.logits[i]);
         }
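
For context, the hunk above replaces the plain list of sequence ids in the debug log with a fixed-width column view: for each token, column s prints s%10 if the token belongs to sequence s and "." otherwise, so membership lines up vertically across tokens. The standalone sketch below illustrates only that rendering idea; the batch data and the render_seq_ids helper are invented for the example and are not part of llama.cpp.

// Standalone sketch of the seq_id column rendering shown in the hunk above.
// The batch contents here are hypothetical; only the rendering mirrors the patch.
#include <algorithm>
#include <cstdio>
#include <sstream>
#include <string>
#include <vector>

// Render one token's sequence membership: digit (s%10) where the token is in
// sequence s, '.' where it is not.
static std::string render_seq_ids(const std::vector<int> & seq_ids, int seq_id_max) {
    std::vector<int8_t> present(seq_id_max, 0);
    for (int s : seq_ids) {
        present[s] = 1;
    }

    std::stringstream ss;
    for (int s = 0; s < seq_id_max; ++s) {
        ss << (present[s] ? char('0' + s % 10) : '.');
    }
    return ss.str();
}

int main() {
    // Hypothetical batch: the sequence ids each token belongs to.
    const std::vector<std::vector<int>> batch_seq_ids = {
        {0}, {0, 2}, {1}, {0, 1, 3},
    };

    // seq_id_max = largest referenced sequence id + 1, as in the patch.
    int seq_id_max = 0;
    for (const auto & ids : batch_seq_ids) {
        for (int s : ids) {
            seq_id_max = std::max(seq_id_max, s);
        }
    }
    ++seq_id_max;

    for (size_t i = 0; i < batch_seq_ids.size(); ++i) {
        printf("%4zu: seq_id = [%s]\n", i, render_seq_ids(batch_seq_ids[i], seq_id_max).c_str());
    }
    return 0;
}

With this data the sketch prints "0...", "0.2.", ".1..", "01.3", which is how the new debug lines make tokens shared across sequences easy to spot at a glance.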