@@ -885,7 +885,7 @@ static void hellaswag_score(llama_context * ctx, const common_params & params) {
     // each task has 4 unique sequence ids - one for each ending
     // the common prefix is shared among the 4 sequences to save tokens
     // we extract logits only from the last common token and from all ending tokens of each sequence
-    while (n_cur + (int) hs_data[i1].required_tokens <= n_ctx) {
+    while (n_cur >= 0 && n_cur + (int) hs_data[i1].required_tokens <= n_ctx && n_cur + (int) hs_data[i1].required_tokens >= n_cur) {
         auto & hs_cur = hs_data[i1];
         int n_logits = 0;

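The added `n_cur + (int) hs_data[i1].required_tokens >= n_cur` clause is meant to reject a sum that has wrapped around. One caveat worth noting: signed integer overflow is undefined behavior in C++, so a compiler is in principle allowed to fold that clause to `true`; rearranging the comparison so the addition never happens expresses the same intent without relying on wrap-around. A minimal standalone sketch (the helper name `fits_in_context` is illustrative, not from the patch):

```cpp
// Standalone sketch, not the patch itself: an overflow-free way to express
// "n_cur plus required_tokens still fits in the context window".
#include <cstdio>

// hypothetical helper; the subtraction n_ctx - n_cur cannot overflow once
// n_cur is known to lie in [0, n_ctx], so no wrap-around check is needed
static bool fits_in_context(int n_cur, int required_tokens, int n_ctx) {
    return n_cur >= 0 && n_cur <= n_ctx && required_tokens <= n_ctx - n_cur;
}

int main() {
    printf("%d\n", fits_in_context(100, 50, 512));  // 1: fits
    printf("%d\n", fits_in_context(500, 50, 512));  // 0: exceeds the window
    printf("%d\n", fits_in_context(2000000000, 2000000000, 512)); // 0, with no UB on the way
    return 0;
}
```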
@@ -954,7 +954,12 @@ static void hellaswag_score(llama_context * ctx, const common_params & params) {
         auto & hs_cur = hs_data[i];

         // get the logits of the last token of the common prefix
-        std::memcpy(tok_logits.data(), batch_logits.data() + hs_cur.i_logits*n_vocab, n_vocab*sizeof(float));
+        size_t offset = hs_cur.i_logits * n_vocab;
+        if (offset > batch_logits.size() || offset + n_vocab > batch_logits.size()) {
+            LOG_ERR("%s: logits offset out of bounds\n", __func__);
+            return;
+        }
+        std::memcpy(tok_logits.data(), batch_logits.data() + offset, n_vocab*sizeof(float));

         const auto first_probs = softmax(tok_logits);

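The shape of this guard can be exercised in isolation. The sketch below (the helper name `copy_logits` and the test values are hypothetical, not from the patch) mirrors the added check: the first clause catches an `offset` that is already past the end of the vector, even in the corner case where `offset + n_vocab` would wrap around `SIZE_MAX` to a small value and slip past the second comparison alone.

```cpp
// Standalone sketch of the bounds check above (helper name and values are
// illustrative): verify that n_vocab floats starting at `offset` lie fully
// inside the source vector before copying them out.
#include <cstdio>
#include <vector>

static bool copy_logits(std::vector<float> & dst, const std::vector<float> & src,
                        size_t offset, size_t n_vocab) {
    // the first clause rejects an offset already past the end, even if
    // offset + n_vocab wraps around SIZE_MAX to a small value
    if (offset > src.size() || offset + n_vocab > src.size()) {
        fprintf(stderr, "logits offset out of bounds\n");
        return false;
    }
    dst.assign(src.begin() + offset, src.begin() + offset + n_vocab);
    return true;
}

int main() {
    std::vector<float> batch_logits(10, 1.0f);
    std::vector<float> tok_logits;
    printf("%d\n", copy_logits(tok_logits, batch_logits, 4, 4)); // 1: in bounds
    printf("%d\n", copy_logits(tok_logits, batch_logits, 8, 4)); // 0: runs past the end
    return 0;
}
```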
@@ -1078,7 +1083,9 @@ static std::vector<winogrande_entry> load_winogrande_from_csv(const std::string
             result.emplace_back();
             auto & wg = result.back();
             wg.first = sentence.substr(0, where);
-            wg.second = sentence.substr(where + 1, sentence.size() - where - 1);
+            if (where + 1 < sentence.size()) {
+                wg.second = sentence.substr(where + 1, sentence.size() - where - 1);
+            }
             wg.choices[0] = std::move(choice1);
             wg.choices[1] = std::move(choice2);
             wg.answer = i_answer;
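The guard around the second `substr` exists because `std::string::substr(pos, count)` throws `std::out_of_range` when `pos > size()`, which can happen here when the separator sits at or past the last character of the sentence; with the check in place, `wg.second` simply stays empty. A small self-contained illustration (the `split_at` helper is hypothetical, not the patch itself):

```cpp
// Standalone illustration of the guarded split: substr(pos, ...) throws
// std::out_of_range when pos > size(), so a separator found at the very end
// of the string must not be followed past its last character.
#include <cstdio>
#include <string>

static void split_at(const std::string & sentence, size_t where,
                     std::string & first, std::string & second) {
    first = sentence.substr(0, where);
    second.clear();
    if (where + 1 < sentence.size()) {
        second = sentence.substr(where + 1);
    }
}

int main() {
    std::string a, b;
    split_at("left_right", 4, a, b);
    printf("'%s' '%s'\n", a.c_str(), b.c_str()); // 'left' 'right'
    split_at("trailing_", 8, a, b);
    printf("'%s' '%s'\n", a.c_str(), b.c_str()); // 'trailing' ''
    return 0;
}
```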