Commit 7ac061a

mglambda authored and ggerganov committed
simple-chat : fix BOS being added to each message (ggml-org#11278)
1 parent eda6c3e · commit 7ac061a

File tree

1 file changed: +3, -3 lines changed

examples/simple-chat/simple-chat.cpp

Lines changed: 3 additions & 3 deletions
@@ -95,11 +95,11 @@ int main(int argc, char ** argv) {
     llama_sampler_chain_add(smpl, llama_sampler_init_dist(LLAMA_DEFAULT_SEED));
 
     // helper function to evaluate a prompt and generate a response
-    auto generate = [&](const std::string & prompt) {
+    auto generate = [&](const std::string & prompt, bool is_first) {
         std::string response;
 
         // tokenize the prompt
-        const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, true, true);
+        const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, is_first, true);
         std::vector<llama_token> prompt_tokens(n_prompt_tokens);
         if (llama_tokenize(vocab, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), llama_get_kv_cache_used_cells(ctx) == 0, true) < 0) {
             GGML_ABORT("failed to tokenize the prompt\n");
@@ -180,7 +180,7 @@ int main(int argc, char ** argv) {
 
     // generate a response
     printf("\033[33m");
-    std::string response = generate(prompt);
+    std::string response = generate(prompt, prev_len == 0);
     printf("\n\033[0m");
 
     // add the response to the messages
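
For context, the sixth argument of llama_tokenize (add_special) controls whether special prefix tokens such as BOS are prepended. Before this commit the size-query call passed true unconditionally while the actual tokenization only added BOS when the KV cache was empty, so the two calls disagreed on every turn after the first and the token buffer could end up one element too large. The sketch below shows the corrected pattern as a standalone helper; the name tokenize_turn is hypothetical, and vocab/ctx are assumed to be the initialized llama_vocab pointer and llama_context from simple-chat.cpp:

#include <string>
#include <vector>
#include "llama.h"

// Sketch: tokenize one chat turn, adding BOS only for the first message.
static std::vector<llama_token> tokenize_turn(const llama_vocab * vocab,
                                              llama_context * ctx,
                                              const std::string & prompt) {
    // add BOS only if nothing has been decoded into the context yet
    const bool is_first = llama_get_kv_cache_used_cells(ctx) == 0;

    // a NULL output buffer makes llama_tokenize return the required
    // token count, negated
    const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(),
                                                NULL, 0, is_first, true);

    std::vector<llama_token> prompt_tokens(n_prompt_tokens);
    if (llama_tokenize(vocab, prompt.c_str(), prompt.size(),
                       prompt_tokens.data(), prompt_tokens.size(),
                       is_first, true) < 0) {
        GGML_ABORT("failed to tokenize the prompt\n");
    }
    return prompt_tokens;
}

Both calls here derive add_special from the same KV-cache check; the commit itself threads the equivalent condition prev_len == 0 in from the chat loop via the new is_first parameter, which amounts to the same thing at the call site.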
