
Commit f590d4c

fix(chat): remove the duplicate_content arg in llm_messages_append to avoid a crash on llm_chat_free()
Refactored llm_messages_append to always duplicate message content with sqlite_strdup, removing the duplicate_content parameter. Passing non-duplicated content caused a double-free crash in llm_chat_free. Also, the ai->chat.response buffer is reset on each new prompt in the same chat, so we cannot rely on that buffer to keep previous response messages alive.
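
To make the first failure mode concrete, here is a minimal standalone sketch of the double free. This is not the actual sqlite-ai source: the structs, chat_free, and the ownership layout are assumptions modeled on the commit message. The point is that if the message list stores a borrowed pointer and its teardown frees every content pointer unconditionally, the same allocation gets freed twice.

/* Sketch of the pre-fix failure mode; all names here are hypothetical. */
#include <stdlib.h>
#include <string.h>

typedef struct {
    const char *role;
    char *content;
} chat_message;

typedef struct {
    chat_message *items;
    size_t count;
} chat_messages;

/* Hypothetical teardown: frees every content pointer it holds,
 * including ones it never duplicated (borrowed pointers). */
static void chat_free(chat_messages *list) {
    for (size_t i = 0; i < list->count; i++)
        free(list->items[i].content);
    free(list->items);
}

int main(void) {
    char *response = strdup("model output");   /* owned by the response buffer */

    chat_messages list = { .items = malloc(sizeof(chat_message)), .count = 1 };
    /* Old behavior with duplicate_content == false: store the pointer as-is. */
    list.items[0] = (chat_message){ .role = "assistant", .content = response };

    chat_free(&list);   /* first free of `response` */
    free(response);     /* response-buffer teardown: second free -> crash/UB */
    return 0;
}

Always calling sqlite_strdup on append makes the list the sole owner of every content string, so the unconditional free in teardown becomes correct by construction.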
1 parent 9469601 commit f590d4c

File tree

1 file changed: +5 −5 lines

src/sqlite-ai.c

Lines changed: 5 additions & 5 deletions
@@ -784,7 +784,7 @@ static bool llm_check_context (sqlite3_context *context) {
 
 // MARK: - Chat Messages -
 
-bool llm_messages_append (ai_messages *list, const char *role, const char *content, bool duplicate_content) {
+bool llm_messages_append (ai_messages *list, const char *role, const char *content) {
     if (list->count >= list->capacity) {
         size_t new_cap = list->capacity ? list->capacity * 2 : MIN_ALLOC_MESSAGES;
         llama_chat_message *new_items = sqlite3_realloc64(list->items, new_cap * sizeof(llama_chat_message));
@@ -796,7 +796,7 @@ bool llm_messages_append (ai_messages *list, const char *role, const char *conte
 
     bool duplicate_role = ((role != ROLE_USER) && (role != ROLE_ASSISTANT));
     list->items[list->count].role = (duplicate_role) ? sqlite_strdup(role) : role;
-    list->items[list->count].content = (duplicate_content) ? sqlite_strdup(content) : content;
+    list->items[list->count].content = sqlite_strdup(content);
     list->count += 1;
     return true;
 }
@@ -1512,7 +1512,7 @@ static bool llm_chat_save_response (ai_context *ai, ai_messages *messages, const
     char *response = ai->chat.response.data;
     if (!response) return false;
 
-    if (!llm_messages_append(messages, ROLE_ASSISTANT, response, false)) {
+    if (!llm_messages_append(messages, ROLE_ASSISTANT, response)) {
         sqlite_common_set_error (ai->context, ai->vtab, SQLITE_ERROR, "Failed to append response");
         return false;
     }
@@ -1643,7 +1643,7 @@ static bool llm_chat_run (ai_context *ai, ai_cursor *c, const char *user_prompt)
     buffer_t *formatted = &ai->chat.formatted;
 
     // save prompt input in history
-    if (!llm_messages_append(messages, ROLE_USER, user_prompt, true)) {
+    if (!llm_messages_append(messages, ROLE_USER, user_prompt)) {
         sqlite_common_set_error (ai->context, ai->vtab, SQLITE_ERROR, "Failed to append message");
         return false;
     }
@@ -1979,7 +1979,7 @@ static void llm_chat_restore (sqlite3_context *context, int argc, sqlite3_value
     const char *role = (const char *)sqlite3_column_text(vm, 0);
     const char *content = (const char *)sqlite3_column_text(vm, 1);
 
-    if (!llm_messages_append(messages, role, content, true)) {
+    if (!llm_messages_append(messages, role, content)) {
         sqlite_common_set_error (ai->context, ai->vtab, SQLITE_ERROR, "Failed to append response");
         rc = SQLITE_OK;
         goto abort_restore;
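
The second reason given in the commit message (the ai->chat.response buffer being reset on each new prompt) would bite even without the double free: a borrowed pointer into that buffer dangles as soon as the next prompt resets it. A minimal sketch, again with assumed names (this buffer_t and buffer_reset are stand-ins, not the real sqlite-ai internals):

/* Sketch of the stale-pointer risk from a reset response buffer. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { char *data; size_t len; } buffer_t;

static void buffer_reset(buffer_t *b) {
    free(b->data);          /* new prompt: old response storage is released */
    b->data = NULL;
    b->len = 0;
}

int main(void) {
    buffer_t response = { .data = strdup("first answer"), .len = 12 };

    /* Old behavior: the chat history keeps a borrowed pointer into the buffer. */
    const char *stored = response.data;

    buffer_reset(&response);   /* next prompt in the same chat resets the buffer */
    printf("%s\n", stored);    /* dangling read: undefined behavior */
    return 0;
}

Duplicating the content on append decouples the history from the buffer's lifetime, which is why the false flag in llm_chat_save_response had to go along with the parameter itself.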
