Skip to content

Commit 3d2650c

Browse files
author
ochafik
committed
fix gcc build
1 parent 749a21c commit 3d2650c

File tree

8 files changed

+24
-29
lines changed

8 files changed

+24
-29
lines changed

common/common.h

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -471,16 +471,17 @@ std::string llama_detokenize(
471471
// Chat template utils
472472
//
473473

474+
struct llama_chat_msg_tool_call {
475+
std::string name;
476+
std::string arguments;
477+
};
478+
474479
// same as llama_chat_message, but uses std::string and std::vector
475480
struct llama_chat_msg {
476481
std::string role;
477482
std::string content;
478483
std::string tool;
479-
struct llama_tool_call {
480-
std::string name;
481-
std::string arguments;
482-
};
483-
std::vector<llama_tool_call> tool_calls;
484+
std::vector<struct llama_chat_msg_tool_call> tool_calls;
484485
};
485486

486487
// Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
@@ -571,8 +572,8 @@ class llama_antiprompts {
571572
// The Aho–Corasick algorithm allows efficient string matching with multiple patterns.
572573
// See https://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_algorithm
573574
struct TrieNode {
574-
std::unordered_map<char, TrieNode> children;
575-
TrieNode* fail = nullptr;
575+
std::unordered_map<char, struct TrieNode> children;
576+
struct TrieNode* fail = nullptr;
576577
int output = -1;
577578
size_t depth = 0;
578579

common/json-schema-to-grammar.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1041,15 +1041,15 @@ std::string json_schema_to_grammar(const json & schema) {
10411041
}
10421042

10431043
std::string build_grammar(const std::function<void(const llama_grammar_builder &)> & cb) {
1044-
SchemaConverter converter([&](const std::string & name) { return json(); }, /* dotall= */ false);
1044+
SchemaConverter converter([&](const std::string &) { return json(); }, /* dotall= */ false);
10451045
llama_grammar_builder builder {
1046-
.add_rule = [&](const std::string & name, const std::string & rule) {
1046+
/* .add_rule = */ [&](const std::string & name, const std::string & rule) {
10471047
return converter.add_rule(name, rule);
10481048
},
1049-
.add_schema = [&](const std::string & name, const nlohmann::ordered_json & schema) {
1049+
/* .add_schema = */ [&](const std::string & name, const nlohmann::ordered_json & schema) {
10501050
return converter.visit(schema, name);
10511051
},
1052-
.resolve_refs = [&](nlohmann::ordered_json & schema) {
1052+
/* .resolve_refs = */ [&](nlohmann::ordered_json & schema) {
10531053
converter.resolve_refs(schema, "");
10541054
}
10551055
};

common/minja.hpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2160,7 +2160,7 @@ class Parser {
21602160
throw unterminated(**start);
21612161
}
21622162
children.emplace_back(nonstd_make_unique<MacroNode>(token->location, std::move(macro_token->name), std::move(macro_token->params), std::move(body)));
2163-
} else if (auto comment_token = dynamic_cast<CommentTemplateToken*>(token.get())) {
2163+
} else if (dynamic_cast<CommentTemplateToken*>(token.get())) {
21642164
// Ignore comments
21652165
} else if (dynamic_cast<EndForTemplateToken*>(token.get())
21662166
|| dynamic_cast<EndSetTemplateToken*>(token.get())

common/tool-call.cpp

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -41,8 +41,7 @@ static bool parse_json(std::string::const_iterator & it, const std::string::cons
4141

4242
json_error_locator() : position(0), found_error(false) {}
4343

44-
bool parse_error(std::size_t position, const std::string & last_token, const json::exception & ex) override {
45-
// LOG_WARNING("JSON error (Expected)", {{"position", position}, {"last_token", last_token}, {"error", ex.what()}});
44+
bool parse_error(std::size_t position, const std::string &, const json::exception &) override {
4645
this->position = position - 1;
4746
this->found_error = true;
4847
return false;
@@ -70,13 +69,11 @@ static bool parse_json(std::string::const_iterator & it, const std::string::cons
7069
temptative_end = end;
7170
}
7271
std::string json_sub {it, temptative_end};
73-
// LOG_WARNING("Parsing json", {{"json_sub", json_sub}});
7472
try {
7573
out = json::parse(json_sub);
7674
it = temptative_end;
7775
return true;
78-
} catch (const std::exception & e) {
79-
// LOG_WARNING("Failed to parse tool call", {{"json_sub", json_sub}, {"error", e.what()}});
76+
} catch (const std::exception &) {
8077
return false;
8178
}
8279
}

common/tool-call.h

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,14 @@
11
#pragma once
22

33
#include "ggml.h"
4+
#include "common.h"
45
// Change JSON_ASSERT from assert() to GGML_ASSERT:
56
#define JSON_ASSERT GGML_ASSERT
67
#include "json.hpp"
78

8-
struct llama_tool_call {
9-
std::string name;
10-
std::string arguments;
11-
};
12-
139
struct llama_tool_calls {
1410
std::string content;
15-
std::vector<llama_tool_call> tool_calls;
11+
std::vector<llama_chat_msg_tool_call> tool_calls;
1612
};
1713

1814
struct llama_tool_call_handler {

examples/server/server.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -662,7 +662,7 @@ struct server_context {
662662
bool validate_model_chat_template(bool use_jinja) const {
663663
llama_chat_message chat[] = {{"user", "test"}};
664664

665-
const int res = llama_chat_apply_template(model, nullptr, chat, 1, true, nullptr, 0, use_jinja);
665+
const int res = llama_chat_apply_template(model, nullptr, chat, 1, true, nullptr, 0, use_jinja, nullptr, nullptr, nullptr);
666666

667667
return res > 0;
668668
}

include/llama.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -378,17 +378,17 @@ extern "C" {
378378

379379
// used in chat template
380380

381-
typedef struct llama_tool_call {
381+
typedef struct llama_chat_message_tool_call {
382382
const char * name;
383383
const char * arguments;
384-
} llama_tool_call;
384+
} llama_chat_message_tool_call;
385385

386386
typedef struct llama_chat_message {
387387
const char * role;
388388
const char * content;
389389
const char * tool;
390390

391-
const llama_tool_call * tool_calls;
391+
const llama_chat_message_tool_call * tool_calls;
392392
uint32_t n_tool_calls;
393393
} llama_chat_message;
394394

src/llama.cpp

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21081,8 +21081,9 @@ static int32_t llama_chat_apply_template_internal(
2108121081
context->set("tools", tools_val);
2108221082
}
2108321083
auto tmpl_root = minja::Parser::parse(tmpl, {
21084-
.trim_blocks = true,
21085-
.lstrip_blocks = true,
21084+
/* .trim_blocks = */ true,
21085+
/* .lstrip_blocks = */ true,
21086+
/* .keep_trailing_newline = */ false,
2108621087
});
2108721088
try {
2108821089
dest = tmpl_root->render(context);

0 commit comments

Comments (0)