Commit d04b9f5

Merge pull request #243 from menloresearch/update-dev-from-master-2025-09-06-00-31
Sync master with upstream release b6396
2 parents (0b3190b + fd62188), commit d04b9f5

31 files changed: +669, -169 lines

.github/workflows/close-issue.yml

Lines changed: 1 addition & 1 deletion
```diff
@@ -17,7 +17,7 @@ jobs:
     steps:
       - uses: actions/stale@v5
         with:
-          exempt-issue-labels: "refactoring,help wanted,good first issue,research,bug,roadmap"
+          exempt-issue-labels: "refactoring,help wanted,good first issue,research 🔬,bug,roadmap"
           days-before-issue-stale: 30
           days-before-issue-close: 14
           stale-issue-label: "stale"
```

common/arg.cpp

Lines changed: 42 additions & 22 deletions
```diff
@@ -1263,6 +1263,18 @@ static std::string list_builtin_chat_templates() {
     return msg.str();
 }
 
+static bool is_truthy(const std::string & value) {
+    return value == "on" || value == "enabled" || value == "1";
+}
+
+static bool is_falsey(const std::string & value) {
+    return value == "off" || value == "disabled" || value == "0";
+}
+
+static bool is_autoy(const std::string & value) {
+    return value == "auto" || value == "-1";
+}
+
 common_params_context common_params_parser_init(common_params & params, llama_example ex, void(*print_usage)(int, char **)) {
     // load dynamic backends
     ggml_backend_load_all();
@@ -1544,21 +1556,21 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.n_chunks = value;
         }
     ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY, LLAMA_EXAMPLE_RETRIEVAL}));
-    add_opt(common_arg(
-        {"-fa", "--flash-attn"}, "FA",
-        string_format("set Flash Attention use ('on', 'off', or 'auto', default: '%s')", llama_flash_attn_type_name(params.flash_attn_type)),
-        [](common_params & params, const std::string & value) {
-            if (value == "on" || value == "enabled" || value == "1") {
-                params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_ENABLED;
-            } else if (value == "off" || value == "disabled" || value == "0") {
-                params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_DISABLED;
-            } else if (value == "auto" || value == "-1") {
-                params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_AUTO;
-            } else {
-                throw std::runtime_error(string_format("error: unkown value for --flash-attn: '%s'\n", value.c_str()));
-            }
-        }
-    ).set_env("LLAMA_ARG_FLASH_ATTN"));
+    add_opt(common_arg({ "-fa", "--flash-attn" }, "[on|off|auto]",
+            string_format("set Flash Attention use ('on', 'off', or 'auto', default: '%s')",
+                          llama_flash_attn_type_name(params.flash_attn_type)),
+            [](common_params & params, const std::string & value) {
+                if (is_truthy(value)) {
+                    params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_ENABLED;
+                } else if (is_falsey(value)) {
+                    params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_DISABLED;
+                } else if (is_autoy(value)) {
+                    params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_AUTO;
+                } else {
+                    throw std::runtime_error(
+                        string_format("error: unkown value for --flash-attn: '%s'\n", value.c_str()));
+                }
+            }).set_env("LLAMA_ARG_FLASH_ATTN"));
     add_opt(common_arg(
         {"-p", "--prompt"}, "PROMPT",
         "prompt to start generation with; for system message, use -sys",
@@ -3134,13 +3146,21 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
            common_log_set_file(common_log_main(), value.c_str());
         }
     ));
-    add_opt(common_arg(
-        {"--log-colors"},
-        "Enable colored logging",
-        [](common_params &) {
-            common_log_set_colors(common_log_main(), true);
-        }
-    ).set_env("LLAMA_LOG_COLORS"));
+    add_opt(common_arg({ "--log-colors" }, "[on|off|auto]",
+            "Set colored logging ('on', 'off', or 'auto', default: 'auto')\n"
+            "'auto' enables colors when output is to a terminal",
+            [](common_params &, const std::string & value) {
+                if (is_truthy(value)) {
+                    common_log_set_colors(common_log_main(), LOG_COLORS_ENABLED);
+                } else if (is_falsey(value)) {
+                    common_log_set_colors(common_log_main(), LOG_COLORS_DISABLED);
+                } else if (is_autoy(value)) {
+                    common_log_set_colors(common_log_main(), LOG_COLORS_AUTO);
+                } else {
+                    throw std::invalid_argument(
+                        string_format("error: unkown value for --log-colors: '%s'\n", value.c_str()));
+                }
+            }).set_env("LLAMA_LOG_COLORS"));
     add_opt(common_arg(
         {"-v", "--verbose", "--log-verbose"},
         "Set verbosity level to infinity (i.e. log all messages, useful for debugging)",
```
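
The new `is_truthy` / `is_falsey` / `is_autoy` helpers centralize the tri-state parsing that `--flash-attn` and `--log-colors` (and their environment variables `LLAMA_ARG_FLASH_ATTN` and `LLAMA_LOG_COLORS`) now share. A minimal standalone sketch of the same mapping is below; `tri_state` and `parse_tri_state` are illustrative names, not part of the llama.cpp API.

```cpp
#include <iostream>
#include <stdexcept>
#include <string>

// Hypothetical standalone mirror of the parsing added in common/arg.cpp:
// "on"/"enabled"/"1" -> enabled, "off"/"disabled"/"0" -> disabled, "auto"/"-1" -> autodetect.
enum class tri_state { enabled, disabled, autodetect };

static tri_state parse_tri_state(const std::string & value) {
    if (value == "on"   || value == "enabled"  || value == "1") { return tri_state::enabled; }
    if (value == "off"  || value == "disabled" || value == "0") { return tri_state::disabled; }
    if (value == "auto" || value == "-1")                       { return tri_state::autodetect; }
    throw std::invalid_argument("unknown tri-state value: '" + value + "'");
}

int main() {
    for (const char * v : { "on", "0", "auto", "-1" }) {
        std::cout << v << " -> " << static_cast<int>(parse_tri_state(v)) << '\n';
    }
    return 0;
}
```

Anything outside these three value sets throws, matching the error paths of the two options above.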

common/chat.cpp

Lines changed: 13 additions & 0 deletions
```diff
@@ -163,6 +163,19 @@ common_chat_tool_choice common_chat_tool_choice_parse_oaicompat(const std::strin
     throw std::runtime_error("Invalid tool_choice: " + tool_choice);
 }
 
+bool common_chat_templates_support_enable_thinking(const common_chat_templates * chat_templates) {
+    common_chat_templates_inputs dummy_inputs;
+    common_chat_msg msg;
+    msg.role = "user";
+    msg.content = "test";
+    dummy_inputs.messages = {msg};
+    dummy_inputs.enable_thinking = false;
+    const auto rendered_no_thinking = common_chat_templates_apply(chat_templates, dummy_inputs);
+    dummy_inputs.enable_thinking = true;
+    const auto rendered_with_thinking = common_chat_templates_apply(chat_templates, dummy_inputs);
+    return rendered_no_thinking.prompt != rendered_with_thinking.prompt;
+}
+
 template <>
 std::vector<common_chat_msg> common_chat_msgs_parse_oaicompat(const json & messages) {
     std::vector<common_chat_msg> msgs;
```
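
The probe renders a one-message dummy conversation twice, once with `enable_thinking = false` and once with `true`, and reports support only if the resulting prompts differ. A hedged caller-side sketch (the `resolve_enable_thinking` helper is hypothetical, not part of this commit):

```cpp
#include <cstdio>

#include "chat.h" // common/chat.h, assuming llama.cpp's common library is on the include path

// Hypothetical helper: honor a user's "thinking" toggle only when the loaded
// chat template actually reacts to enable_thinking.
static bool resolve_enable_thinking(const common_chat_templates * tmpls, bool user_wants_thinking) {
    if (user_wants_thinking && !common_chat_templates_support_enable_thinking(tmpls)) {
        fprintf(stderr, "warning: chat template ignores enable_thinking; disabling it\n");
        return false;
    }
    return user_wants_thinking;
}
```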

common/chat.h

Lines changed: 2 additions & 0 deletions
```diff
@@ -199,6 +199,8 @@ common_chat_msg common_chat_parse(const std::string & input, bool is_p
 
 common_chat_tool_choice common_chat_tool_choice_parse_oaicompat(const std::string & tool_choice);
 
+bool common_chat_templates_support_enable_thinking(const common_chat_templates * chat_templates);
+
 // Parses a JSON array of messages in OpenAI's chat completion API format.
 // T can be std::string containing JSON or nlohmann::ordered_json
 template <class T> std::vector<common_chat_msg> common_chat_msgs_parse_oaicompat(const T & messages);
```

common/log.cpp

Lines changed: 53 additions & 2 deletions
```diff
@@ -4,17 +4,52 @@
 #include <condition_variable>
 #include <cstdarg>
 #include <cstdio>
+#include <cstdlib>
+#include <cstring>
 #include <mutex>
 #include <sstream>
 #include <thread>
 #include <vector>
 
+#if defined(_WIN32)
+#  include <io.h>
+#  include <windows.h>
+#  define isatty _isatty
+#  define fileno _fileno
+#else
+#  include <unistd.h>
+#endif // defined(_WIN32)
+
 int common_log_verbosity_thold = LOG_DEFAULT_LLAMA;
 
 void common_log_set_verbosity_thold(int verbosity) {
     common_log_verbosity_thold = verbosity;
 }
 
+// Auto-detect if colors should be enabled based on terminal and environment
+static bool common_log_should_use_colors_auto() {
+    // Check NO_COLOR environment variable (https://no-color.org/)
+    if (const char * no_color = std::getenv("NO_COLOR")) {
+        if (no_color[0] != '\0') {
+            return false;
+        }
+    }
+
+    // Check TERM environment variable
+    if (const char * term = std::getenv("TERM")) {
+        if (std::strcmp(term, "dumb") == 0) {
+            return false;
+        }
+    }
+
+    // Check if stdout and stderr are connected to a terminal
+    // We check both because log messages can go to either
+    bool stdout_is_tty = isatty(fileno(stdout));
+    bool stderr_is_tty = isatty(fileno(stderr));
+
+    return stdout_is_tty || stderr_is_tty;
+}
+
 static int64_t t_us() {
     return std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
 }
@@ -353,6 +388,11 @@ struct common_log * common_log_init() {
 
 struct common_log * common_log_main() {
     static struct common_log log;
+    static std::once_flag init_flag;
+    std::call_once(init_flag, [&]() {
+        // Set default to auto-detect colors
+        log.set_colors(common_log_should_use_colors_auto());
+    });
 
     return &log;
 }
@@ -380,8 +420,19 @@ void common_log_set_file(struct common_log * log, const char * file) {
     log->set_file(file);
 }
 
-void common_log_set_colors(struct common_log * log, bool colors) {
-    log->set_colors(colors);
+void common_log_set_colors(struct common_log * log, log_colors colors) {
+    if (colors == LOG_COLORS_AUTO) {
+        log->set_colors(common_log_should_use_colors_auto());
+        return;
+    }
+
+    if (colors == LOG_COLORS_DISABLED) {
+        log->set_colors(false);
+        return;
+    }
+
+    GGML_ASSERT(colors == LOG_COLORS_ENABLED);
+    log->set_colors(true);
 }
 
 void common_log_set_prefix(struct common_log * log, bool prefix) {
```
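
With the `bool` parameter replaced by the `log_colors` enum, callers can force colors on or off, or delegate the decision to the NO_COLOR / TERM / isatty detection above (which `common_log_main()` now also runs once at startup via `std::call_once`). A small hedged sketch; the `configure_logging` wrapper is hypothetical, not part of this commit:

```cpp
#include "log.h" // common/log.h

// Hypothetical wrapper: LOG_COLORS_AUTO re-runs the NO_COLOR / TERM / isatty
// detection shown above, while the explicit values force colors on or off.
static void configure_logging(bool force_color, bool force_plain) {
    if (force_color) {
        common_log_set_colors(common_log_main(), LOG_COLORS_ENABLED);
    } else if (force_plain) {
        common_log_set_colors(common_log_main(), LOG_COLORS_DISABLED);
    } else {
        common_log_set_colors(common_log_main(), LOG_COLORS_AUTO);
    }
}
```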

common/log.h

Lines changed: 10 additions & 4 deletions
```diff
@@ -24,6 +24,12 @@
 #define LOG_DEFAULT_DEBUG 1
 #define LOG_DEFAULT_LLAMA 0
 
+enum log_colors {
+    LOG_COLORS_AUTO     = -1,
+    LOG_COLORS_DISABLED = 0,
+    LOG_COLORS_ENABLED  = 1,
+};
+
 // needed by the LOG_TMPL macro to avoid computing log arguments if the verbosity lower
 // set via common_log_set_verbosity()
 extern int common_log_verbosity_thold;
@@ -65,10 +71,10 @@ void common_log_add(struct common_log * log, enum ggml_log_level level, const ch
 // D - debug (stderr, V = LOG_DEFAULT_DEBUG)
 //
 
-void common_log_set_file      (struct common_log * log, const char * file); // not thread-safe
-void common_log_set_colors    (struct common_log * log, bool colors);       // not thread-safe
-void common_log_set_prefix    (struct common_log * log, bool prefix);       // whether to output prefix to each log
-void common_log_set_timestamps(struct common_log * log, bool timestamps);   // whether to output timestamps in the prefix
+void common_log_set_file      (struct common_log * log, const char * file);  // not thread-safe
+void common_log_set_colors    (struct common_log * log, log_colors colors);  // not thread-safe
+void common_log_set_prefix    (struct common_log * log, bool prefix);        // whether to output prefix to each log
+void common_log_set_timestamps(struct common_log * log, bool timestamps);    // whether to output timestamps in the prefix
 
 // helper macros for logging
 // use these to avoid computing log arguments if the verbosity of the log is higher than the threshold
```

convert_lora_to_gguf.py

Lines changed: 27 additions & 1 deletion
```diff
@@ -12,7 +12,7 @@
 from math import prod
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, Callable, Iterable, Iterator, Sequence, SupportsIndex, cast
-from transformers import AutoConfig
+from transformers import AutoConfig, AutoTokenizer
 
 import torch
 
@@ -26,6 +26,8 @@
 # reuse model definitions from convert_hf_to_gguf.py
 from convert_hf_to_gguf import LazyTorchTensor, ModelBase
 
+from gguf.constants import GGUFValueType
+
 logger = logging.getLogger("lora-to-gguf")
 
 
@@ -369,7 +371,31 @@ def set_type(self):
         self.gguf_writer.add_string(gguf.Keys.Adapter.TYPE, "lora")
 
     def set_gguf_parameters(self):
+        logger.debug("GGUF KV: %s = %d", gguf.Keys.Adapter.LORA_ALPHA, self.lora_alpha)
         self.gguf_writer.add_float32(gguf.Keys.Adapter.LORA_ALPHA, self.lora_alpha)
+        alora_invocation_tokens = lparams.get("alora_invocation_tokens")
+        invocation_string = lparams.get("invocation_string")
+        if invocation_string and not alora_invocation_tokens:
+            logger.debug("Tokenizing invocation_string -> alora_invocation_tokens")
+            base_model_path_or_id = hparams.get("_name_or_path")
+            try:
+                tokenizer = AutoTokenizer.from_pretrained(base_model_path_or_id)
+            except ValueError:
+                logger.error("Unable to load tokenizer from %s", base_model_path_or_id)
+                raise
+            # NOTE: There's an off-by-one with the older aLoRAs where
+            #   the invocation string includes the "<|start_of_turn|>"
+            #   token, but the adapters themselves were trained to
+            #   activate _after_ that first token, so we drop it here.
+            alora_invocation_tokens = tokenizer(invocation_string)["input_ids"][1:]
+        if alora_invocation_tokens:
+            logger.debug("GGUF KV: %s = %s", gguf.Keys.Adapter.ALORA_INVOCATION_TOKENS, alora_invocation_tokens)
+            self.gguf_writer.add_key_value(
+                gguf.Keys.Adapter.ALORA_INVOCATION_TOKENS,
+                alora_invocation_tokens,
+                GGUFValueType.ARRAY,
+                GGUFValueType.UINT32,
+            )
 
     def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
         # Never add extra tensors (e.g. rope_freqs) for LoRA adapters
```

examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp

Lines changed: 5 additions & 5 deletions
```diff
@@ -333,17 +333,17 @@ static void print_params(struct my_llama_hparams * params) {
 }
 
 static void print_tensor_info(const struct ggml_context * ctx) {
-    for (auto t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
+    for (auto * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
         LOG_INF("%s: Allocating ", __func__);
         int64_t total = 1;
         int i = 0;
         for (; i < ggml_n_dims(t); ++i) {
-            if (i > 0) LOG("x ");
-            LOG("[%" PRId64 "] ", t->ne[i]);
+            if (i > 0) { LOG_INF("x "); }
+            LOG_INF("[%" PRId64 "] ", t->ne[i]);
             total *= t->ne[i];
         }
-        if (i > 1) LOG("= [%" PRId64 "] ", total);
-        LOG("float space for %s\n", ggml_get_name(t));
+        if (i > 1) { LOG_INF("= [%" PRId64 "] ", total); }
+        LOG_INF("float space for %s\n", ggml_get_name(t));
     }
 }
 
```
examples/model-conversion/scripts/embedding/modelcard.template

Lines changed: 1 addition & 1 deletion
````diff
@@ -7,7 +7,7 @@ base_model:
 Recommended way to run this model:
 
 ```sh
-llama-server -hf {namespace}/{model_name}-GGUF
+llama-server -hf {namespace}/{model_name}-GGUF --embeddings
 ```
 
 Then the endpoint can be accessed at http://localhost:8080/embedding, for
````

ggml/src/ggml-cuda/common.cuh

Lines changed: 2 additions & 0 deletions
```diff
@@ -570,6 +570,8 @@ static __device__ __forceinline__ float ggml_cuda_e8m0_to_fp32(uint8_t x) {
 //
 // n/d = (mulhi(n, mp) + n) >> L;
 static const uint3 init_fastdiv_values(uint32_t d) {
+    GGML_ASSERT(d != 0);
+
     // compute L = ceil(log2(d));
     uint32_t L = 0;
     while (L < 32 && (uint32_t{ 1 } << L) < d) {
```
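
The guarded function precomputes a magic multiplier `mp` and shift `L` so that `n / d` can be evaluated as `(mulhi(n, mp) + n) >> L` without a hardware divide; `d == 0` would make that precomputation itself divide by zero, hence the new assertion. Below is a host-side sketch of the scheme for illustration only (the real device code uses `__umulhi` and packs the values into a CUDA `uint3`):

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Host-side sketch of the fastdiv scheme: precompute (mp, L) for a fixed divisor d,
// then floor(n / d) == (mulhi(n, mp) + n) >> L for any 32-bit n.
struct fastdiv_vals { uint32_t mp; uint32_t L; };

static fastdiv_vals init_fastdiv_values(uint32_t d) {
    assert(d != 0);  // mirrors the GGML_ASSERT added above
    uint32_t L = 0;  // L = ceil(log2(d))
    while (L < 32 && (uint32_t{ 1 } << L) < d) {
        L++;
    }
    const uint32_t mp = (uint32_t) (((uint64_t{ 1 } << 32) * ((uint64_t{ 1 } << L) - d)) / d + 1);
    return { mp, L };
}

static uint32_t fastdiv(uint32_t n, fastdiv_vals v) {
    const uint32_t hi = (uint32_t) (((uint64_t) n * v.mp) >> 32);  // mulhi(n, mp)
    return (uint32_t) (((uint64_t) hi + n) >> v.L);                // 64-bit add avoids overflow
}

int main() {
    for (uint32_t d : { 1u, 3u, 7u, 13u, 4096u, 4294967295u }) {
        const fastdiv_vals v = init_fastdiv_values(d);
        for (uint32_t n : { 0u, 1u, d - 1, d, d + 1, 123456789u, 4294967295u }) {
            assert(fastdiv(n, v) == n / d);
        }
    }
    printf("fastdiv checks passed\n");
    return 0;
}
```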
