Skip to content

Commit d739511

Browse files
authored
llama : use std::abs instead of abs (#16853)
1 parent 052df28 commit d739511

File tree

2 files changed

+2
-2
lines changed

2 files changed

+2
-2
lines changed

src/llama-graph.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2035,7 +2035,7 @@ int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buck
20352035

20362036
if (bidirectional) {
20372037
relative_bucket += (relative_position > 0) * n_buckets;
2038-
relative_position = abs(relative_position);
2038+
relative_position = std::abs(relative_position);
20392039
} else {
20402040
relative_position = -std::min<int32_t>(relative_position, 0);
20412041
}

src/llama-quant.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -653,7 +653,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
653653
gguf_set_val_f32(ctx_out.get(), o.key, o.val_f64);
654654
} else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) {
655655
// Setting type to UINT32. See https://github.com/ggml-org/llama.cpp/pull/14182 for context
656-
gguf_set_val_u32(ctx_out.get(), o.key, (uint32_t)abs(o.val_i64));
656+
gguf_set_val_u32(ctx_out.get(), o.key, (uint32_t)std::abs(o.val_i64));
657657
} else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) {
658658
gguf_set_val_bool(ctx_out.get(), o.key, o.val_bool);
659659
} else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_STR) {

0 commit comments

Comments (0)