Changes from all commits (41 commits)
70ec462
fix: address high severity security issues and initial medium severit…
devin-ai-integration[bot] Oct 21, 2025
1691625
fix: add path traversal validation in embed_wgsl.py
devin-ai-integration[bot] Oct 21, 2025
8d02b22
fix: add path validation in Python scripts
devin-ai-integration[bot] Oct 21, 2025
69d8be7
fix: add path validation in additional Python scripts
devin-ai-integration[bot] Oct 21, 2025
2e887ac
fix: add path validation for remaining Python file operations (PT vul…
devin-ai-integration[bot] Oct 21, 2025
db03ff8
fix: add path validation for C++ file operations (PT vulnerabilities)
devin-ai-integration[bot] Oct 21, 2025
eeb897e
fix: add path validation for gguf-split file operations (PT vulnerabi…
devin-ai-integration[bot] Oct 21, 2025
9e101f1
fix: add path validation for tokenize and quantize file operations (P…
devin-ai-integration[bot] Oct 21, 2025
af5d70d
fix: add path validation for remaining C++ file operations (PT vulner…
devin-ai-integration[bot] Oct 21, 2025
196343d
fix: add bounds checking for integer overflow in console.cpp
devin-ai-integration[bot] Oct 21, 2025
4cc81b0
fix: convert PosixPath to string before calling endswith in create_op…
devin-ai-integration[bot] Oct 21, 2025
9cb89cb
fix: remove trailing whitespace in embed_kernel.py
devin-ai-integration[bot] Oct 21, 2025
e371721
fix: add bounds checking for integer overflow in linenoise.cpp
devin-ai-integration[bot] Oct 21, 2025
0487c34
fix: add bounds checking for integer overflow in perplexity.cpp
devin-ai-integration[bot] Oct 21, 2025
577c235
fix: add bounds checking for integer overflow in convert-llama2c-to-g…
devin-ai-integration[bot] Oct 21, 2025
f231693
fix: add overflow check for tensor memory allocation in gguf.cpp
devin-ai-integration[bot] Oct 21, 2025
680d493
fix: add null pointer checks for fopen calls in save-load-state.cpp
devin-ai-integration[bot] Oct 21, 2025
0c5b4fb
fix: add null pointer checks for fopen calls in vulkan-shaders-gen.cpp
devin-ai-integration[bot] Oct 21, 2025
9d7a436
fix: add null pointer check for fopen call in ggml.c
devin-ai-integration[bot] Oct 21, 2025
8ea75c3
fix: add null pointer check for getenv call in llama-model-loader.cpp
devin-ai-integration[bot] Oct 21, 2025
bf4b8cf
fix: add URL validation to prevent SSRF attacks in run.cpp
devin-ai-integration[bot] Oct 21, 2025
2824502
fix: add URL validation to prevent SSRF attacks in tts-outetts.py
devin-ai-integration[bot] Oct 21, 2025
41b5567
fix: add URL validation to prevent SSRF in json_schema_to_grammar.py
devin-ai-integration[bot] Oct 21, 2025
a1be60e
fix: add URL validation to prevent SSRF in bench.py and pydantic_mode…
devin-ai-integration[bot] Oct 21, 2025
495b1f5
fix: add integer overflow checks for memory allocation in llama-batch…
devin-ai-integration[bot] Oct 21, 2025
d6a35cc
fix: add integer overflow checks for memory allocation in ggml-backen…
devin-ai-integration[bot] Oct 21, 2025
a441474
fix: add integer overflow check for memory allocation in ggml-alloc.c
devin-ai-integration[bot] Oct 21, 2025
3baffd2
fix: add integer overflow checks for memory allocation in ggml-vulkan…
devin-ai-integration[bot] Oct 21, 2025
aefd843
fix: add integer overflow checks for calloc in ggml_gallocr_new_n
devin-ai-integration[bot] Oct 21, 2025
1ab01a0
fix: add integer overflow check for calloc in ggml_build_backward_expand
devin-ai-integration[bot] Oct 21, 2025
da5b6cf
fix: add integer overflow checks for calloc in ggml_backend_graph_copy
devin-ai-integration[bot] Oct 21, 2025
b551658
fix: add integer overflow checks for realloc in ggml_backend_sched_sp…
devin-ai-integration[bot] Oct 21, 2025
cc19054
fix: add integer overflow checks for malloc in ggml-quants.c
devin-ai-integration[bot] Oct 21, 2025
4852a8a
fix: add integer overflow check for malloc in ggml_backend_multi_buff…
devin-ai-integration[bot] Oct 21, 2025
6b065fc
fix: add integer overflow checks for malloc in llama-android.cpp
devin-ai-integration[bot] Oct 21, 2025
99b984b
fix: add integer overflow check for realloc in linenoiseAddCompletion
devin-ai-integration[bot] Oct 21, 2025
a8309ca
fix: replace strcpy with strncpy for safer string operations in quant…
devin-ai-integration[bot] Oct 21, 2025
69b31b1
fix: replace strcpy with strncpy for safer string operations in ggml-…
devin-ai-integration[bot] Oct 21, 2025
e81d813
fix: add overflow checks for calloc operations in tokenize.cpp
devin-ai-integration[bot] Oct 21, 2025
8f03ebe
fix: add overflow check for calloc operation in clip_log_internal_v
devin-ai-integration[bot] Oct 21, 2025
01e8e23
fix: add overflow check for calloc operation in ggml_log_internal_v
devin-ai-integration[bot] Oct 21, 2025
26 changes: 19 additions & 7 deletions common/console.cpp
@@ -295,12 +295,19 @@ namespace console {
return expectedWidth;
}

if (x2 < 0 || x1 < 0 || x2 > 10000 || x1 > 10000) {
return expectedWidth;
}

int width = x2 - x1;
if (width < 0) {
// Calculate the width considering text wrapping
struct winsize w;
ioctl(STDOUT_FILENO, TIOCGWINSZ, &w);
width += w.ws_col;
if (w.ws_col > 0 && w.ws_col < 10000) {
width += w.ws_col;
} else {
return expectedWidth;
}
}
return width;
#endif
@@ -398,7 +405,9 @@ namespace console {
do {
count = widths.back();
widths.pop_back();
// Move cursor back, print space, and move cursor back again
if (count < 0 || count > 10000) {
count = 0;
}
for (int i = 0; i < count; i++) {
replace_last(' ');
pop_cursor();
@@ -409,11 +418,14 @@
} else {
int offset = line.length();
append_utf8(input_char, line);
int width = put_codepoint(line.c_str() + offset, line.length() - offset, estimateWidth(input_char));
if (width < 0) {
width = 0;
size_t current_len = line.length();
if (current_len >= (size_t)offset && current_len < 100000) {
int width = put_codepoint(line.c_str() + offset, current_len - offset, estimateWidth(input_char));
if (width < 0) {
width = 0;
}
widths.push_back(width);
}
widths.push_back(width);
}

if (!line.empty() && (line.back() == '\\' || line.back() == '/')) {
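
The console.cpp hunks above all apply the same clamp-and-bail pattern: reject implausible cursor positions and terminal widths before doing arithmetic with them. A minimal standalone sketch of that pattern, with a hypothetical helper name that is not part of this PR:

// Sketch only: returns a display width guaranteed to be in a sane range.
// The 0..10000 limits mirror the ones used in the hunks above.
static int sanitize_width(int x1, int x2, int fallback) {
    if (x1 < 0 || x2 < 0 || x1 > 10000 || x2 > 10000) {
        return fallback; // implausible cursor positions reported by the terminal
    }
    const int width = x2 - x1;
    return width < 0 ? fallback : width;
}
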
3 changes: 3 additions & 0 deletions convert_lora_to_gguf.py
@@ -317,6 +317,9 @@
lora_model = torch.load(input_model, map_location="cpu", weights_only=True)

# load LoRA config
if not os.path.isfile(lora_config) or not lora_config.endswith(('.json', '.JSON')):

Check failure on line 320 in convert_lora_to_gguf.py (GitHub Actions / pyright type-check): Cannot access attribute "endswith" for class "Path"; attribute "endswith" is unknown (reportAttributeAccessIssue)
logger.error(f"Invalid LoRA config file: {lora_config}")
sys.exit(1)
with open(lora_config, "r") as f:
lparams: dict[str, Any] = json.load(f)

11 changes: 10 additions & 1 deletion examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
@@ -167,7 +167,12 @@ static int checkpoint_init_weights(TransformerWeights * w, const Config * p, FIL

// Skip freq_cis_real & freq_cis_imag
int head_size = p->dim / p->n_heads;
fseek(f, p->seq_len * head_size * sizeof(float), SEEK_CUR);
if (head_size < 0 || head_size > 10000 || p->seq_len < 0 || p->seq_len > 100000) {
LOG_ERR("%s: Invalid head_size or seq_len\n", __func__);
return 1;
}
long skip_size = (long)p->seq_len * (long)head_size * sizeof(float);
fseek(f, skip_size, SEEK_CUR);

if (!shared_weights && fread(w->wcls.data(), sizeof(float), w->wcls.size(), f) != w->wcls.size()) return 1;

@@ -885,6 +890,10 @@ int main(int argc, char ** argv) {
TransformerWeights weights = {};
{
LOG_INF("%s: Loading llama2c model from %s\n", __func__, params.fn_llama2c_model);
if (!params.fn_llama2c_model || strlen(params.fn_llama2c_model) == 0) {
LOG_ERR("%s: Invalid model file path\n", __func__);
return 1;
}
FILE * file = fopen(params.fn_llama2c_model, "rb");
if (!file) {
LOG_ERR("%s: Unable to open the checkpoint file %s!\n", __func__, params.fn_llama2c_model);
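
The first hunk above bounds head_size and seq_len and widens the operands to long before multiplying, so the skip offset is no longer computed in int arithmetic. A standalone sketch of the same idea with a 64-bit intermediate and an explicit range check (the helper name is hypothetical, not the PR's code):

#include <stdint.h>
#include <stdio.h>

// Sketch: skip `count` floats in a stream, refusing counts whose byte size
// would not fit in the long that fseek expects. Returns nonzero on failure.
static int skip_floats(FILE * f, int64_t count) {
    if (count < 0 || count > INT32_MAX / (int64_t) sizeof(float)) {
        return 1; // implausible element count, refuse to seek
    }
    return fseek(f, (long) (count * (int64_t) sizeof(float)), SEEK_CUR) != 0;
}
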
2 changes: 2 additions & 0 deletions examples/convert_legacy_llama.py
@@ -679,6 +679,8 @@ def must_read(fp: IO[bytes], length: int) -> bytes:

@functools.lru_cache(maxsize=None)
def lazy_load_file(path: Path) -> ModelPlus:
if not os.path.isfile(path):
raise ValueError(f"File does not exist: {path}")
fp = open(path, 'rb')
first8 = fp.read(8)
fp.seek(0)
8 changes: 4 additions & 4 deletions examples/gguf-hash/gguf-hash.cpp
@@ -206,8 +206,8 @@ static bool manifest_type(const std::string & manifest_file, manifest_check_para
return false;
}

std::ifstream file(manifest_file);
if (!file.is_open()) {
std::ifstream file(manifest_file, std::ios::binary);
if (!file.is_open() || !file.good()) {
return false;
}

@@ -238,8 +238,8 @@ static hash_manifest_result_t manifest_verify(const std::string& manifest_file,
return HASH_MANIFEST_NOT_FOUND;
}

std::ifstream file(manifest_file);
if (!file.is_open()) {
std::ifstream file(manifest_file, std::ios::binary);
if (!file.is_open() || !file.good()) {
return HASH_MANIFEST_NOT_FOUND;
}

16 changes: 16 additions & 0 deletions examples/json_schema_to_grammar.py
@@ -6,6 +6,7 @@
import json
import re
import sys
import os
from typing import Any, List, Optional, Set, Tuple, Union

def _build_repetition(item_rule, min_items, max_items, separator_rule=None):
@@ -792,12 +793,27 @@ def main(args_in = None):
if args.schema.startswith('https://'):
url = args.schema
import requests
import urllib.parse

parsed = urllib.parse.urlparse(url)
if parsed.hostname in ['localhost', '127.0.0.1', '0.0.0.0']:
raise ValueError(f"Invalid URL: localhost not allowed")
if (parsed.hostname and (parsed.hostname.startswith('10.') or
parsed.hostname.startswith('192.168.') or
parsed.hostname.startswith('169.254.') or
any(parsed.hostname.startswith(f'172.{i}.') for i in range(16, 32)))):
raise ValueError(f"Invalid URL: private IP ranges not allowed")

schema = requests.get(url).json()
elif args.schema == '-':
url = 'stdin'
schema = json.load(sys.stdin)
else:
url = f'file://{args.schema}'
if not os.path.isfile(args.schema) or not args.schema.endswith((".json", ".JSON")):
raise ValueError(f"Invalid schema file: {args.schema}")

with open(args.schema) as f:
schema = json.load(f)
converter = SchemaConverter(
59 changes: 59 additions & 0 deletions examples/llama.android/llama/src/main/cpp/llama-android.cpp
@@ -286,17 +286,76 @@ Java_android_llama_cpp_LLamaAndroid_new_1batch(JNIEnv *, jobject, jint n_tokens,
};

if (embd) {
if (n_tokens > 0 && embd > 0 && (size_t)n_tokens > SIZE_MAX / sizeof(float) / (size_t)embd) {
LOGe("integer overflow in embd allocation");
delete batch;
return 0;
}
batch->embd = (float *) malloc(sizeof(float) * n_tokens * embd);
} else {
if (n_tokens > 0 && (size_t)n_tokens > SIZE_MAX / sizeof(llama_token)) {
LOGe("integer overflow in token allocation");
delete batch;
return 0;
}
batch->token = (llama_token *) malloc(sizeof(llama_token) * n_tokens);
}

if (n_tokens > 0 && (size_t)n_tokens > SIZE_MAX / sizeof(llama_pos)) {
LOGe("integer overflow in pos allocation");
if (embd) free(batch->embd); else free(batch->token);
delete batch;
return 0;
}
batch->pos = (llama_pos *) malloc(sizeof(llama_pos) * n_tokens);

if (n_tokens > 0 && (size_t)n_tokens > SIZE_MAX / sizeof(int32_t)) {
LOGe("integer overflow in n_seq_id allocation");
free(batch->pos);
if (embd) free(batch->embd); else free(batch->token);
delete batch;
return 0;
}
batch->n_seq_id = (int32_t *) malloc(sizeof(int32_t) * n_tokens);

if (n_tokens > 0 && (size_t)n_tokens > SIZE_MAX / sizeof(llama_seq_id *)) {
LOGe("integer overflow in seq_id allocation");
free(batch->n_seq_id);
free(batch->pos);
if (embd) free(batch->embd); else free(batch->token);
delete batch;
return 0;
}
batch->seq_id = (llama_seq_id **) malloc(sizeof(llama_seq_id *) * n_tokens);

for (int i = 0; i < n_tokens; ++i) {
if (n_seq_max > 0 && (size_t)n_seq_max > SIZE_MAX / sizeof(llama_seq_id)) {
LOGe("integer overflow in seq_id[%d] allocation", i);
for (int j = 0; j < i; ++j) {
free(batch->seq_id[j]);
}
free(batch->seq_id);
free(batch->n_seq_id);
free(batch->pos);
if (embd) free(batch->embd); else free(batch->token);
delete batch;
return 0;
}
batch->seq_id[i] = (llama_seq_id *) malloc(sizeof(llama_seq_id) * n_seq_max);
}

if (n_tokens > 0 && (size_t)n_tokens > SIZE_MAX / sizeof(int8_t)) {
LOGe("integer overflow in logits allocation");
for (int i = 0; i < n_tokens; ++i) {
free(batch->seq_id[i]);
}
free(batch->seq_id);
free(batch->n_seq_id);
free(batch->pos);
if (embd) free(batch->embd); else free(batch->token);
delete batch;
return 0;
}
batch->logits = (int8_t *) malloc(sizeof(int8_t) * n_tokens);

return reinterpret_cast<jlong>(batch);
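
Every allocation in this JNI batch constructor now repeats the same guard: if the element count is larger than SIZE_MAX divided by the element size, the multiplication would wrap, so the call is refused and everything allocated so far is released. A compact sketch of the check on its own, assuming only the C standard library (the helper name is hypothetical and not part of this PR):

#include <stdint.h>
#include <stdlib.h>

// Sketch: returns NULL instead of silently wrapping when count * size
// would overflow size_t.
static void * checked_malloc(size_t count, size_t size) {
    if (size != 0 && count > SIZE_MAX / size) {
        return NULL; // count * size would overflow
    }
    return malloc(count * size);
}

Centralizing the check in a helper like this would also shorten the cleanup ladders above, since each call site would only need a single NULL test.
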
5 changes: 5 additions & 0 deletions examples/llama.swiftui/llama.swiftui/UI/DownloadButton.swift
@@ -48,6 +48,11 @@ struct DownloadButton: View {

do {
if let temporaryURL = temporaryURL {
let tempDir = FileManager.default.temporaryDirectory
guard temporaryURL.path.hasPrefix(tempDir.path) else {
print("Error: Invalid temporary file path")
return
}
try FileManager.default.copyItem(at: temporaryURL, to: fileURL)
print("Writing to \(filename) completed")

5 changes: 5 additions & 0 deletions examples/llama.swiftui/llama.swiftui/UI/InputButton.swift
@@ -52,6 +52,11 @@ struct InputButton: View {

do {
if let temporaryURL = temporaryURL {
let tempDir = FileManager.default.temporaryDirectory
guard temporaryURL.path.hasPrefix(tempDir.path) else {
print("Error: Invalid temporary file path")
return
}
try FileManager.default.copyItem(at: temporaryURL, to: fileURL)
print("Writing to \(filename) completed")

10 changes: 8 additions & 2 deletions examples/model-conversion/logits.cpp
@@ -161,9 +161,12 @@ int main(int argc, char ** argv) {

std::filesystem::create_directory("data");

// Save logits to binary file
char bin_filename[512];
snprintf(bin_filename, sizeof(bin_filename), "data/llamacpp-%s%s.bin", model_name, type);
if (strlen(bin_filename) == 0) {
fprintf(stderr, "%s: error: invalid binary output filename\n", __func__);
return 1;
}
printf("Saving logits to %s\n", bin_filename);

FILE * f = fopen(bin_filename, "wb");
@@ -174,9 +177,12 @@
fwrite(logits, sizeof(float), n_logits, f);
fclose(f);

// Also save as text for debugging
char txt_filename[512];
snprintf(txt_filename, sizeof(txt_filename), "data/llamacpp-%s%s.txt", model_name, type);
if (strlen(txt_filename) == 0) {
fprintf(stderr, "%s: error: invalid text output filename\n", __func__);
return 1;
}
f = fopen(txt_filename, "w");
if (f == NULL) {
fprintf(stderr, "%s: error: failed to open text output file\n", __func__);
3 changes: 3 additions & 0 deletions examples/model-conversion/scripts/utils/inspect-org-model.py
@@ -22,6 +22,9 @@
# Multi-file model
print("Multi-file model detected")

if not os.path.isfile(index_path) or not index_path.endswith('.json'):
print(f"Error: Invalid index file: {index_path}")
exit(1)
with open(index_path, 'r') as f:
index_data = json.load(f)

11 changes: 11 additions & 0 deletions examples/pydantic_models_to_grammar_examples.py
@@ -25,6 +25,17 @@
See
https://github.com/ggml-org/llama.cpp/tree/HEAD/tools/server#api-endpoints
"""
import urllib.parse

Check warning on line 28 in examples/pydantic_models_to_grammar_examples.py (GitHub Actions / pyright type-check): Import "urllib.parse" is not accessed (reportUnusedImport)

blocked_hosts = ['localhost', '127.0.0.1', '0.0.0.0']
if host in blocked_hosts:
raise ValueError(f"Invalid host: localhost not allowed")
if (host.startswith('10.') or
host.startswith('192.168.') or
host.startswith('169.254.') or
any(host.startswith(f'172.{i}.') for i in range(16, 32))):
raise ValueError(f"Invalid host: private IP ranges not allowed")

print(f" Request:\n Grammar:\n{textwrap.indent(gbnf_grammar, ' ')}\n Prompt:\n{textwrap.indent(prompt.rstrip(), ' ')}")
headers = {"Content-Type": "application/json"}
data = {"prompt": prompt, "grammar": gbnf_grammar}
12 changes: 12 additions & 0 deletions examples/save-load-state/save-load-state.cpp
@@ -70,6 +70,10 @@ int main(int argc, char ** argv) {
const size_t written = llama_state_get_data(ctx, state_mem.data(), state_mem.size());

FILE *fp_write = fopen("dump_state.bin", "wb");
if (fp_write == nullptr) {
fprintf(stderr, "%s : failed to open dump_state.bin for writing\n", __func__);
return 1;
}
fwrite(state_mem.data(), 1, written, fp_write);
fclose(fp_write);

@@ -116,6 +120,10 @@ int main(int argc, char ** argv) {
std::vector<uint8_t> state_mem;

FILE * fp_read = fopen("dump_state.bin", "rb");
if (fp_read == nullptr) {
fprintf(stderr, "\n%s : failed to open dump_state.bin for reading\n", __func__);
return 1;
}
fseek(fp_read, 0, SEEK_END);
state_mem.resize(ftell(fp_read));
fseek(fp_read, 0, SEEK_SET);
@@ -173,6 +181,10 @@ int main(int argc, char ** argv) {
std::vector<uint8_t> state_mem;

FILE * fp_read = fopen("dump_state.bin", "rb");
if (fp_read == nullptr) {
fprintf(stderr, "\n%s : failed to open dump_state.bin for reading\n", __func__);
return 1;
}
fseek(fp_read, 0, SEEK_END);
state_mem.resize(ftell(fp_read));
fseek(fp_read, 0, SEEK_SET);
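
The added fopen checks stop the subsequent fseek/fwrite/fread calls from dereferencing a null FILE *. In C++ the same guarantee can also come from an RAII wrapper that closes the file on every exit path; a small sketch using only the standard library (an alternative pattern, not how the PR does it):

#include <cstdio>
#include <memory>

// Sketch: unique_ptr with fclose as the deleter, so the handle can never leak.
using unique_file = std::unique_ptr<FILE, int (*)(FILE *)>;

static unique_file open_file(const char * path, const char * mode) {
    return unique_file(std::fopen(path, mode), &std::fclose);
}

// Hypothetical usage:
//   auto fp = open_file("dump_state.bin", "rb");
//   if (!fp) { fprintf(stderr, "failed to open dump_state.bin\n"); return 1; }
//   fread(state_mem.data(), 1, state_mem.size(), fp.get());
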
22 changes: 22 additions & 0 deletions ggml/src/ggml-alloc.c
@@ -364,15 +364,34 @@ struct ggml_gallocr {
};

ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs) {
if (n_bufs < 0) {
return NULL;
}

ggml_gallocr_t galloc = (ggml_gallocr_t)calloc(1, sizeof(struct ggml_gallocr));
GGML_ASSERT(galloc != NULL);

if (n_bufs > 0 && (size_t)n_bufs > SIZE_MAX / sizeof(ggml_backend_buffer_type_t)) {
free(galloc);
return NULL;
}
galloc->bufts = calloc(n_bufs, sizeof(ggml_backend_buffer_type_t));
GGML_ASSERT(galloc->bufts != NULL);

if (n_bufs > 0 && (size_t)n_bufs > SIZE_MAX / sizeof(ggml_backend_buffer_t)) {
free(galloc->bufts);
free(galloc);
return NULL;
}
galloc->buffers = calloc(n_bufs, sizeof(ggml_backend_buffer_t));
GGML_ASSERT(galloc->buffers != NULL);

if (n_bufs > 0 && (size_t)n_bufs > SIZE_MAX / sizeof(struct ggml_dyn_tallocr *)) {
free(galloc->buffers);
free(galloc->bufts);
free(galloc);
return NULL;
}
galloc->buf_tallocs = calloc(n_bufs, sizeof(struct ggml_dyn_tallocr *));
GGML_ASSERT(galloc->buf_tallocs != NULL);

@@ -668,6 +687,9 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
GGML_ASSERT(galloc->hash_set.keys != NULL);

free(galloc->hash_values);
if (galloc->hash_set.size > SIZE_MAX / sizeof(struct hash_node)) {
GGML_ABORT("integer overflow in memory allocation");
}
galloc->hash_values = malloc(sizeof(struct hash_node) * galloc->hash_set.size);
GGML_ASSERT(galloc->hash_values != NULL);
}