
Commit b8ce43b

fix another segfault
1 parent 3a9bec9 commit b8ce43b

2 files changed: +10, -2 lines


.gitignore

Lines changed: 3 additions & 0 deletions
@@ -32,6 +32,8 @@
 .swiftpm
 .vs/
 .vscode/
+.devcontainer/
+.github/copilot-instructions.md
 nppBackup
 
 
@@ -147,3 +149,4 @@ poetry.toml
 /run-vim.sh
 /run-chat.sh
 Testing/Temporary/CTestCostData.txt
+

src/llama-model-loader.cpp

Lines changed: 7 additions & 2 deletions
@@ -874,9 +874,9 @@ void llama_model_loader::init_mappings(bool prefetch, llama_mlocks * mlock_mmaps
 
     // The unified mapping represents all files, so we need to store it
     // for each file index to maintain compatibility with existing code
-    size_t total_size = unified_mapping->size();
     for (size_t i = 0; i < files.size(); ++i) {
-        mmaps_used.emplace_back(total_size, 0);
+        // For mmaps_used, store the individual file size, not the total unified size
+        mmaps_used.emplace_back(files[i]->size(), 0);
         if (mlock_mmaps && i == 0) { // Only lock once for the unified mapping
             std::unique_ptr<llama_mlock> mlock_mmap(new llama_mlock());
             mlock_mmap->init(unified_mapping->addr());
@@ -1254,6 +1254,11 @@ bool llama_model_loader::load_all_data(
     const auto & mmap_used = mmaps_used.at(idx);
     auto & mapping = mappings.at(idx);
 
+    // Skip null mappings (can happen with unified NUMA mappings)
+    if (!mapping) {
+        continue;
+    }
+
     // Check if this mapping uses NUMA mirroring
     // If so, skip the unmap_fragment calls as cleanup is handled in the destructor
     bool is_numa_mirrored = false;
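
The first hunk seeds each mmaps_used entry with that file's own size instead of the unified total. As a rough illustration of that bookkeeping pattern only (the container and names below are hypothetical, not llama.cpp's real types), seeding each per-file range with (file size, 0) keeps later range updates bounded by the individual file rather than by the whole unified mapping:

// Illustrative sketch: per-file usage bookkeeping over one unified mapping.
// Hypothetical names; not the loader's actual data structures.
#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

int main() {
    // Pretend three files of different sizes share a single unified mapping.
    const std::vector<size_t> file_sizes = {1024, 4096, 512};

    // One (first, last) byte-range entry per file. Seeding with (size, 0)
    // gives an "empty" range that later updates can narrow to the bytes
    // actually touched -- but only if the seed is the file's own size,
    // not the much larger unified total.
    std::vector<std::pair<size_t, size_t>> used;
    for (size_t sz : file_sizes) {
        used.emplace_back(sz, 0);
    }

    for (size_t i = 0; i < used.size(); ++i) {
        std::printf("file %zu: initial range = (%zu, %zu)\n",
                    i, used[i].first, used[i].second);
    }
    return 0;
}

Seeding with the unified total would let the later range arithmetic reach past the end of an individual file's region, which is one plausible way the previous code could misbehave; the diff itself only shows the size change, so treat this as an assumption.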

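The second hunk adds the null check behind the "fix another segfault" message. As a minimal, self-contained sketch of the failure mode (the mapping_t type and sizes here are made up for illustration): when one unified mapping stands in for several files, the non-first slots can be left empty, and dereferencing them without a guard crashes.

// Illustrative sketch: why null entries must be skipped when a single
// unified mapping replaces several per-file mappings. Hypothetical types.
#include <cstddef>
#include <cstdio>
#include <memory>
#include <vector>

struct mapping_t {
    size_t size;
};

int main() {
    // With a unified NUMA-style mapping, only index 0 owns a real mapping;
    // the remaining per-file slots stay null.
    std::vector<std::unique_ptr<mapping_t>> mappings;
    mappings.push_back(std::make_unique<mapping_t>(mapping_t{8192}));
    mappings.push_back(nullptr);
    mappings.push_back(nullptr);

    for (size_t idx = 0; idx < mappings.size(); ++idx) {
        auto & mapping = mappings.at(idx);

        // Without this guard, the access below dereferences a null pointer
        // for idx > 0 and crashes.
        if (!mapping) {
            continue;
        }

        std::printf("mapping %zu covers %zu bytes\n", idx, mapping->size);
    }
    return 0;
}

In the actual change, the guard sits just before the NUMA-mirroring check in load_all_data, so empty slots are skipped before any member access.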