Skip to content

Commit 3297675

Browse files
committed
Bump llama.cpp to b4102 (db4cfd5)
* https://github.com/ggerganov/llama.cpp/commits/db4cfd5dbc31c90f0d5c413a2e182d068b8ee308 * Updated build.rs and the bindgen include list to reflect the upstream backend refactor — ggml-org/llama.cpp#10256
1 parent 4ab26a1 commit 3297675

File tree

3 files changed

+67
-20
lines changed

3 files changed

+67
-20
lines changed

llama-cpp-sys-2/Cargo.toml

Lines changed: 14 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -19,37 +19,41 @@ include = [
1919
"/llama.cpp/ggml/src/*.h",
2020
"/llama.cpp/ggml/src/*.c",
2121
"/llama.cpp/ggml/src/*.cpp",
22+
"/llama.cpp/ggml/src/ggml-cpu/*.h",
23+
"/llama.cpp/ggml/src/ggml-cpu/*.c",
24+
"/llama.cpp/ggml/src/ggml-cpu/*.cpp",
2225
"/llama.cpp/src/*.h",
2326
"/llama.cpp/src/*.cpp",
2427

2528
"/llama.cpp/convert_hf_to_gguf.py", # Yes, it's required
2629

2730
"/llama.cpp/common/build-info.cpp.in",
2831

29-
"/llama.cpp/ggml/src/ggml-cuda.cu",
30-
"/llama.cpp/ggml/src/ggml-metal.m",
31-
"/llama.cpp/ggml/src/ggml-metal.metal",
32-
3332
"/llama.cpp/include/llama.h",
3433

3534
"/llama.cpp/ggml/src/ggml-cuda/**/*",
35+
"/llama.cpp/ggml/src/ggml-metal/**/*",
36+
"/llama.cpp/ggml/src/ggml-vulkan/**/*",
37+
"/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/**/*",
3638

37-
"/llama.cpp/ggml/src/vulkan-shaders/**/*",
38-
39-
"/llama.cpp/ggml/src/llamafile/sgemm.h",
40-
"/llama.cpp/ggml/src/llamafile/sgemm.cpp",
39+
"/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.h",
40+
"/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.cpp",
4141

4242
"/llama.cpp/pocs",
4343

4444
"/llama.cpp/CMakeLists.txt",
4545
"/llama.cpp/common/CMakeLists.txt",
4646
"/llama.cpp/ggml/CMakeLists.txt",
4747
"/llama.cpp/ggml/src/CMakeLists.txt",
48-
"/llama.cpp/ggml/src/vulkan-shaders/CMakeLists.txt",
48+
"/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt",
49+
"/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt",
50+
"/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt",
51+
"/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt",
4952
"/llama.cpp/src/CMakeLists.txt",
5053

5154
"/llama.cpp/cmake",
5255
"/llama.cpp/ggml/cmake",
56+
"/llama.cpp/ggml/src/ggml-cpu/cmake",
5357
"/llama.cpp/common/cmake",
5458
]
5559

@@ -70,3 +74,4 @@ dynamic-link = []
7074
vulkan = []
7175
native = []
7276
openmp = []
77+
llamafile = []

llama-cpp-sys-2/build.rs

Lines changed: 52 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -65,8 +65,7 @@ fn extract_lib_names(out_dir: &Path, build_shared_libs: bool) -> Vec<String> {
6565
"*.a"
6666
}
6767
};
68-
let libs_dir = out_dir.join("lib");
69-
let pattern = libs_dir.join(lib_pattern);
68+
let pattern = out_dir.join(lib_pattern);
7069
debug_log!("Extract libs {}", pattern.display());
7170

7271
let mut lib_names: Vec<String> = Vec::new();
@@ -143,7 +142,6 @@ fn macos_link_search_path() -> Option<String> {
143142
}
144143

145144
fn main() {
146-
147145
let target = env::var("TARGET").unwrap();
148146
let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
149147

@@ -152,6 +150,7 @@ fn main() {
152150
let manifest_dir = env::var("CARGO_MANIFEST_DIR").expect("Failed to get CARGO_MANIFEST_DIR");
153151
let llama_src = Path::new(&manifest_dir).join("llama.cpp");
154152
let build_shared_libs = cfg!(feature = "cuda") || cfg!(feature = "dynamic-link");
153+
let use_llamafile = cfg!(feature = "llamafile");
155154

156155
let build_shared_libs = std::env::var("LLAMA_BUILD_SHARED_LIBS")
157156
.map(|v| v == "1")
@@ -166,6 +165,7 @@ fn main() {
166165
debug_log!("TARGET_DIR: {}", target_dir.display());
167166
debug_log!("OUT_DIR: {}", out_dir.display());
168167
debug_log!("BUILD_SHARED: {}", build_shared_libs);
168+
debug_log!("LLAMAFILE: {}", use_llamafile);
169169

170170
// Prepare sherpa-onnx source
171171
if !llama_dst.exists() {
@@ -196,7 +196,6 @@ fn main() {
196196
.generate()
197197
.expect("Failed to generate bindings");
198198

199-
200199
// Write the generated bindings to an output file
201200
let bindings_path = out_dir.join("bindings.rs");
202201
bindings
@@ -224,19 +223,21 @@ fn main() {
224223
if build_shared_libs { "ON" } else { "OFF" },
225224
);
226225

226+
config.define("GGML_LLAMAFILE", if use_llamafile { "ON" } else { "OFF" });
227+
227228
if cfg!(target_os = "macos") {
228229
config.define("GGML_BLAS", "OFF");
229230
}
230231

231232
if cfg!(windows) {
232233
config.static_crt(static_crt);
233234
}
234-
235235

236236
if cfg!(feature = "vulkan") {
237237
config.define("GGML_VULKAN", "ON");
238238
if cfg!(windows) {
239-
let vulkan_path = env::var("VULKAN_SDK").expect("Please install Vulkan SDK and ensure that VULKAN_SDK env variable is set");
239+
let vulkan_path = env::var("VULKAN_SDK")
240+
.expect("Please install Vulkan SDK and ensure that VULKAN_SDK env variable is set");
240241
let vulkan_lib_path = Path::new(&vulkan_path).join("Lib");
241242
println!("cargo:rustc-link-search={}", vulkan_lib_path.display());
242243
println!("cargo:rustc-link-lib=vulkan-1");
@@ -266,11 +267,52 @@ fn main() {
266267
// Search paths
267268
println!("cargo:rustc-link-search={}", out_dir.join("lib").display());
268269
println!("cargo:rustc-link-search={}", build_dir.display());
270+
println!(
271+
"cargo:rustc-link-search={}",
272+
build_dir.join("build/ggml/src").display()
273+
);
274+
println!(
275+
"cargo:rustc-link-search={}",
276+
build_dir.join("build/ggml/src/ggml-cpu").display()
277+
);
278+
println!(
279+
"cargo:rustc-link-search={}",
280+
build_dir.join("build/ggml/src/ggml-metal").display()
281+
);
282+
println!(
283+
"cargo:rustc-link-search={}",
284+
build_dir.join("build/ggml/src/ggml-cuda").display()
285+
);
286+
println!(
287+
"cargo:rustc-link-search={}",
288+
build_dir.join("build/ggml/src/ggml-vulkan").display()
289+
);
269290

270291
// Link libraries
271292
let llama_libs_kind = if build_shared_libs { "dylib" } else { "static" };
272-
let llama_libs = extract_lib_names(&out_dir, build_shared_libs);
273-
293+
let mut llama_libs = extract_lib_names(&out_dir.join("lib"), build_shared_libs);
294+
llama_libs.append(&mut extract_lib_names(
295+
&build_dir.join("build/ggml/src"),
296+
build_shared_libs,
297+
));
298+
llama_libs.append(&mut extract_lib_names(
299+
&build_dir.join("build/ggml/src/ggml-cpu"),
300+
build_shared_libs,
301+
));
302+
llama_libs.append(&mut extract_lib_names(
303+
&build_dir.join("build/ggml/src/ggml-metal"),
304+
build_shared_libs,
305+
));
306+
llama_libs.append(&mut extract_lib_names(
307+
&build_dir.join("build/ggml/src/ggml-vulkan"),
308+
build_shared_libs,
309+
));
310+
llama_libs.append(&mut extract_lib_names(
311+
&build_dir.join("build/ggml/src/ggml-cuda"),
312+
build_shared_libs,
313+
));
314+
llama_libs.sort_unstable();
315+
llama_libs.dedup();
274316
for lib in llama_libs {
275317
debug_log!(
276318
"LINK {}",
@@ -330,7 +372,7 @@ fn main() {
330372
debug_log!("HARD LINK {} TO {}", asset.display(), dst.display());
331373
if !dst.exists() {
332374
std::fs::hard_link(asset.clone(), dst).unwrap();
333-
}
375+
}
334376

335377
// Copy DLLs to examples as well
336378
if target_dir.join("examples").exists() {
@@ -349,4 +391,4 @@ fn main() {
349391
}
350392
}
351393
}
352-
}
394+
}

llama-cpp-sys-2/llama.cpp

0 commit comments

Comments (0)