
Commit 62ac6b5

Merge pull request #96 from SilasMarvin/silas-8-metal-on-mac
Working build.rs for apple metal
2 parents 870c5c8 + eae1f11 · commit 62ac6b5

File tree

1 file changed: +14 −35 lines changed

llama-cpp-sys-2/build.rs

Lines changed: 14 additions & 35 deletions
@@ -12,15 +12,13 @@ fn main() {
     }
 
     let mut ggml = cc::Build::new();
-    let mut ggml_cuda = if cublas_enabled { Some(cc::Build::new()) } else { None };
-    let mut ggml_metal= if cfg!(target_os = "macos") { Some(cc::Build::new()) } else { None };
     let mut llama_cpp = cc::Build::new();
 
     ggml.cpp(false);
     llama_cpp.cpp(true);
 
     // https://github.com/ggerganov/llama.cpp/blob/a836c8f534ab789b02da149fbdaf7735500bff74/Makefile#L364-L368
-    if let Some(ggml_cuda) = &mut ggml_cuda {
+    if cublas_enabled {
         for lib in [
             "cuda", "cublas", "culibos", "cudart", "cublasLt", "pthread", "dl", "rt",
         ] {
@@ -30,32 +28,34 @@ fn main() {
         println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");
 
         if cfg!(target_arch = "aarch64") {
-            ggml_cuda
-                .flag_if_supported("-mfp16-format=ieee")
+            ggml.flag_if_supported("-mfp16-format=ieee")
                 .flag_if_supported("-mno-unaligned-access");
             llama_cpp
                 .flag_if_supported("-mfp16-format=ieee")
                 .flag_if_supported("-mno-unaligned-access");
-            ggml_cuda
-                .flag_if_supported("-mfp16-format=ieee")
+            ggml.flag_if_supported("-mfp16-format=ieee")
                 .flag_if_supported("-mno-unaligned-access");
         }
 
-        ggml_cuda
-            .cuda(true)
+        ggml.cuda(true)
             .std("c++17")
             .flag("-arch=all")
             .file("llama.cpp/ggml-cuda.cu");
 
         ggml.define("GGML_USE_CUBLAS", None);
-        ggml_cuda.define("GGML_USE_CUBLAS", None);
+        ggml.define("GGML_USE_CUBLAS", None);
         llama_cpp.define("GGML_USE_CUBLAS", None);
     }
 
     // https://github.com/ggerganov/llama.cpp/blob/191221178f51b6e81122c5bda0fd79620e547d07/Makefile#L133-L141
     if cfg!(target_os = "macos") {
         assert!(!cublas_enabled, "CUBLAS is not supported on macOS");
 
+        println!("cargo:rustc-link-lib=framework=Metal");
+        println!("cargo:rustc-link-lib=framework=Foundation");
+        println!("cargo:rustc-link-lib=framework=MetalPerformanceShaders");
+        println!("cargo:rustc-link-lib=framework=MetalKit");
+
         llama_cpp.define("_DARWIN_C_SOURCE", None);
 
         // https://github.com/ggerganov/llama.cpp/blob/3c0d25c4756742ebf15ad44700fabc0700c638bd/Makefile#L340-L343
@@ -65,40 +65,20 @@ fn main() {
         llama_cpp.define("ACCELERATE_LAPACK_ILP64", None);
         println!("cargo:rustc-link-arg=framework=Accelerate");
 
-        // MK_LDFLAGS += -framework Foundation -framework Metal -framework MetalKit
-        // https://github.com/ggerganov/llama.cpp/blob/3c0d25c4756742ebf15ad44700fabc0700c638bd/Makefile#L509-L511
-        println!("cargo:rustc-link-arg=framework=Foundation");
-        println!("cargo:rustc-link-arg=framework=Metal");
-        println!("cargo:rustc-link-arg=framework=MetalKit");
-    }
-
-    if let Some(ggml_metal) = &mut ggml_metal {
-        metal_hack(ggml_metal);
-        ggml_metal
-            .file("llama.cpp/ggml-metal")
-            .include("llama.cpp");
+        metal_hack(&mut ggml);
+        ggml.include("./llama.cpp/ggml-metal.h");
     }
 
     if cfg!(target_os = "dragonfly") {
         llama_cpp.define("__BSD_VISIBLE", None);
     }
 
-    if let Some(ggml_cuda) = ggml_cuda {
-        println!("compiling ggml-cuda");
-        ggml_cuda.compile("ggml-cuda");
-    }
-
-
-    if let Some(ggml_metal) = ggml_metal {
-        println!("compiling ggml-metal");
-        ggml_metal.compile("ggml-metal")
-    }
-
     if cfg!(target_os = "linux") {
         ggml.define("_GNU_SOURCE", None);
     }
 
     ggml.std("c17")
+        .include("./llama.cpp")
         .file("llama.cpp/ggml.c")
         .file("llama.cpp/ggml-alloc.c")
         .file("llama.cpp/ggml-backend.c")
@@ -136,7 +116,6 @@ fn main() {
         .expect("failed to write bindings to file");
 }
 
-
// courtesy of https://github.com/rustformers/llm
 fn metal_hack(build: &mut cc::Build) {
     const GGML_METAL_METAL_PATH: &str = "llama.cpp/ggml-metal.metal";
@@ -174,4 +153,4 @@ fn metal_hack(build: &mut cc::Build) {
     };
 
     build.file(ggml_metal_path);
-}
+}
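
For orientation, here is a minimal sketch of what the macOS/Metal path of llama-cpp-sys-2/build.rs looks like after this merge, assembled from the added and unchanged lines in the diff above. The CUDA, DragonFly, Linux, and bindgen sections are omitted, and metal_hack is the function defined further down in the same file, so this is an illustrative excerpt rather than a drop-in build script; anything not visible in the diff (such as the trailing compile step) is an assumption.

fn main() {
    // A single cc::Build now owns the ggml objects; the separate ggml_metal
    // (and optional ggml_cuda) builders from the old code are gone.
    let mut ggml = cc::Build::new();
    let mut llama_cpp = cc::Build::new();

    ggml.cpp(false);
    llama_cpp.cpp(true);

    if cfg!(target_os = "macos") {
        // Link the Apple frameworks the Metal backend needs at runtime.
        println!("cargo:rustc-link-lib=framework=Metal");
        println!("cargo:rustc-link-lib=framework=Foundation");
        println!("cargo:rustc-link-lib=framework=MetalPerformanceShaders");
        println!("cargo:rustc-link-lib=framework=MetalKit");

        llama_cpp.define("_DARWIN_C_SOURCE", None);

        // metal_hack (defined later in build.rs, courtesy of rustformers/llm)
        // prepares the ggml-metal source and registers it on this same build
        // via build.file(...), instead of compiling a separate ggml-metal library.
        metal_hack(&mut ggml);
        ggml.include("./llama.cpp/ggml-metal.h");
    }

    // The ggml sources now also pick up .include("./llama.cpp").
    ggml.std("c17")
        .include("./llama.cpp")
        .file("llama.cpp/ggml.c")
        .file("llama.cpp/ggml-alloc.c")
        .file("llama.cpp/ggml-backend.c");
    // ...remaining source files, ggml.compile(...), llama_cpp setup and bindgen
    // as in the rest of build.rs (assumption: not shown in this diff).
}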
