@@ -51,7 +51,28 @@ fn main() {
 
     // https://github.com/ggerganov/llama.cpp/blob/191221178f51b6e81122c5bda0fd79620e547d07/Makefile#L133-L141
     if cfg!(target_os = "macos") {
+        assert!(!cublas_enabled, "CUBLAS is not supported on macOS");
+
         llama_cpp.define("_DARWIN_C_SOURCE", None);
+
+        // https://github.com/ggerganov/llama.cpp/blob/3c0d25c4756742ebf15ad44700fabc0700c638bd/Makefile#L340-L343
+        llama_cpp.define("GGML_USE_METAL", None);
+        llama_cpp.define("GGML_USE_ACCELERATE", None);
+        llama_cpp.define("ACCELERATE_NEW_LAPACK", None);
+        llama_cpp.define("ACCELERATE_LAPACK_ILP64", None);
+        println!("cargo:rustc-link-lib=framework=Accelerate");
+
+        // MK_LDFLAGS += -framework Foundation -framework Metal -framework MetalKit
+        // https://github.com/ggerganov/llama.cpp/blob/3c0d25c4756742ebf15ad44700fabc0700c638bd/Makefile#L509-L511
67+ println ! ( "cargo:rustc-link-lib=framework Foundation" ) ;
68+ println ! ( "cargo:rustc-link-lib=framework Metal" ) ;
69+ println ! ( "cargo:rustc-link-lib=framework MetalKit" ) ;
+
+
+        // https://github.com/ggerganov/llama.cpp/blob/3c0d25c4756742ebf15ad44700fabc0700c638bd/Makefile#L517-L520
+        ggml
+            .file("llama.cpp/ggml-metal.m")
+            .file("llama.cpp/ggml-metal.h");
     }
     if cfg!(target_os = "dragonfly") {
         llama_cpp.define("__BSD_VISIBLE", None);