
Commit b9ba4e6

Systemcluster authored and MarcusDunn committed
Add support for building with CUDA on Windows
1 parent 5c8fb8a commit b9ba4e6

File tree

1 file changed: +12 -4 lines changed


llama-cpp-sys-2/build.rs

Lines changed: 12 additions & 4 deletions
@@ -24,11 +24,14 @@ fn main() {
 
     // https://github.com/ggerganov/llama.cpp/blob/a836c8f534ab789b02da149fbdaf7735500bff74/Makefile#L364-L368
     if let Some(ggml_cuda) = &mut ggml_cuda {
-        for lib in [
-            "cuda", "cublas", "culibos", "cudart", "cublasLt", "pthread", "dl", "rt",
-        ] {
+        for lib in ["cuda", "cublas", "cudart", "cublasLt"] {
             println!("cargo:rustc-link-lib={}", lib);
         }
+        if !ggml_cuda.get_compiler().is_like_msvc() {
+            for lib in ["culibos", "pthread", "dl", "rt"] {
+                println!("cargo:rustc-link-lib={}", lib);
+            }
+        }
 
         println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");
 
@@ -46,10 +49,15 @@ fn main() {
 
         ggml_cuda
             .cuda(true)
-            .std("c++17")
             .flag("-arch=all")
             .file("llama.cpp/ggml-cuda.cu");
 
+        if ggml_cuda.get_compiler().is_like_msvc() {
+            ggml_cuda.std("c++14");
+        } else {
+            ggml_cuda.std("c++17");
+        }
+
         ggml.define("GGML_USE_CUBLAS", None);
         ggml_cuda.define("GGML_USE_CUBLAS", None);
         llama_cpp.define("GGML_USE_CUBLAS", None);
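
For context, the change hinges on the `cc` crate's compiler detection: `Build::get_compiler()` returns a `Tool`, and `Tool::is_like_msvc()` reports whether the configured compiler accepts MSVC-style arguments. Below is a minimal sketch of how the CUDA portion of build.rs reads after this commit; the set-up of `ggml_cuda` and the final `compile()` call are simplified assumptions for illustration, not the crate's actual code.

// build.rs (sketch) -- assumes the `cc` crate as a build dependency; only the
// logic touched by this commit is shown.
fn main() {
    let mut ggml_cuda = cc::Build::new();

    // CUDA runtime/BLAS libraries are linked on every platform.
    for lib in ["cuda", "cublas", "cudart", "cublasLt"] {
        println!("cargo:rustc-link-lib={}", lib);
    }

    // POSIX-only libraries do not exist under MSVC, so they are only
    // requested when the detected compiler is not MSVC-like.
    if !ggml_cuda.get_compiler().is_like_msvc() {
        for lib in ["culibos", "pthread", "dl", "rt"] {
            println!("cargo:rustc-link-lib={}", lib);
        }
    }

    println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");

    ggml_cuda
        .cuda(true)
        .flag("-arch=all")
        .file("llama.cpp/ggml-cuda.cu");

    // The host C++ standard is chosen per compiler, as in the diff:
    // C++14 for MSVC-like toolchains, C++17 everywhere else.
    if ggml_cuda.get_compiler().is_like_msvc() {
        ggml_cuda.std("c++14");
    } else {
        ggml_cuda.std("c++17");
    }

    ggml_cuda.define("GGML_USE_CUBLAS", None);
    ggml_cuda.compile("ggml-cuda"); // illustrative; the real build.rs compiles more files
}

The net effect is that the POSIX-only link requests (culibos, pthread, dl, rt) are skipped on Windows, and the host C++ standard drops to C++14 when the detected compiler behaves like MSVC, while other platforms keep C++17.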
