
import PackageDescription

// Source files compiled into the `llama` SwiftPM target.
// Paths are relative to the package root; keep this list in sync with the
// CMake build (src/CMakeLists.txt and ggml/src/CMakeLists.txt).
// NOTE(review): each file must appear exactly once — SwiftPM does not
// deduplicate source entries. ("src/llama-grammar.cpp" was previously
// listed twice; the duplicate has been removed.)
var sources = [
    "src/llama.cpp",
    "src/llama-arch.cpp",
    "src/llama-vocab.cpp",
    "src/llama-grammar.cpp",
    "src/llama-sampling.cpp",
    "src/llama-context.cpp",
    "src/llama-cparams.cpp",
    "src/llama-hparams.cpp",
    "src/llama-model.cpp",
    "src/llama-chat.cpp",
    "src/llama-adapter.cpp",
    "src/llama-batch.cpp",
    "src/llama-mmap.cpp",
    "src/llama-model-loader.cpp",
    "src/llama-quant.cpp",
    "src/llama-impl.cpp",
    "src/llama-kv-cache.cpp",
    "src/unicode.cpp",
    "src/unicode-data.cpp",
    "ggml/src/gguf.cpp",
    "ggml/src/ggml.c",
    "ggml/src/ggml-alloc.c",
    "ggml/src/ggml-backend.cpp",
    "ggml/src/ggml-backend-reg.cpp",
    "ggml/src/ggml-cpu/ggml-cpu.c",
    "ggml/src/ggml-cpu/ggml-cpu.cpp",
    "ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp",
    "ggml/src/ggml-cpu/ggml-cpu-hbm.cpp",
    "ggml/src/ggml-cpu/ggml-cpu-quants.c",
    "ggml/src/ggml-cpu/ggml-cpu-traits.cpp",
    "ggml/src/ggml-threading.cpp",
    "ggml/src/ggml-quants.c",
]
40+
// Bundle resources and build settings, extended below per platform.
var resources: [Resource] = []
var linkerSettings: [LinkerSetting] = []
var cSettings: [CSetting] = [
    .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
    .unsafeFlags(["-fno-objc-arc"]),
    .headerSearchPath("ggml/src"),
    .headerSearchPath("ggml/src/ggml-cpu"),
    // NOTE: NEW_LAPACK requires iOS 16.4+.
    // We should consider adding this in the future when we drop support for iOS 14.
    // (ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc)
    // .define("ACCELERATE_NEW_LAPACK"),
    // .define("ACCELERATE_LAPACK_ILP64")
    .define("GGML_USE_CPU"),
]
55+
#if canImport(Darwin)
// Apple platforms: compile the Metal backend, bundle its shader source,
// and link Accelerate for BLAS-backed CPU paths.
sources += [
    "ggml/src/ggml-common.h",
    "ggml/src/ggml-metal/ggml-metal.m",
]
resources += [.process("ggml/src/ggml-metal/ggml-metal.metal")]
linkerSettings += [.linkedFramework("Accelerate")]
cSettings += [
    .define("GGML_USE_ACCELERATE"),
    .define("GGML_USE_METAL"),
]
#endif

#if os(Linux)
// Needed for GNU extensions (e.g. in mmap/threading code paths).
cSettings += [.define("_GNU_SOURCE")]
#endif
72+
573let package = Package (
674 name: " llama " ,
775 platforms: [
@@ -14,6 +82,26 @@ let package = Package(
1482 . library( name: " llama " , targets: [ " llama " ] ) ,
1583 ] ,
1684 targets: [
17- . systemLibrary( name: " llama " , pkgConfig: " llama " ) ,
18- ]
85+ . target(
86+ name: " llama " ,
87+ path: " . " ,
88+ exclude: [
89+ " build " ,
90+ " cmake " ,
91+ " examples " ,
92+ " scripts " ,
93+ " models " ,
94+ " tests " ,
95+ " CMakeLists.txt " ,
96+ " Makefile " ,
97+ " ggml/src/ggml-metal-embed.metal "
98+ ] ,
99+ sources: sources,
100+ resources: resources,
101+ publicHeadersPath: " spm-headers " ,
102+ cSettings: cSettings,
103+ linkerSettings: linkerSettings
104+ )
105+ ] ,
106+ cxxLanguageStandard: . cxx17
19107)
0 commit comments