Skip to content

Commit 18f15b1

Browse files
committed
swift : revert changes
1 parent cde3833 commit 18f15b1

File tree

3 files changed

+92
-38
lines changed

3 files changed

+92
-38
lines changed

.github/workflows/build.yml

Lines changed: 1 addition & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -660,24 +660,10 @@ jobs:
660660
run: |
661661
brew update
662662
663-
- name: Build llama.cpp with CMake
664-
id: cmake_build
665-
run: |
666-
sysctl -a
667-
cmake -B build -G Xcode \
668-
-DGGML_METAL_USE_BF16=ON \
669-
-DGGML_METAL_EMBED_LIBRARY=ON \
670-
-DLLAMA_BUILD_EXAMPLES=OFF \
671-
-DLLAMA_BUILD_TESTS=OFF \
672-
-DLLAMA_BUILD_SERVER=OFF \
673-
-DCMAKE_OSX_ARCHITECTURES="arm64;x86_64"
674-
cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)
675-
sudo cmake --install build --config Release
676-
677663
- name: xcodebuild for swift package
678664
id: xcodebuild
679665
run: |
680-
xcodebuild -scheme llama-Package -destination "${{ matrix.destination }}"
666+
xcodebuild -scheme llama -destination "${{ matrix.destination }}"
681667
682668
windows-msys2:
683669
runs-on: windows-latest
@@ -1284,27 +1270,6 @@ jobs:
12841270
- name: Checkout code
12851271
uses: actions/checkout@v4
12861272

1287-
- name: Build
1288-
id: cmake_build
1289-
run: |
1290-
sysctl -a
1291-
cmake -B build -G Xcode \
1292-
-DGGML_METAL_USE_BF16=ON \
1293-
-DGGML_METAL_EMBED_LIBRARY=ON \
1294-
-DLLAMA_BUILD_EXAMPLES=OFF \
1295-
-DLLAMA_BUILD_TESTS=OFF \
1296-
-DLLAMA_BUILD_SERVER=OFF \
1297-
-DCMAKE_SYSTEM_NAME=iOS \
1298-
-DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \
1299-
-DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
1300-
cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO
1301-
sudo cmake --install build --config Release
1302-
1303-
- name: xcodebuild for swift package
1304-
id: xcodebuild
1305-
run: |
1306-
xcodebuild -scheme llama-Package -destination 'generic/platform=iOS'
1307-
13081273
- name: Build Xcode project
13091274
run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' build
13101275

Package.swift

Lines changed: 90 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,74 @@
22

33
import PackageDescription
44

5+
var sources = [
6+
"src/llama.cpp",
7+
"src/llama-arch.cpp",
8+
"src/llama-vocab.cpp",
9+
"src/llama-grammar.cpp",
10+
"src/llama-sampling.cpp",
11+
"src/llama-context.cpp",
12+
"src/llama-cparams.cpp",
13+
"src/llama-hparams.cpp",
14+
"src/llama-model.cpp",
15+
"src/llama-chat.cpp",
16+
"src/llama-adapter.cpp",
17+
"src/llama-batch.cpp",
18+
"src/llama-grammar.cpp",
19+
"src/llama-mmap.cpp",
20+
"src/llama-model-loader.cpp",
21+
"src/llama-quant.cpp",
22+
"src/llama-impl.cpp",
23+
"src/llama-kv-cache.cpp",
24+
"src/unicode.cpp",
25+
"src/unicode-data.cpp",
26+
"ggml/src/gguf.cpp",
27+
"ggml/src/ggml.c",
28+
"ggml/src/ggml-alloc.c",
29+
"ggml/src/ggml-backend.cpp",
30+
"ggml/src/ggml-backend-reg.cpp",
31+
"ggml/src/ggml-cpu/ggml-cpu.c",
32+
"ggml/src/ggml-cpu/ggml-cpu.cpp",
33+
"ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp",
34+
"ggml/src/ggml-cpu/ggml-cpu-hbm.cpp",
35+
"ggml/src/ggml-cpu/ggml-cpu-quants.c",
36+
"ggml/src/ggml-cpu/ggml-cpu-traits.cpp",
37+
"ggml/src/ggml-threading.cpp",
38+
"ggml/src/ggml-quants.c",
39+
]
40+
41+
var resources: [Resource] = []
42+
var linkerSettings: [LinkerSetting] = []
43+
var cSettings: [CSetting] = [
44+
.unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
45+
.unsafeFlags(["-fno-objc-arc"]),
46+
.headerSearchPath("ggml/src"),
47+
.headerSearchPath("ggml/src/ggml-cpu"),
48+
// NOTE: NEW_LAPACK will require iOS version 16.4+
49+
// We should consider adding this in the future when we drop support for iOS 14
50+
// (ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc)
51+
// .define("ACCELERATE_NEW_LAPACK"),
52+
// .define("ACCELERATE_LAPACK_ILP64")
53+
.define("GGML_USE_CPU"),
54+
]
55+
56+
#if canImport(Darwin)
57+
sources.append("ggml/src/ggml-common.h")
58+
sources.append("ggml/src/ggml-metal/ggml-metal.m")
59+
resources.append(.process("ggml/src/ggml-metal/ggml-metal.metal"))
60+
linkerSettings.append(.linkedFramework("Accelerate"))
61+
cSettings.append(
62+
contentsOf: [
63+
.define("GGML_USE_ACCELERATE"),
64+
.define("GGML_USE_METAL"),
65+
]
66+
)
67+
#endif
68+
69+
#if os(Linux)
70+
cSettings.append(.define("_GNU_SOURCE"))
71+
#endif
72+
573
let package = Package(
674
name: "llama",
775
platforms: [
@@ -14,6 +82,26 @@ let package = Package(
1482
.library(name: "llama", targets: ["llama"]),
1583
],
1684
targets: [
17-
.systemLibrary(name: "llama", pkgConfig: "llama"),
18-
]
85+
.target(
86+
name: "llama",
87+
path: ".",
88+
exclude: [
89+
"build",
90+
"cmake",
91+
"examples",
92+
"scripts",
93+
"models",
94+
"tests",
95+
"CMakeLists.txt",
96+
"Makefile",
97+
"ggml/src/ggml-metal-embed.metal"
98+
],
99+
sources: sources,
100+
resources: resources,
101+
publicHeadersPath: "spm-headers",
102+
cSettings: cSettings,
103+
linkerSettings: linkerSettings
104+
)
105+
],
106+
cxxLanguageStandard: .cxx17
19107
)

spm-headers/gguf.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
../ggml/include/gguf.h

0 commit comments

Comments
 (0)