-// Compatible with Zig Version 0.11.0
+// Compatible with Zig Version 0.12.0-dev.xx
 const std = @import("std");
 const ArrayList = std.ArrayList;
 const Compile = std.Build.Step.Compile;
@@ -118,17 +118,20 @@ pub fn build(b: *std.Build) !void {
     var make = try Maker.init(b);
     make.enable_lto = b.option(bool, "lto", "Enable LTO optimization, (default: false)") orelse false;

+    // Options
     const llama_vulkan = b.option(bool, "llama-vulkan", "Enable Vulkan backend for Llama, (default: false)") orelse false;
     const llama_metal = b.option(bool, "llama-metal", "Enable Metal backend for Llama, (default: false, true for macos)") orelse (make.target.result.os.tag == .macos);
     const llama_no_accelerate = b.option(bool, "llama-no-accelerate", "Disable Accelerate framework for Llama, (default: false)") orelse false;
     const llama_accelerate = !llama_no_accelerate and make.target.result.os.tag == .macos;

+    // Flags
     if (llama_accelerate) {
         try make.addFlag("-DGGML_USE_ACCELERATE");
         try make.addFlag("-DACCELERATE_USE_LAPACK");
         try make.addFlag("-DACCELERATE_LAPACK_ILP64");
     }

+    // Objects
     var extra_objs = ArrayList(*Compile).init(b.allocator);

     if (llama_vulkan) {
@@ -154,42 +157,25 @@ pub fn build(b: *std.Build) !void {
     const sampling = make.obj("sampling", "common/sampling.cpp");
     const grammar_parser = make.obj("grammar-parser", "common/grammar-parser.cpp");
     const clip = make.obj("clip", "examples/llava/clip.cpp");
-    // const train = make.obj("train", "common/train.cpp");
-
-    var exes = ArrayList(*Compile).init(b.allocator);
-
-    var objs = ArrayList(*Compile).init(b.allocator);
-    try objs.appendSlice(&[_]*Compile{
-        ggml,
-        ggml_alloc,
-        ggml_backend,
-        ggml_quants,
-        llama,
-        common,
-        buildinfo,
-        sampling,
-        console,
-        grammar_parser,
-        clip,
-    });
-    try objs.appendSlice(extra_objs.items);
-
-    const main = make.exe("main", "examples/main/main.cpp", objs.items);
-    try exes.append(main);
-
-    // _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
-    // _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
-    // _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
-    // _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, train });
-    // _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, train });
-
-    const server = make.exe("server", "examples/server/server.cpp", objs.items);
+    const train = make.obj("train", "common/train.cpp");
+
+    // Executables
+    const main = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, sampling, console, grammar_parser, clip });
+    const quantize = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
+    const perplexity = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
+    const embedding = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
+    const finetune = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, train });
+    const train_text_from_scratch = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, train });
+    const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, sampling, console, grammar_parser, clip });
     if (make.target.result.os.tag == .windows) {
         server.linkSystemLibrary("ws2_32");
     }
-    try exes.append(server);

-    for (exes.items) |e| {
+    const exes = [_]*Compile{ main, server, quantize, perplexity, embedding, finetune, train_text_from_scratch };
+
+    for (exes) |e| {
+        for (extra_objs.items) |o| e.addObject(o);
+
         if (llama_vulkan) {
             e.linkSystemLibrary("vulkan");
         }
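
Note: each toggle declared with b.option above should surface as a -D flag on the zig build command line, which is how Zig build options are normally set. A hypothetical invocation using the option names this diff defines (exact flag syntax may vary across Zig dev versions):

    # enable LTO and the Vulkan backend for all executables
    zig build -Dlto=true -Dllama-vulkan=true

    # on macOS, Metal defaults to on; opt out of Accelerate explicitly
    zig build -Dllama-no-accelerate=true

Because the extra_objs loop adds the backend-specific objects to every executable, no per-target flags are needed beyond these.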