Commit 9a60f1b

improve array code
1 parent beed7fa commit 9a60f1b

File tree

1 file changed: +19 -33 lines

build.zig

Lines changed: 19 additions & 33 deletions
@@ -1,4 +1,4 @@
-// Compatible with Zig Version 0.11.0
+// Compatible with Zig Version 0.12.0-dev.xx
 const std = @import("std");
 const ArrayList = std.ArrayList;
 const Compile = std.Build.Step.Compile;
@@ -118,17 +118,20 @@ pub fn build(b: *std.Build) !void {
     var make = try Maker.init(b);
     make.enable_lto = b.option(bool, "lto", "Enable LTO optimization, (default: false)") orelse false;
 
+    // Options
     const llama_vulkan = b.option(bool, "llama-vulkan", "Enable Vulkan backend for Llama, (default: false)") orelse false;
     const llama_metal = b.option(bool, "llama-metal", "Enable Metal backend for Llama, (default: false, true for macos)") orelse (make.target.result.os.tag == .macos);
     const llama_no_accelerate = b.option(bool, "llama-no-accelerate", "Disable Accelerate framework for Llama, (default: false)") orelse false;
     const llama_accelerate = !llama_no_accelerate and make.target.result.os.tag == .macos;
 
+    // Flags
     if (llama_accelerate) {
         try make.addFlag("-DGGML_USE_ACCELERATE");
         try make.addFlag("-DACCELERATE_USE_LAPACK");
         try make.addFlag("-DACCELERATE_LAPACK_ILP64");
     }
 
+    // Objects
     var extra_objs = ArrayList(*Compile).init(b.allocator);
 
     if (llama_vulkan) {
@@ -154,42 +157,25 @@ pub fn build(b: *std.Build) !void {
     const sampling = make.obj("sampling", "common/sampling.cpp");
     const grammar_parser = make.obj("grammar-parser", "common/grammar-parser.cpp");
     const clip = make.obj("clip", "examples/llava/clip.cpp");
-    // const train = make.obj("train", "common/train.cpp");
-
-    var exes = ArrayList(*Compile).init(b.allocator);
-
-    var objs = ArrayList(*Compile).init(b.allocator);
-    try objs.appendSlice(&[_]*Compile{
-        ggml,
-        ggml_alloc,
-        ggml_backend,
-        ggml_quants,
-        llama,
-        common,
-        buildinfo,
-        sampling,
-        console,
-        grammar_parser,
-        clip,
-    });
-    try objs.appendSlice(extra_objs.items);
-
-    const main = make.exe("main", "examples/main/main.cpp", objs.items);
-    try exes.append(main);
-
-    // _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
-    // _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
-    // _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
-    // _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, train });
-    // _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, train });
-
-    const server = make.exe("server", "examples/server/server.cpp", objs.items);
+    const train = make.obj("train", "common/train.cpp");
+
+    // Executables
+    const main = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, sampling, console, grammar_parser, clip });
+    const quantize = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
+    const perplexity = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
+    const embedding = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
+    const finetune = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, train });
+    const train_text_from_scratch = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, train });
+    const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, sampling, console, grammar_parser, clip });
     if (make.target.result.os.tag == .windows) {
         server.linkSystemLibrary("ws2_32");
     }
-    try exes.append(server);
 
-    for (exes.items) |e| {
+    const exes = [_]*Compile{ main, server, quantize, perplexity, embedding, finetune, train_text_from_scratch };
+
+    for (exes) |e| {
+        for (extra_objs.items) |o| e.addObject(o);
+
         if (llama_vulkan) {
             e.linkSystemLibrary("vulkan");
         }
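
The pattern behind this change: each executable's object list is already known when build.zig is written, so the allocator-backed ArrayList from the old code can be replaced by Zig array literals. An address-of anonymous list literal, &.{ ... }, coerces to a slice at the call site, and [_]T{ ... } declares a fixed-size array with inferred length. A minimal, self-contained sketch of the before/after, with a hypothetical sum helper standing in for make.exe (not part of this commit):

const std = @import("std");

// Hypothetical helper: takes a slice, like make.exe's objects parameter.
fn sum(values: []const i32) i32 {
    var total: i32 = 0;
    for (values) |v| total += v;
    return total;
}

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    // Before: an ArrayList built at runtime, which needs an allocator
    // and error handling on every append.
    var objs = std.ArrayList(i32).init(gpa.allocator());
    defer objs.deinit();
    try objs.appendSlice(&[_]i32{ 1, 2, 3 });
    std.debug.print("{d}\n", .{sum(objs.items)});

    // After: an anonymous list literal coerces to a slice at the call
    // site, with no allocation at all...
    std.debug.print("{d}\n", .{sum(&.{ 1, 2, 3 })});

    // ...and an inferred-length array covers a fixed set, like the
    // exes array the commit iterates over.
    const exes = [_]i32{ 1, 2, 3 };
    std.debug.print("{d}\n", .{sum(&exes)});
}

One list the commit cannot flatten this way: extra_objs depends on the -Dllama-vulkan and -Dllama-metal options, so its contents are only known at run time. It therefore stays an ArrayList and is attached to every executable in the final loop via e.addObject(o).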
