@@ -116,6 +116,7 @@ pub fn build(b: *std.build.Builder) !void {
     const ggml_backend = make.obj("ggml-backend", "ggml-backend.c");
     const ggml_quants = make.obj("ggml-quants", "ggml-quants.c");
     const unicode = make.obj("unicode", "unicode.cpp");
+    const unicode_data = make.obj("unicode-data", "unicode-data.cpp");
     const llama = make.obj("llama", "llama.cpp");
     const buildinfo = make.obj("common", "common/build-info.cpp");
     const common = make.obj("common", "common/common.cpp");
@@ -127,14 +128,14 @@ pub fn build(b: *std.build.Builder) !void {
     const clip = make.obj("clip", "examples/llava/clip.cpp");
     const llava = make.obj("llava", "examples/llava/llava.cpp");

-    _ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo, sampling, console, grammar_parser });
-    _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo });
-    _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo });
-    _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo });
-    _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo, train });
-    _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo, train });
+    _ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo, sampling, console, grammar_parser });
+    _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo });
+    _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo });
+    _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo });
+    _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo, train });
+    _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo, train });

-    const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo, sampling, grammar_parser, json_schema_to_grammar, clip, llava });
+    const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo, sampling, grammar_parser, json_schema_to_grammar, clip, llava });
     if (server.target.isWindows()) {
         server.linkSystemLibrary("ws2_32");
     }