# Apply the project-wide compile flags to everything configured in this directory.
llama_add_compile_flags()

# llama_build(<source>)
#
# Build an executable from <source>, link it PRIVATE against the project's
# `common` library, and install it (RUNTIME component).
# The target name defaults to the source file's basename without extension
# (NAME_WE); a caller may override it by defining LLAMA_TEST_NAME first.
function(llama_build source)
    if (DEFINED LLAMA_TEST_NAME)
        set(TEST_TARGET ${LLAMA_TEST_NAME})
    else()
        get_filename_component(TEST_TARGET ${source} NAME_WE)
    endif()

    add_executable(${TEST_TARGET} ${source})
    target_link_libraries(${TEST_TARGET} PRIVATE common)
    install(TARGETS ${TEST_TARGET} RUNTIME)
endfunction()
14+
315function (llama_test target )
416 include (CMakeParseArguments)
517 set (options )
# … (diff hunk marker @@ -36,7 +48,7 @@ — intervening lines of llama_test() elided in this view)
# - LABEL: label for the test (defaults to main)
# - ARGS: arguments to pass to the test executable
# - WORKING_DIRECTORY
39- function (llama_target_and_test source )
51+ function (llama_build_and_test source )
4052 include (CMakeParseArguments)
4153 set (options )
4254 set (oneValueArgs NAME LABEL WORKING_DIRECTORY )
# … (diff hunk marker @@ -58,6 +70,7 @@ — intervening lines of llama_build_and_test() elided in this view)
5870 add_executable (${TEST_TARGET} ${source} get -model.cpp)
5971 install (TARGETS ${TEST_TARGET} RUNTIME)
6072 target_link_libraries (${TEST_TARGET} PRIVATE common)
73+
6174 add_test (
6275 NAME ${TEST_TARGET}
6376 WORKING_DIRECTORY ${LLAMA_TEST_WORKING_DIRECTORY}
# … (diff hunk marker @@ -68,9 +81,7 @@ — intervening lines of llama_build_and_test() elided in this view)
6881endfunction ()
6982
# build test-tokenizer-0 target once and add many tests
llama_build(test-tokenizer-0.cpp)

llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge  ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-bert-bge.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-command-r.gguf)
# … (diff hunk marker @@ -87,27 +98,27 @@ — additional test-tokenizer-0 vocab tests elided in this view)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
8899
# llguidance grammar backend is optional; only build/run its test when enabled
if (LLAMA_LLGUIDANCE)
    llama_build_and_test(test-grammar-llguidance.cpp ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
endif()
92103
if (NOT WIN32)
    # these tests are disabled on Windows because they use internal functions not exported with LLAMA_API
    llama_build_and_test(test-sampling.cpp)
    llama_build_and_test(test-grammar-parser.cpp)
    llama_build_and_test(test-grammar-integration.cpp)
    llama_build_and_test(test-llama-grammar.cpp)
    llama_build_and_test(test-chat.cpp)
100111 # TODO: disabled on loongarch64 because the ggml-ci node lacks Python 3.8
101112 if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64" )
102- llama_target_and_test (test -json-schema-to-grammar.cpp WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} /..)
113+ llama_build_and_test (test -json-schema-to-grammar.cpp WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} /..)
103114 target_include_directories (test -json-schema-to-grammar PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} /../examples/server)
104115 endif ()
105116
117+ llama_build(test -quantize-stats.cpp)
118+ llama_build(test -gbnf-validator.cpp)
106119
107120 # build test-tokenizer-1-bpe target once and add many tests
108- add_executable (test -tokenizer-1-bpe test -tokenizer-1-bpe.cpp)
109- target_link_libraries (test -tokenizer-1-bpe PRIVATE common)
110- install (TARGETS test -tokenizer-1-bpe RUNTIME)
121+ llama_build(test -tokenizer-1-bpe.cpp)
111122
112123 # TODO: disabled due to slowness
113124 #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
    # … (diff hunk marker @@ -120,37 +131,35 @@ — more disabled test-tokenizer-1-bpe tests elided in this view)
    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
121132
122133 # build test-tokenizer-1-spm target once and add many tests
123- add_executable (test -tokenizer-1-spm test -tokenizer-1-spm.cpp)
124- target_link_libraries (test -tokenizer-1-spm PRIVATE common)
125- install (TARGETS test -tokenizer-1-spm RUNTIME)
134+ llama_build(test -tokenizer-1-spm.cpp)
126135
127136 llama_test(test -tokenizer-1-spm NAME test -tokenizer-1-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR} /../models/ggml-vocab-llama-spm.gguf)
128137 #llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-baichuan ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf)
129138
130- # llama_target_and_test (test-double-float.cpp) # SLOW
139+ # llama_build_and_test (test-double-float.cpp) # SLOW
131140endif ()
132141
llama_build_and_test(test-log.cpp)
llama_build_and_test(test-chat-template.cpp)
135144
# this fails on windows (github hosted runner) due to curl DLL not found (exit code 0xc0000135)
if (NOT WIN32)
    llama_build_and_test(test-arg-parser.cpp)
endif()
140149
# llama_build_and_test(test-opt.cpp) # SLOW
llama_build_and_test(test-gguf.cpp)
llama_build_and_test(test-backend-ops.cpp)

llama_build_and_test(test-model-load-cancel.cpp LABEL "model")
llama_build_and_test(test-autorelease.cpp       LABEL "model")
147156
if (NOT GGML_BACKEND_DL)
    # these tests use the backends directly and cannot be built with dynamic loading
    llama_build_and_test(test-barrier.cpp)
    llama_build_and_test(test-quantize-fns.cpp)
    llama_build_and_test(test-quantize-perf.cpp)
    llama_build_and_test(test-rope.cpp)
endif()
155164
156165