Skip to content

Commit a4fad89

Browse files
committed
cmake : rework tests
ggml-ci
1 parent 9af7a01 commit a4fad89

File tree

1 file changed

+39
-32
lines changed

1 file changed

+39
-32
lines changed

tests/CMakeLists.txt

Lines changed: 39 additions & 32 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,17 @@
11
llama_add_compile_flags()
22

3+
function(llama_build source)
4+
if (DEFINED LLAMA_TEST_NAME)
5+
set(TEST_TARGET ${LLAMA_TEST_NAME})
6+
else()
7+
get_filename_component(TEST_TARGET ${source} NAME_WE)
8+
endif()
9+
10+
add_executable(${TEST_TARGET} ${source})
11+
target_link_libraries(${TEST_TARGET} PRIVATE common)
12+
install(TARGETS ${TEST_TARGET} RUNTIME)
13+
endfunction()
14+
315
function(llama_test target)
416
include(CMakeParseArguments)
517
set(options)
@@ -36,7 +48,7 @@ endfunction()
3648
# - LABEL: label for the test (defaults to main)
3749
# - ARGS: arguments to pass to the test executable
3850
# - WORKING_DIRECTORY
39-
function(llama_target_and_test source)
51+
function(llama_build_and_test source)
4052
include(CMakeParseArguments)
4153
set(options)
4254
set(oneValueArgs NAME LABEL WORKING_DIRECTORY)
@@ -58,6 +70,7 @@ function(llama_target_and_test source)
5870
add_executable(${TEST_TARGET} ${source} get-model.cpp)
5971
install(TARGETS ${TEST_TARGET} RUNTIME)
6072
target_link_libraries(${TEST_TARGET} PRIVATE common)
73+
6174
add_test(
6275
NAME ${TEST_TARGET}
6376
WORKING_DIRECTORY ${LLAMA_TEST_WORKING_DIRECTORY}
@@ -68,9 +81,7 @@ function(llama_target_and_test source)
6881
endfunction()
6982

7083
# build test-tokenizer-0 target once and add many tests
71-
add_executable(test-tokenizer-0 test-tokenizer-0.cpp)
72-
target_link_libraries(test-tokenizer-0 PRIVATE common)
73-
install(TARGETS test-tokenizer-0 RUNTIME)
84+
llama_build(test-tokenizer-0.cpp)
7485

7586
llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-bert-bge.gguf)
7687
llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-command-r.gguf)
@@ -87,29 +98,27 @@ llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${CMAKE
8798
llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
8899

89100
if (LLAMA_LLGUIDANCE)
90-
llama_target_and_test(test-grammar-llguidance.cpp ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
101+
llama_build_and_test(test-grammar-llguidance.cpp ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
91102
endif ()
92103

93104
if (NOT WIN32)
94105
# these tests are disabled on Windows because they use internal functions not exported with LLAMA_API
95-
llama_target_and_test(test-sampling.cpp)
96-
llama_target_and_test(test-grammar-parser.cpp)
97-
llama_target_and_test(test-grammar-integration.cpp)
98-
llama_target_and_test(test-llama-grammar.cpp)
99-
llama_target_and_test(test-quantize-stats.cpp)
100-
llama_target_and_test(test-gbnf-validator.cpp)
101-
llama_target_and_test(test-chat.cpp)
106+
llama_build_and_test(test-sampling.cpp)
107+
llama_build_and_test(test-grammar-parser.cpp)
108+
llama_build_and_test(test-grammar-integration.cpp)
109+
llama_build_and_test(test-llama-grammar.cpp)
110+
llama_build_and_test(test-chat.cpp)
102111
# TODO: disabled on loongarch64 because the ggml-ci node lacks Python 3.8
103112
if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64")
104-
llama_target_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/..)
113+
llama_build_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/..)
105114
target_include_directories(test-json-schema-to-grammar PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../examples/server)
106115
endif()
107116

117+
llama_build(test-quantize-stats.cpp)
118+
llama_build(test-gbnf-validator.cpp)
108119

109120
# build test-tokenizer-1-bpe target once and add many tests
110-
add_executable(test-tokenizer-1-bpe test-tokenizer-1-bpe.cpp)
111-
target_link_libraries(test-tokenizer-1-bpe PRIVATE common)
112-
install(TARGETS test-tokenizer-1-bpe RUNTIME)
121+
llama_build(test-tokenizer-1-bpe.cpp)
113122

114123
# TODO: disabled due to slowness
115124
#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
@@ -122,37 +131,35 @@ if (NOT WIN32)
122131
#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
123132

124133
# build test-tokenizer-1-spm target once and add many tests
125-
add_executable(test-tokenizer-1-spm test-tokenizer-1-spm.cpp)
126-
target_link_libraries(test-tokenizer-1-spm PRIVATE common)
127-
install(TARGETS test-tokenizer-1-spm RUNTIME)
134+
llama_build(test-tokenizer-1-spm.cpp)
128135

129136
llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-spm.gguf)
130137
#llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-baichuan ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf)
131138

132-
# llama_target_and_test(test-double-float.cpp) # SLOW
139+
# llama_build_and_test(test-double-float.cpp) # SLOW
133140
endif()
134141

135-
llama_target_and_test(test-log.cpp)
136-
llama_target_and_test(test-chat-template.cpp)
142+
llama_build_and_test(test-log.cpp)
143+
llama_build_and_test(test-chat-template.cpp)
137144

138145
# this fails on windows (github hosted runner) due to curl DLL not found (exit code 0xc0000135)
139146
if (NOT WIN32)
140-
llama_target_and_test(test-arg-parser.cpp)
147+
llama_build_and_test(test-arg-parser.cpp)
141148
endif()
142149

143-
# llama_target_and_test(test-opt.cpp) # SLOW
144-
llama_target_and_test(test-gguf.cpp)
145-
llama_target_and_test(test-backend-ops.cpp)
150+
# llama_build_and_test(test-opt.cpp) # SLOW
151+
llama_build_and_test(test-gguf.cpp)
152+
llama_build_and_test(test-backend-ops.cpp)
146153

147-
llama_target_and_test(test-model-load-cancel.cpp LABEL "model")
148-
llama_target_and_test(test-autorelease.cpp LABEL "model")
154+
llama_build_and_test(test-model-load-cancel.cpp LABEL "model")
155+
llama_build_and_test(test-autorelease.cpp LABEL "model")
149156

150157
if (NOT GGML_BACKEND_DL)
151158
# these tests use the backends directly and cannot be built with dynamic loading
152-
llama_target_and_test(test-barrier.cpp)
153-
llama_target_and_test(test-quantize-fns.cpp)
154-
llama_target_and_test(test-quantize-perf.cpp)
155-
llama_target_and_test(test-rope.cpp)
159+
llama_build_and_test(test-barrier.cpp)
160+
llama_build_and_test(test-quantize-fns.cpp)
161+
llama_build_and_test(test-quantize-perf.cpp)
162+
llama_build_and_test(test-rope.cpp)
156163
endif()
157164

158165

0 commit comments

Comments (0)