# build test-tokenizer-0 target once and add many tests
llama_build(test-tokenizer-0.cpp)

# one test per vocab file; each run points the shared test-tokenizer-0 binary
# at a different .gguf vocab under <project root>/models
llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge          ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-bert-bge.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r         ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-command-r.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-coder    ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-deepseek-coder.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-llm      ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-deepseek-llm.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-falcon            ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-falcon.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2             ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-2.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-bpe         ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-spm         ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-spm.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-mpt               ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-mpt.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-phi-3             ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-phi-3.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2             ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-qwen2.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact            ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-refact.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder         ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-starcoder.gguf)

# shell-script driven test; skipped on Windows since it needs a POSIX shell
if (NOT WIN32)
    llama_test_cmd(
        ${CMAKE_CURRENT_SOURCE_DIR}/test-tokenizers-repo.sh
        NAME test-tokenizers-ggml-vocabs
        WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
        ARGS https://huggingface.co/ggml-org/vocabs ${PROJECT_SOURCE_DIR}/models/ggml-vocabs
    )
endif()

if (LLAMA_LLGUIDANCE)
    llama_build_and_test(test-grammar-llguidance.cpp ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe.gguf)
endif()
if (NOT WIN32 OR NOT BUILD_SHARED_LIBS)
# NOTE(review): lines 142-146 of the original file are not visible in this excerpt
    llama_build_and_test(test-chat.cpp)
    # TODO: disabled on loongarch64 because the ggml-ci node lacks Python 3.8
    if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64")
        # runs from the project root so the test can find its data files
        llama_build_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${PROJECT_SOURCE_DIR})
        target_include_directories(test-json-schema-to-grammar PRIVATE ${PROJECT_SOURCE_DIR}/tools/server)
    endif()

    if (NOT GGML_BACKEND_DL)
# NOTE(review): lines 155-160 of the original file are not visible in this excerpt
    llama_build(test-tokenizer-1-bpe.cpp)

    # TODO: disabled due to slowness
    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila    ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-aquila.gguf)
    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-falcon    ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-falcon.gguf)
    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-2     ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-2.gguf)
    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-neox  ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-neox.gguf)
    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-llama-bpe ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe.gguf --ignore-merges)
    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-mpt       ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-mpt.gguf)
    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-refact    ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-refact.gguf)
    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-starcoder ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-starcoder.gguf)

    # build test-tokenizer-1-spm target once and add many tests
    llama_build(test-tokenizer-1-spm.cpp)

    llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-llama-spm ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-spm.gguf)
    #llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-baichuan ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-baichuan.gguf)

    # llama_build_and_test(test-double-float.cpp) # SLOW
endif()