@@ -111,19 +111,35 @@ endfunction()
# Build the test-tokenizer-0 binary once, then register one CTest case per
# vocabulary model it should be exercised against.
llama_build(test-tokenizer-0.cpp)

# s390x is a big-endian architecture; the regular .gguf vocab files are
# little-endian, so that platform uses byte-swapped variants carrying a
# "-be" suffix in both the test name and the model file name.
# NOTE(review): assumes the *-be.gguf files are present under models/ —
# confirm they are generated/checked in for the s390x CI.
if (CMAKE_SYSTEM_PROCESSOR MATCHES "s390x")
    set(VOCAB_SUFFIX "-be")
else()
    set(VOCAB_SUFFIX "")
endif()

# One registration per vocab; the suffix keeps the endianness variants
# distinct without duplicating the whole list.
foreach(vocab
        bert-bge
        command-r
        deepseek-coder
        deepseek-llm
        falcon
        gpt-2
        llama-bpe
        llama-spm
        mpt
        phi-3
        qwen2
        refact
        starcoder)
    llama_test(test-tokenizer-0
        NAME test-tokenizer-0-${vocab}${VOCAB_SUFFIX}
        ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-${vocab}${VOCAB_SUFFIX}.gguf)
endforeach()
127143
128144if (NOT WIN32 )
129145 llama_test_cmd(
0 commit comments