Skip to content

Commit f6135fa

Browse files
committed
devops: disable ppc64le to test s390x, check test build
Signed-off-by: Aaron Teo <[email protected]>
1 parent 1e1bc50 commit f6135fa

File tree

2 files changed

+32
-15
lines changed

2 files changed

+32
-15
lines changed

.github/workflows/build.yml

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -192,8 +192,9 @@ jobs:
192192
os: ubuntu-22.04-arm
193193
- build: 's390x'
194194
os: ubuntu-24.04-s390x
195-
- build: 'ppc64le'
196-
os: ubuntu-24.04-ppc64le
195+
# TODO: Disable ppc64le for now to test s390x
196+
# - build: 'ppc64le'
197+
# os: ubuntu-24.04-ppc64le
197198

198199
runs-on: ${{ matrix.os }}
199200

tests/CMakeLists.txt

Lines changed: 29 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -111,19 +111,35 @@ endfunction()
111111
# build test-tokenizer-0 target once and add many tests
112112
llama_build(test-tokenizer-0.cpp)
113113

114-
llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-bert-bge.gguf)
115-
llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-command-r.gguf)
116-
llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-coder ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-deepseek-coder.gguf)
117-
llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-llm ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-deepseek-llm.gguf)
118-
llama_test(test-tokenizer-0 NAME test-tokenizer-0-falcon ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-falcon.gguf)
119-
llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-2.gguf)
120-
llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-bpe ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe.gguf)
121-
llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-spm ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-spm.gguf)
122-
llama_test(test-tokenizer-0 NAME test-tokenizer-0-mpt ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-mpt.gguf)
123-
llama_test(test-tokenizer-0 NAME test-tokenizer-0-phi-3 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-phi-3.gguf)
124-
llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-qwen2.gguf)
125-
llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-refact.gguf)
126-
llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-starcoder.gguf)
114+
if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x")
115+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-bert-bge.gguf)
116+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-command-r.gguf)
117+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-coder ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-deepseek-coder.gguf)
118+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-llm ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-deepseek-llm.gguf)
119+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-falcon ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-falcon.gguf)
120+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-2.gguf)
121+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-bpe ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe.gguf)
122+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-spm ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-spm.gguf)
123+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-mpt ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-mpt.gguf)
124+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-phi-3 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-phi-3.gguf)
125+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-qwen2.gguf)
126+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-refact.gguf)
127+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-starcoder.gguf)
128+
else()
129+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge-be ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-bert-bge-be.gguf)
130+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r-be ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-command-r-be.gguf)
131+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-coder-be ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-deepseek-coder-be.gguf)
132+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-llm-be ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-deepseek-llm-be.gguf)
133+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-falcon-be ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-falcon-be.gguf)
134+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2-be ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-2-be.gguf)
135+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-bpe-be ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe-be.gguf)
136+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-spm-be ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-spm-be.gguf)
137+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-mpt-be ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-mpt-be.gguf)
138+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-phi-3-be ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-phi-3-be.gguf)
139+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2-be ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-qwen2-be.gguf)
140+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact-be ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-refact-be.gguf)
141+
llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder-be ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-starcoder-be.gguf)
142+
endif()
127143

128144
if (NOT WIN32)
129145
llama_test_cmd(

0 commit comments

Comments (0)