@@ -1276,17 +1276,29 @@ jobs:
12761276          LLAMA_ARG_THREADS=$(nproc) GG_BUILD_LOW_PERF=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt 
12771277
12781278ggml-ci-arm64-cpu-low-perf :
1279-     runs-on : [self-hosted, Linux, ARM64, CPU, low-perf] 
1279+     runs-on : ubuntu-22.04-arm 
12801280
12811281    steps :
12821282      - name : Clone 
12831283        id : checkout 
12841284        uses : actions/checkout@v4 
12851285
1286+       - name : ccache 
1287+         uses : hendrikmuhs/ccache-action@v1.2.16 
1288+         with :
1289+           key : ggml-ci-arm64-cpu-low-perf 
1290+           evict-old-files : 1d 
1291+ 
1292+       - name : Dependencies 
1293+         id : depends 
1294+         run : | 
1295+           sudo apt-get update 
1296+           sudo apt-get install build-essential libcurl4-openssl-dev 
1297+ 
12861298      - name : Test 
12871299        id : ggml-ci 
12881300        run : | 
1289-           bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp  
1301+           LLAMA_ARG_THREADS=$(nproc) GG_BUILD_LOW_PERF=1  bash ./ci/run.sh ./tmp/results ./tmp/mnt  
12901302
12911303ggml-ci-x64-cpu-high-perf :
12921304    runs-on : ubuntu-22.04 
@@ -1314,17 +1326,29 @@ jobs:
13141326          LLAMA_ARG_THREADS=$(nproc) bash ./ci/run.sh ./tmp/results ./tmp/mnt 
13151327
13161328ggml-ci-arm64-cpu-high-perf :
1317-     runs-on : [self-hosted, Linux, ARM64, CPU, high-perf] 
1329+     runs-on : ubuntu-22.04-arm 
13181330
13191331    steps :
13201332      - name : Clone 
13211333        id : checkout 
13221334        uses : actions/checkout@v4 
13231335
1336+       - name : ccache 
1337+         uses : hendrikmuhs/ccache-action@v1.2.16 
1338+         with :
1339+           key : ggml-ci-arm64-cpu-high-perf 
1340+           evict-old-files : 1d 
1341+ 
1342+       - name : Dependencies 
1343+         id : depends 
1344+         run : | 
1345+           sudo apt-get update 
1346+           sudo apt-get install build-essential libcurl4-openssl-dev 
1347+ 
13241348      - name : Test 
13251349        id : ggml-ci 
13261350        run : | 
1327-           GG_BUILD_NO_BF16=1 GG_BUILD_EXTRA_TESTS_0=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp  
1351+           LLAMA_ARG_THREADS=$(nproc)  GG_BUILD_NO_BF16=1 GG_BUILD_EXTRA_TESTS_0=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt  
13281352
13291353ggml-ci-x64-nvidia-v100-cuda :
13301354    runs-on : [self-hosted, Linux, X64, NVIDIA, V100] 
0 commit comments