@@ -1248,8 +1248,21 @@ jobs:
12481248 -DSOC_TYPE=${{ matrix.device }}
12491249 cmake --build build -j $(nproc)
12501250
1251- ggml-ci-mac :
1252- runs-on : [self-hosted, macOS, ARM64]
1251+ ggml-ci-x64-cpu-low-perf :
1252+ runs-on : [self-hosted, Linux, X64, CPU, low-perf]
1253+
1254+ steps :
1255+ - name : Clone
1256+ id : checkout
1257+ uses : actions/checkout@v4
1258+
1259+ - name : Test
1260+ id : ggml-ci
1261+ run : |
1262+ bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
1263+
1264+ ggml-ci-arm64-cpu-low-perf :
1265+ runs-on : [self-hosted, Linux, ARM64, CPU, low-perf]
12531266
12541267 steps :
12551268 - name : Clone
@@ -1259,11 +1272,63 @@ jobs:
12591272 - name : Test
12601273 id : ggml-ci
12611274 run : |
1262- bash ./ci/run.sh ~/results/llama.cpp ~/mnt/llama.cpp
1275+ bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
1276+
1277+ ggml-ci-x64-cpu-high-perf :
1278+ runs-on : [self-hosted, Linux, X64, CPU, high-perf]
1279+
1280+ steps :
1281+ - name : Clone
1282+ id : checkout
1283+ uses : actions/checkout@v4
1284+
1285+ - name : Test
1286+ id : ggml-ci
1287+ run : |
1288+ bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
1289+
1290+ ggml-ci-arm64-cpu-high-perf :
1291+ runs-on : [self-hosted, Linux, ARM64, CPU, high-perf]
1292+
1293+ steps :
1294+ - name : Clone
1295+ id : checkout
1296+ uses : actions/checkout@v4
1297+
1298+ - name : Test
1299+ id : ggml-ci
1300+ run : |
1301+ bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
12631302
12641303 ggml-ci-x64-cuda :
12651304 runs-on : [self-hosted, Linux, X64, CUDA]
12661305
1306+ steps :
1307+ - name : Clone
1308+ id : checkout
1309+ uses : actions/checkout@v4
1310+
1311+ - name : Test
1312+ id : ggml-ci
1313+ run : |
1314+ GG_BUILD_CUDA=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
1315+
1316+ ggml-ci-x64-vulkan :
1317+ runs-on : [self-hosted, Linux, X64, CUDA]
1318+
1319+ steps :
1320+ - name : Clone
1321+ id : checkout
1322+ uses : actions/checkout@v4
1323+
1324+ - name : Test
1325+ id : ggml-ci
1326+ run : |
1327+ GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
1328+
1329+ ggml-ci-x64-cpu-amx :
1330+ runs-on : [self-hosted, Linux, X64, CPU, AMX]
1331+
12671332 steps :
12681333 - name : Clone
12691334 id : checkout
@@ -1273,3 +1338,30 @@ jobs:
12731338 id : ggml-ci
12741339 run : |
12751340 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
1341+
1342+ ggml-ci-mac :
1343+ runs-on : [self-hosted, macOS, ARM64]
1344+
1345+ steps :
1346+ - name : Clone
1347+ id : checkout
1348+ uses : actions/checkout@v4
1349+
1350+ - name : Test
1351+ id : ggml-ci
1352+ run : |
1353+ GG_BUILD_METAL=1 bash ./ci/run.sh ~/results/llama.cpp ~/mnt/llama.cpp
1354+
1355+ # TODO: install vulkan drivers
1356+ # ggml-ci-mac-vulkan:
1357+ # runs-on: [self-hosted, macOS, ARM64]
1358+ #
1359+ # steps:
1360+ # - name: Clone
1361+ # id: checkout
1362+ # uses: actions/checkout@v4
1363+ #
1364+ # - name: Test
1365+ # id: ggml-ci
1366+ # run: |
1367+ # GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp ~/mnt/llama.cpp
0 commit comments