2 files changed (+49, -1)

**ci/README.md**

````diff
@@ -26,4 +26,43 @@ GG_BUILD_CUDA=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
 # with SYCL support
 source /opt/intel/oneapi/setvars.sh
 GG_BUILD_SYCL=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
+
+# with MUSA support
+GG_BUILD_MUSA=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
+```
+
+## Running MUSA CI in a Docker Container
+
+Assuming `$PWD` is the root of the `llama.cpp` repository, follow these steps to set up and run MUSA CI in a Docker container:
+
+### 1. Create a local directory to store cached models, configuration files and venv:
+
+```bash
+mkdir -p $HOME/llama.cpp/ci-cache
+```
+
+### 2. Create a local directory to store CI run results:
+
+```bash
+mkdir -p $HOME/llama.cpp/ci-results
+```
+
+### 3. Start a Docker container and run the CI:
+
+```bash
+docker run --privileged -it \
+    -v $HOME/llama.cpp/ci-cache:/ci-cache \
+    -v $HOME/llama.cpp/ci-results:/ci-results \
+    -v $PWD:/ws -w /ws \
+    mthreads/musa:rc3.1.1-devel-ubuntu22.04
 ```
+
+Inside the container, execute the following commands:
+
+```bash
+apt update -y && apt install -y cmake git python3.10-venv wget
+git config --global --add safe.directory /ws
+GG_BUILD_MUSA=1 bash ./ci/run.sh /ci-results /ci-cache
+```
+
+This setup ensures that the CI runs within an isolated Docker environment while maintaining cached files and results across runs.
````
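For unattended runs, the three numbered steps above can be collapsed into a single non-interactive invocation. This is a minimal sketch, assuming the same image tag and mount points as in the documented steps:

```bash
# One-shot variant: run the CI in a throwaway container (--rm removes it afterwards).
docker run --privileged --rm \
    -v $HOME/llama.cpp/ci-cache:/ci-cache \
    -v $HOME/llama.cpp/ci-results:/ci-results \
    -v $PWD:/ws -w /ws \
    mthreads/musa:rc3.1.1-devel-ubuntu22.04 \
    bash -c 'apt update -y && apt install -y cmake git python3.10-venv wget && \
             git config --global --add safe.directory /ws && \
             GG_BUILD_MUSA=1 bash ./ci/run.sh /ci-results /ci-cache'
```

Because the cache and results directories live on the host, repeated invocations reuse downloaded models and keep each run's output.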
**ci/run.sh**

```diff
@@ -16,6 +16,9 @@
 # # with VULKAN support
 # GG_BUILD_VULKAN=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
 #
+# # with MUSA support
+# GG_BUILD_MUSA=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
+#
 
 if [ -z "$2" ]; then
     echo "usage: $0 <output-dir> <mnt-dir>"
```
```diff
@@ -62,6 +65,12 @@
 if [ ! -z ${GG_BUILD_VULKAN} ]; then
     CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_VULKAN=1"
 fi
+
+if [ ! -z ${GG_BUILD_MUSA} ]; then
+    # Use qy1 by default (MTT S80)
+    MUSA_ARCH=${MUSA_ARCH:-21}
+    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_MUSA=ON -DMUSA_ARCHITECTURES=${MUSA_ARCH}"
+fi
 ## helpers
 
 # download a file if it does not exist or if it is outdated
```
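With this hunk applied, the target GPU architecture can be overridden per invocation via `MUSA_ARCH`. A usage sketch follows; the override value `22` is only an illustrative assumption, so check which compute architecture your GPU actually requires:

```bash
# Default: qy1 (MTT S80), i.e. MUSA_ARCH=21
GG_BUILD_MUSA=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt

# Explicit override for a different GPU (illustrative value)
MUSA_ARCH=22 GG_BUILD_MUSA=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
```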
```diff
@@ -811,7 +820,7 @@ export LLAMA_LOG_PREFIX=1
 export LLAMA_LOG_TIMESTAMPS=1
 
 if [ -z ${GG_BUILD_LOW_PERF} ]; then
-    # Create symlink: ./llama.cpp/models-mnt -> $MNT/models/models-mnt
+    # Create symlink: ./llama.cpp/models-mnt -> $MNT/models
     rm -rf ${SRC}/models-mnt
     mnt_models=${MNT}/models
     mkdir -p ${mnt_models}
```
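The corrected comment now matches what the code does: the repo-local `models-mnt` link points at `$MNT/models` itself. A minimal sketch of the resulting layout, assuming the link is created with `ln -sfn` further down in the script (that line sits outside this hunk):

```bash
MNT=./tmp/mnt
SRC=.
mkdir -p ${MNT}/models
# assumed continuation of the script: ./models-mnt -> $MNT/models
ln -sfn $(realpath ${MNT}/models) ${SRC}/models-mnt
ls -l ${SRC}/models-mnt   # e.g. models-mnt -> /abs/path/tmp/mnt/models
```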