
Commit a7dd86a

Merge branch 'master' into master
2 parents 592e2f5 + 92bb84f commit a7dd86a

461 files changed (+65208 / -25472 lines)


.devops/s390x.Dockerfile

Lines changed: 4 additions & 1 deletion

```diff
@@ -24,8 +24,9 @@ RUN --mount=type=cache,target=/root/.ccache \
         -DCMAKE_C_COMPILER_LAUNCHER=ccache \
         -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
         -DLLAMA_BUILD_TESTS=OFF \
-        -DGGML_BACKEND_DL=OFF \
         -DGGML_NATIVE=OFF \
+        -DGGML_BACKEND_DL=ON \
+        -DGGML_CPU_ALL_VARIANTS=ON \
         -DGGML_BLAS=ON \
         -DGGML_BLAS_VENDOR=OpenBLAS && \
     cmake --build build --config Release -j $(nproc) && \
@@ -103,6 +104,7 @@ FROM base AS light
 WORKDIR /llama.cpp/bin
 
 # Copy llama.cpp binaries and libraries
+COPY --from=collector /llama.cpp/bin/*.so /llama.cpp/bin
 COPY --from=collector /llama.cpp/bin/llama-cli /llama.cpp/bin
 
 ENTRYPOINT [ "/llama.cpp/bin/llama-cli" ]
@@ -116,6 +118,7 @@ ENV LLAMA_ARG_HOST=0.0.0.0
 WORKDIR /llama.cpp/bin
 
 # Copy llama.cpp binaries and libraries
+COPY --from=collector /llama.cpp/bin/*.so /llama.cpp/bin
 COPY --from=collector /llama.cpp/bin/llama-server /llama.cpp/bin
 
 EXPOSE 8080
```
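For context on the s390x change: `GGML_BACKEND_DL=ON` builds each ggml backend as a shared library that is discovered and loaded at runtime, and `GGML_CPU_ALL_VARIANTS=ON` builds one CPU backend per supported ISA level so the best match can be selected on the host. This is why the `light` and `server` stages now also copy the `*.so` files: with dynamic backends, the binaries alone ship no compute backend. A minimal sketch of how to verify this on any build (the exact `libggml-*.so` names vary by platform and are not part of this commit):

```sh
# Configure with dynamically loaded backends, mirroring the Dockerfile flags.
cmake -B build \
    -DGGML_BACKEND_DL=ON \
    -DGGML_CPU_ALL_VARIANTS=ON \
    -DGGML_BLAS=ON \
    -DGGML_BLAS_VENDOR=OpenBLAS
cmake --build build --config Release -j "$(nproc)"

# Each backend (and each CPU variant) is emitted as a shared object next to
# the binaries; a stripped-down image must ship these alongside llama-cli
# or llama-server, which is what the new COPY lines do.
ls build/bin/*.so
```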

.github/labeler.yml

Lines changed: 4 additions & 0 deletions

```diff
@@ -76,6 +76,10 @@ ggml:
   - changed-files:
       - any-glob-to-any-file:
           - ggml/**
+model:
+  - changed-files:
+      - any-glob-to-any-file:
+          - src/models/**
 nix:
   - changed-files:
       - any-glob-to-any-file:
```

.github/workflows/build-linux-cross.yml

Lines changed: 37 additions & 37 deletions

```diff
@@ -4,49 +4,49 @@ on:
   workflow_call:
 
 jobs:
-  ubuntu-24-riscv64-cpu-cross:
-    runs-on: ubuntu-24.04
+  # ubuntu-24-riscv64-cpu-cross:
+  #   runs-on: ubuntu-24.04
 
-    steps:
-      - uses: actions/checkout@v4
-      - name: Setup Riscv
-        run: |
-          sudo dpkg --add-architecture riscv64
+  #   steps:
+  #     - uses: actions/checkout@v4
+  #     - name: Setup Riscv
+  #       run: |
+  #         sudo dpkg --add-architecture riscv64
 
-          # Add arch-specific repositories for non-amd64 architectures
-          cat << EOF | sudo tee /etc/apt/sources.list.d/riscv64-ports.list
-          deb [arch=riscv64] http://ports.ubuntu.com/ubuntu-ports/ noble main universe
-          deb [arch=riscv64] http://ports.ubuntu.com/ubuntu-ports/ noble-updates main universe
-          deb [arch=riscv64] http://ports.ubuntu.com/ubuntu-ports/ noble-security main universe
-          deb [arch=riscv64] http://ports.ubuntu.com/ubuntu-ports/ noble-backports main universe
-          EOF
+  #         # Add arch-specific repositories for non-amd64 architectures
+  #         cat << EOF | sudo tee /etc/apt/sources.list.d/riscv64-ports.list
+  #         deb [arch=riscv64] http://ports.ubuntu.com/ubuntu-ports/ noble main universe
+  #         deb [arch=riscv64] http://ports.ubuntu.com/ubuntu-ports/ noble-updates main universe
+  #         deb [arch=riscv64] http://ports.ubuntu.com/ubuntu-ports/ noble-security main universe
+  #         deb [arch=riscv64] http://ports.ubuntu.com/ubuntu-ports/ noble-backports main universe
+  #         EOF
 
-          sudo apt-get update || true ;# Prevent failure due to missing URLs.
+  #         sudo apt-get update || true ;# Prevent failure due to missing URLs.
 
-          sudo apt-get install -y --no-install-recommends \
-            build-essential \
-            gcc-14-riscv64-linux-gnu \
-            g++-14-riscv64-linux-gnu
+  #         sudo apt-get install -y --no-install-recommends \
+  #           build-essential \
+  #           gcc-14-riscv64-linux-gnu \
+  #           g++-14-riscv64-linux-gnu
 
-      - name: Build
-        run: |
-          cmake -B build -DLLAMA_CURL=OFF \
-            -DCMAKE_BUILD_TYPE=Release \
-            -DGGML_OPENMP=OFF \
-            -DLLAMA_BUILD_EXAMPLES=ON \
-            -DLLAMA_BUILD_TOOLS=ON \
-            -DLLAMA_BUILD_TESTS=OFF \
-            -DCMAKE_SYSTEM_NAME=Linux \
-            -DCMAKE_SYSTEM_PROCESSOR=riscv64 \
-            -DCMAKE_C_COMPILER=riscv64-linux-gnu-gcc-14 \
-            -DCMAKE_CXX_COMPILER=riscv64-linux-gnu-g++-14 \
-            -DCMAKE_POSITION_INDEPENDENT_CODE=ON \
-            -DCMAKE_FIND_ROOT_PATH=/usr/lib/riscv64-linux-gnu \
-            -DCMAKE_FIND_ROOT_PATH_MODE_PROGRAM=NEVER \
-            -DCMAKE_FIND_ROOT_PATH_MODE_LIBRARY=ONLY \
-            -DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=BOTH
+  #     - name: Build
+  #       run: |
+  #         cmake -B build -DLLAMA_CURL=OFF \
+  #           -DCMAKE_BUILD_TYPE=Release \
+  #           -DGGML_OPENMP=OFF \
+  #           -DLLAMA_BUILD_EXAMPLES=ON \
+  #           -DLLAMA_BUILD_TOOLS=ON \
+  #           -DLLAMA_BUILD_TESTS=OFF \
+  #           -DCMAKE_SYSTEM_NAME=Linux \
+  #           -DCMAKE_SYSTEM_PROCESSOR=riscv64 \
+  #           -DCMAKE_C_COMPILER=riscv64-linux-gnu-gcc-14 \
+  #           -DCMAKE_CXX_COMPILER=riscv64-linux-gnu-g++-14 \
+  #           -DCMAKE_POSITION_INDEPENDENT_CODE=ON \
+  #           -DCMAKE_FIND_ROOT_PATH=/usr/lib/riscv64-linux-gnu \
+  #           -DCMAKE_FIND_ROOT_PATH_MODE_PROGRAM=NEVER \
+  #           -DCMAKE_FIND_ROOT_PATH_MODE_LIBRARY=ONLY \
+  #           -DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=BOTH
 
-          cmake --build build --config Release -j $(nproc)
+  #         cmake --build build --config Release -j $(nproc)
 
   # ubuntu-24-riscv64-vulkan-cross:
   #   runs-on: ubuntu-24.04
```
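The disabled job only cross-compiled; it never executed the riscv64 binaries. For anyone wanting to smoke-test such a cross build locally, qemu-user is one option. This is a sketch under the assumption that the cross toolchain from the job above is installed; it is not part of the workflow:

```sh
# Run a riscv64 cross-built binary on an x86-64 host via user-mode emulation.
sudo apt-get install -y qemu-user

# -L points the emulator at the riscv64 sysroot so the target's dynamic
# loader and shared libraries can be found.
qemu-riscv64 -L /usr/riscv64-linux-gnu build/bin/llama-cli --version
```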

.github/workflows/build.yml

Lines changed: 108 additions & 0 deletions

```diff
@@ -387,6 +387,39 @@ jobs:
           cd build
           ctest -L main --verbose
 
+  ubuntu-24-cmake-vulkan-deb:
+    runs-on: ubuntu-24.04
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: ccache
+        uses: ggml-org/ccache-action@v1.2.16
+        with:
+          key: ubuntu-24-cmake-vulkan-deb
+          evict-old-files: 1d
+
+      - name: Dependencies
+        id: depends
+        run: |
+          sudo apt-get install -y glslc libvulkan-dev libcurl4-openssl-dev
+
+      - name: Configure
+        id: cmake_configure
+        run: |
+          cmake -B build \
+            -DCMAKE_BUILD_TYPE=RelWithDebInfo \
+            -DGGML_BACKEND_DL=ON \
+            -DGGML_CPU_ALL_VARIANTS=ON \
+            -DGGML_VULKAN=ON
+
+      - name: Build
+        id: cmake_build
+        run: |
+          cmake --build build -j $(nproc)
+
   ubuntu-24-cmake-vulkan:
     runs-on: ubuntu-24.04
 
@@ -1272,6 +1305,81 @@
           cd examples/llama.android
           ./gradlew build --no-daemon
 
+  android-ndk-build:
+    runs-on: ubuntu-latest
+
+    env:
+      OPENCL_VERSION: 2025.07.22
+
+    strategy:
+      matrix:
+        include:
+          - build: 'arm64-cpu'
+            defines: '-D ANDROID_ABI=arm64-v8a -D ANDROID_PLATFORM=android-31 -D CMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake -D GGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv8.5-a+fp16+i8mm -G Ninja -D LLAMA_CURL=OFF -D GGML_OPENMP=OFF'
+          - build: 'arm64-snapdragon'
+            defines: '--preset arm64-android-snapdragon-release'
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Install OpenCL Headers and Libs
+        id: install_opencl
+        if: ${{ matrix.build == 'arm64-snapdragon' }}
+        run: |
+          mkdir opencl
+          curl -L -o opencl/clhpp.tar.gz https://github.com/KhronosGroup/OpenCL-CLHPP/archive/refs/tags/v${OPENCL_VERSION}.tar.gz
+          curl -L -o opencl/headers.tar.gz https://github.com/KhronosGroup/OpenCL-Headers/archive/refs/tags/v${OPENCL_VERSION}.tar.gz
+          curl -L -o opencl/icd-loader.tar.gz https://github.com/KhronosGroup/OpenCL-ICD-Loader/archive/refs/tags/v${OPENCL_VERSION}.tar.gz
+          tar -xaf opencl/headers.tar.gz -C opencl
+          tar -xaf opencl/clhpp.tar.gz -C opencl
+          tar -xaf opencl/icd-loader.tar.gz -C opencl
+          sudo cp -r opencl/OpenCL-Headers-${OPENCL_VERSION}/CL ${ANDROID_NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/include
+          sudo cp -r opencl/OpenCL-CLHPP-${OPENCL_VERSION}/include/CL/* ${ANDROID_NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/include/CL
+          cd opencl/OpenCL-ICD-Loader-${OPENCL_VERSION}
+          cmake -B build -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake -DOPENCL_ICD_LOADER_HEADERS_DIR=${ANDROID_NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/include -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=31 -DANDROID_STL=c++_shared
+          cmake --build build
+          sudo cp build/libOpenCL.so ${ANDROID_NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/lib/aarch64-linux-android
+          rm -rf opencl
+
+      - name: Install Hexagon SDK
+        id: install_hexsdk
+        if: ${{ matrix.build == 'arm64-snapdragon' }}
+        env:
+          HEXSDK_VER: 6.4.0.2
+          HEXTLS_VER: 19.0.04
+        run: |
+          curl -L -o hex-sdk.tar.gz https://github.com/snapdragon-toolchain/hexagon-sdk/releases/download/v$HEXSDK_VER/hexagon-sdk-v$HEXSDK_VER-amd64-lnx.tar.xz
+          mkdir hex-sdk
+          tar -xaf hex-sdk.tar.gz -C hex-sdk
+          ls -l hex-sdk
+          sudo mv hex-sdk /opt/hexagon
+          echo "HEXAGON_SDK_ROOT=/opt/hexagon/$HEXSDK_VER" >> "$GITHUB_ENV"
+          echo "HEXAGON_TOOLS_ROOT=/opt/hexagon/$HEXSDK_VER/tools/HEXAGON_Tools/$HEXTLS_VER" >> "$GITHUB_ENV"
+          echo "DEFAULT_HLOS_ARCH=64" >> "$GITHUB_ENV"
+          echo "DEFAULT_TOOLS_VARIANT=toolv19" >> "$GITHUB_ENV"
+          echo "DEFAULT_NO_QURT_INC=0" >> "$GITHUB_ENV"
+          echo "DEFAULT_DSP_ARCH=v73" >> "$GITHUB_ENV"
+
+      - name: Update CMake presets
+        id: update_presets
+        if: ${{ matrix.build == 'arm64-snapdragon' }}
+        run: |
+          cp docs/backend/hexagon/CMakeUserPresets.json .
+
+      - name: Build
+        id: ndk_build
+        run: |
+          cmake ${{ matrix.defines }} -B build
+          cmake --build build
+          cmake --install build --prefix pkg-adb/llama.cpp
+
+      - name: Test
+        id: cmake_test
+        run: |
+          echo "FIXME: test on devices"
+
   openEuler-latest-cmake-cann:
     if: ${{ github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'Ascend NPU') }}
     defaults:
```
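The new `android-ndk-build` job stops short of on-device testing (its Test step just echoes a FIXME), but the `pkg-adb/llama.cpp` install prefix suggests the intended flow is pushing the installed tree over adb. A hypothetical sketch of what that step could look like once device runners exist (the device paths here are assumptions, not part of the commit):

```sh
# Push the install tree produced by
# `cmake --install build --prefix pkg-adb/llama.cpp` to a connected device.
adb push pkg-adb/llama.cpp /data/local/tmp/llama.cpp

# The installed lib/ directory must be on the library path so the shared
# ggml backends can be loaded at runtime.
adb shell "cd /data/local/tmp/llama.cpp && LD_LIBRARY_PATH=\$PWD/lib ./bin/llama-cli --version"
```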

.github/workflows/docker.yml

Lines changed: 1 addition & 1 deletion

```diff
@@ -40,7 +40,7 @@ jobs:
           # https://github.com/ggml-org/llama.cpp/issues/11888
           #- { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, free_disk_space: false }
           - { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: false, runs_on: "ubuntu-22.04" }
-          - { tag: "cuda", dockerfile: ".devops/cuda.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: false, runs_on: "ubuntu-22.04" }
+          - { tag: "cuda", dockerfile: ".devops/cuda.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: true, runs_on: "ubuntu-22.04" }
           - { tag: "musa", dockerfile: ".devops/musa.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: true, runs_on: "ubuntu-22.04" }
           - { tag: "intel", dockerfile: ".devops/intel.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: true, runs_on: "ubuntu-22.04" }
           - { tag: "vulkan", dockerfile: ".devops/vulkan.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: false, runs_on: "ubuntu-22.04" }
```

.github/workflows/release.yml

Lines changed: 2 additions & 0 deletions

```diff
@@ -134,6 +134,8 @@ jobs:
         include:
           - build: 'x64'
             os: ubuntu-22.04
+          - build: 's390x'
+            os: ubuntu-24.04-s390x
           # GGML_BACKEND_DL and GGML_CPU_ALL_VARIANTS are not currently supported on arm
           # - build: 'arm64'
           #   os: ubuntu-22.04-arm
```

.github/workflows/update-ops-docs.yml

Lines changed: 2 additions & 0 deletions

```diff
@@ -3,10 +3,12 @@ name: Update Operations Documentation
 on:
   push:
     paths:
+      - 'docs/ops.md'
       - 'docs/ops/**'
       - 'scripts/create_ops_docs.py'
   pull_request:
     paths:
+      - 'docs/ops.md'
       - 'docs/ops/**'
       - 'scripts/create_ops_docs.py'
 
```

CODEOWNERS

Lines changed: 3 additions & 1 deletion

```diff
@@ -55,7 +55,7 @@
 /ggml/src/ggml-cuda/common.cuh @slaren
 /ggml/src/ggml-cuda/fattn* @JohannesGaessler
 /ggml/src/ggml-cuda/ggml-cuda.cu @slaren
-/ggml/src/ggml-cuda/mmf.* @JohannesGaessler
+/ggml/src/ggml-cuda/mmf.* @JohannesGaessler @am17an
 /ggml/src/ggml-cuda/mmq.* @JohannesGaessler
 /ggml/src/ggml-cuda/mmvf.* @JohannesGaessler
 /ggml/src/ggml-cuda/mmvq.* @JohannesGaessler
@@ -65,6 +65,7 @@
 /ggml/src/ggml-impl.h @ggerganov @slaren
 /ggml/src/ggml-metal/ @ggerganov
 /ggml/src/ggml-opencl/ @lhez @max-krasnyansky
+/ggml/src/ggml-hexagon/ @max-krasnyansky @lhez
 /ggml/src/ggml-opt.cpp @JohannesGaessler
 /ggml/src/ggml-quants.* @ggerganov
 /ggml/src/ggml-rpc/ @rgerganov
@@ -88,6 +89,7 @@
 /src/llama-model-loader.* @slaren
 /src/llama-model.* @CISC
 /src/llama-vocab.* @CISC
+/src/models/ @CISC
 /tests/ @ggerganov
 /tests/test-backend-ops.cpp @slaren
 /tests/test-thread-safety.cpp @slaren
```

README.md

Lines changed: 7 additions & 4 deletions

```diff
@@ -17,14 +17,13 @@ LLM inference in C/C++
 
 ## Hot topics
 
-- **[guide : running gpt-oss with llama.cpp](https://github.com/ggml-org/llama.cpp/discussions/15396)**
-- **[[FEEDBACK] Better packaging for llama.cpp to support downstream consumers 🤗](https://github.com/ggml-org/llama.cpp/discussions/15313)**
+- **[guide : using the new WebUI of llama.cpp](https://github.com/ggml-org/llama.cpp/discussions/16938)**
+- [guide : running gpt-oss with llama.cpp](https://github.com/ggml-org/llama.cpp/discussions/15396)
+- [[FEEDBACK] Better packaging for llama.cpp to support downstream consumers 🤗](https://github.com/ggml-org/llama.cpp/discussions/15313)
 - Support for the `gpt-oss` model with native MXFP4 format has been added | [PR](https://github.com/ggml-org/llama.cpp/pull/15091) | [Collaboration with NVIDIA](https://blogs.nvidia.com/blog/rtx-ai-garage-openai-oss) | [Comment](https://github.com/ggml-org/llama.cpp/discussions/15095)
-- Hot PRs: [All](https://github.com/ggml-org/llama.cpp/pulls?q=is%3Apr+label%3Ahot+) | [Open](https://github.com/ggml-org/llama.cpp/pulls?q=is%3Apr+label%3Ahot+is%3Aopen)
 - Multimodal support arrived in `llama-server`: [#12898](https://github.com/ggml-org/llama.cpp/pull/12898) | [documentation](./docs/multimodal.md)
 - VS Code extension for FIM completions: https://github.com/ggml-org/llama.vscode
 - Vim/Neovim plugin for FIM completions: https://github.com/ggml-org/llama.vim
-- Introducing GGUF-my-LoRA https://github.com/ggml-org/llama.cpp/discussions/10123
 - Hugging Face Inference Endpoints now support GGUF out of the box! https://github.com/ggml-org/llama.cpp/discussions/9669
 - Hugging Face GGUF editor: [discussion](https://github.com/ggml-org/llama.cpp/discussions/9268) | [tool](https://huggingface.co/spaces/CISCai/gguf-editor)
 
@@ -84,6 +83,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo
 - [X] [Mistral 7B](https://huggingface.co/mistralai/Mistral-7B-v0.1)
 - [x] [Mixtral MoE](https://huggingface.co/models?search=mistral-ai/Mixtral)
 - [x] [DBRX](https://huggingface.co/databricks/dbrx-instruct)
+- [x] [Jamba](https://huggingface.co/ai21labs)
 - [X] [Falcon](https://huggingface.co/models?search=tiiuae/falcon)
 - [X] [Chinese LLaMA / Alpaca](https://github.com/ymcui/Chinese-LLaMA-Alpaca) and [Chinese LLaMA-2 / Alpaca-2](https://github.com/ymcui/Chinese-LLaMA-Alpaca-2)
 - [X] [Vigogne (French)](https://github.com/bofenghuang/vigogne)
@@ -138,6 +138,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo
 - [x] [Ling models](https://huggingface.co/collections/inclusionAI/ling-67c51c85b34a7ea0aba94c32)
 - [x] [LFM2 models](https://huggingface.co/collections/LiquidAI/lfm2-686d721927015b2ad73eaa38)
 - [x] [Hunyuan models](https://huggingface.co/collections/tencent/hunyuan-dense-model-6890632cda26b19119c9c5e7)
+- [x] [BailingMoeV2 (Ring/Ling 2.0) models](https://huggingface.co/collections/inclusionAI/ling-v2-68bf1dd2fc34c306c1fa6f86)
 
 #### Multimodal
 
@@ -187,6 +188,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo
 - Swift [srgtuszy/llama-cpp-swift](https://github.com/srgtuszy/llama-cpp-swift)
 - Swift [ShenghaiWang/SwiftLlama](https://github.com/ShenghaiWang/SwiftLlama)
 - Delphi [Embarcadero/llama-cpp-delphi](https://github.com/Embarcadero/llama-cpp-delphi)
+- Go (no CGo needed): [hybridgroup/yzma](https://github.com/hybridgroup/yzma)
 
 </details>
 
@@ -278,6 +280,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo
 | [IBM zDNN](docs/backend/zDNN.md) | IBM Z & LinuxONE |
 | [WebGPU [In Progress]](docs/build.md#webgpu) | All |
 | [RPC](https://github.com/ggml-org/llama.cpp/tree/master/tools/rpc) | All |
+| [Hexagon [In Progress]](docs/backend/hexagon/README.md) | Snapdragon |
 
 ## Obtaining and quantizing models
 
```

ci/run.sh

Lines changed: 1 addition & 1 deletion

```diff
@@ -75,7 +75,7 @@ if [ ! -z ${GG_BUILD_ROCM} ]; then
         exit 1
     fi
 
-    CMAKE_EXTRA="${CMAKE_EXTRA} -DAMDGPU_TARGETS=${GG_BUILD_AMDGPU_TARGETS}"
+    CMAKE_EXTRA="${CMAKE_EXTRA} -DGPU_TARGETS=${GG_BUILD_AMDGPU_TARGETS}"
 fi
 
 if [ ! -z ${GG_BUILD_SYCL} ]; then
```
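The ci/run.sh change tracks the HIP build option rename: `AMDGPU_TARGETS` is deprecated in recent ROCm CMake in favor of `GPU_TARGETS`, which the build files now consume. The caller-side interface of the script is unchanged; a usage sketch (the result and mount paths are placeholders, and `gfx1100` is just an example architecture):

```sh
# Run the CI script with a ROCm build; GG_BUILD_AMDGPU_TARGETS keeps its old
# name but is now forwarded to -DGPU_TARGETS.
mkdir -p tmp/results tmp/mnt
GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1100" \
    bash ./ci/run.sh ./tmp/results ./tmp/mnt
```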
