Skip to content

Commit 124c222

Browse files
committed
Merge branch 'master' into compilade/refactor-kv-cache
Also begin reverting some implicit state rollback code.
2 parents 63ac36b + edc2656 commit 124c222

File tree

201 files changed

+18101
-13153
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the searchbox below for content that may be hidden.

201 files changed

+18101
-13153
lines changed

.devops/full-musa.Dockerfile

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
ARG UBUNTU_VERSION=22.04
2+
# This needs to generally match the container host's environment.
3+
ARG MUSA_VERSION=rc3.1.0
4+
# Target the MUSA build image
5+
ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
6+
7+
FROM ${BASE_MUSA_DEV_CONTAINER} AS build
8+
9+
RUN apt-get update && \
10+
apt-get install -y build-essential cmake python3 python3-pip git libcurl4-openssl-dev libgomp1
11+
12+
COPY requirements.txt requirements.txt
13+
COPY requirements requirements
14+
15+
RUN pip install --upgrade pip setuptools wheel \
16+
&& pip install -r requirements.txt
17+
18+
WORKDIR /app
19+
20+
COPY . .
21+
22+
RUN cmake -B build -DGGML_MUSA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
23+
cmake --build build --config Release -j$(nproc) && \
24+
cp build/bin/* .
25+
26+
ENTRYPOINT ["/app/.devops/tools.sh"]

.devops/full-rocm.Dockerfile

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ FROM ${BASE_ROCM_DEV_CONTAINER} AS build
1111
# Unless otherwise specified, we make a fat build.
1212
# List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878
1313
# This is mostly tied to rocBLAS supported archs.
14-
ARG ROCM_DOCKER_ARCH=\
14+
ARG ROCM_DOCKER_ARCH="\
1515
gfx803 \
1616
gfx900 \
1717
gfx906 \
@@ -21,7 +21,7 @@ ARG ROCM_DOCKER_ARCH=\
2121
gfx1030 \
2222
gfx1100 \
2323
gfx1101 \
24-
gfx1102
24+
gfx1102"
2525

2626
COPY requirements.txt requirements.txt
2727
COPY requirements requirements
@@ -34,7 +34,7 @@ WORKDIR /app
3434
COPY . .
3535

3636
# Set nvcc architecture
37-
ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
37+
ENV AMDGPU_TARGETS=${ROCM_DOCKER_ARCH}
3838
# Enable ROCm
3939
ENV GGML_HIPBLAS=1
4040
ENV CC=/opt/rocm/llvm/bin/clang

.devops/llama-cli-musa.Dockerfile

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
ARG UBUNTU_VERSION=22.04
2+
# This needs to generally match the container host's environment.
3+
ARG MUSA_VERSION=rc3.1.0
4+
# Target the MUSA build image
5+
ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
6+
# Target the MUSA runtime image
7+
ARG BASE_MUSA_RUN_CONTAINER=mthreads/musa:${MUSA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
8+
9+
FROM ${BASE_MUSA_DEV_CONTAINER} AS build
10+
11+
RUN apt-get update && \
12+
apt-get install -y build-essential git cmake
13+
14+
WORKDIR /app
15+
16+
COPY . .
17+
18+
RUN cmake -B build -DGGML_MUSA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
19+
cmake --build build --config Release --target llama-cli -j$(nproc)
20+
21+
FROM ${BASE_MUSA_RUN_CONTAINER} AS runtime
22+
23+
RUN apt-get update && \
24+
apt-get install -y libgomp1
25+
26+
COPY --from=build /app/build/ggml/src/libggml.so /libggml.so
27+
COPY --from=build /app/build/src/libllama.so /libllama.so
28+
COPY --from=build /app/build/bin/llama-cli /llama-cli
29+
30+
ENTRYPOINT [ "/llama-cli" ]

.devops/llama-cli-rocm.Dockerfile

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ FROM ${BASE_ROCM_DEV_CONTAINER} AS build
1111
# Unless otherwise specified, we make a fat build.
1212
# List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878
1313
# This is mostly tied to rocBLAS supported archs.
14-
ARG ROCM_DOCKER_ARCH=\
14+
ARG ROCM_DOCKER_ARCH="\
1515
gfx803 \
1616
gfx900 \
1717
gfx906 \
@@ -21,7 +21,7 @@ ARG ROCM_DOCKER_ARCH=\
2121
gfx1030 \
2222
gfx1100 \
2323
gfx1101 \
24-
gfx1102
24+
gfx1102"
2525

2626
COPY requirements.txt requirements.txt
2727
COPY requirements requirements
@@ -34,7 +34,7 @@ WORKDIR /app
3434
COPY . .
3535

3636
# Set nvcc architecture
37-
ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
37+
ENV AMDGPU_TARGETS=${ROCM_DOCKER_ARCH}
3838
# Enable ROCm
3939
ENV GGML_HIPBLAS=1
4040
ENV CC=/opt/rocm/llvm/bin/clang
Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
ARG UBUNTU_VERSION=22.04
2+
# This needs to generally match the container host's environment.
3+
ARG MUSA_VERSION=rc3.1.0
4+
# Target the MUSA build image
5+
ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
6+
# Target the MUSA runtime image
7+
ARG BASE_MUSA_RUN_CONTAINER=mthreads/musa:${MUSA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
8+
9+
FROM ${BASE_MUSA_DEV_CONTAINER} AS build
10+
11+
RUN apt-get update && \
12+
apt-get install -y build-essential git cmake libcurl4-openssl-dev
13+
14+
WORKDIR /app
15+
16+
COPY . .
17+
18+
RUN cmake -B build -DGGML_MUSA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
19+
cmake --build build --config Release --target llama-server -j$(nproc)
20+
21+
FROM ${BASE_MUSA_RUN_CONTAINER} AS runtime
22+
23+
RUN apt-get update && \
24+
apt-get install -y libcurl4-openssl-dev libgomp1 curl
25+
26+
COPY --from=build /app/build/ggml/src/libggml.so /libggml.so
27+
COPY --from=build /app/build/src/libllama.so /libllama.so
28+
COPY --from=build /app/build/bin/llama-server /llama-server
29+
30+
# Must be set to 0.0.0.0 so it can listen to requests from host machine
31+
ENV LLAMA_ARG_HOST=0.0.0.0
32+
33+
HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
34+
35+
ENTRYPOINT [ "/llama-server" ]

.devops/llama-server-rocm.Dockerfile

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ FROM ${BASE_ROCM_DEV_CONTAINER} AS build
1111
# Unless otherwise specified, we make a fat build.
1212
# List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878
1313
# This is mostly tied to rocBLAS supported archs.
14-
ARG ROCM_DOCKER_ARCH=\
14+
ARG ROCM_DOCKER_ARCH="\
1515
gfx803 \
1616
gfx900 \
1717
gfx906 \
@@ -21,7 +21,7 @@ ARG ROCM_DOCKER_ARCH=\
2121
gfx1030 \
2222
gfx1100 \
2323
gfx1101 \
24-
gfx1102
24+
gfx1102"
2525

2626
COPY requirements.txt requirements.txt
2727
COPY requirements requirements
@@ -34,7 +34,7 @@ WORKDIR /app
3434
COPY . .
3535

3636
# Set nvcc architecture
37-
ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
37+
ENV AMDGPU_TARGETS=${ROCM_DOCKER_ARCH}
3838
# Enable ROCm
3939
ENV GGML_HIPBLAS=1
4040
ENV CC=/opt/rocm/llvm/bin/clang

.dockerignore

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
*.o
22
*.a
33
.cache/
4-
.git/
4+
# Do not ignore .git directory, otherwise the reported build number will always be 0
55
.github/
66
.gitignore
77
.vs/

.github/workflows/bench.yml.disabled

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,10 +27,10 @@ on:
2727
push:
2828
branches:
2929
- master
30-
paths: ['llama.cpp', 'ggml.c', 'ggml-backend.c', 'ggml-quants.c', '**/*.cu', 'examples/server/*.h*', 'examples/server/*.cpp']
30+
paths: ['llama.cpp', 'ggml.c', 'ggml-backend.cpp', 'ggml-quants.c', '**/*.cu', 'examples/server/*.h*', 'examples/server/*.cpp']
3131
pull_request_target:
3232
types: [opened, synchronize, reopened]
33-
paths: ['llama.cpp', 'ggml.c', 'ggml-backend.c', 'ggml-quants.c', '**/*.cu', 'examples/server/*.h*', 'examples/server/*.cpp']
33+
paths: ['llama.cpp', 'ggml.c', 'ggml-backend.cpp', 'ggml-quants.c', '**/*.cu', 'examples/server/*.h*', 'examples/server/*.cpp']
3434
schedule:
3535
- cron: '04 2 * * *'
3636

.github/workflows/build.yml

Lines changed: 77 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,10 +19,18 @@ concurrency:
1919
group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
2020
cancel-in-progress: true
2121

22+
# Fine-grained permission
23+
# https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
24+
permissions:
25+
contents: write # for creating release
26+
2227
env:
2328
BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
2429
GGML_NLOOP: 3
2530
GGML_N_THREADS: 1
31+
LLAMA_LOG_COLORS: 1
32+
LLAMA_LOG_PREFIX: 1
33+
LLAMA_LOG_TIMESTAMPS: 1
2634

2735
jobs:
2836
macOS-latest-cmake-arm64:
@@ -953,6 +961,7 @@ jobs:
953961
cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/sycl7.dll" ./build/bin
954962
cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/svml_dispmd.dll" ./build/bin
955963
cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libmmd.dll" ./build/bin
964+
cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libiomp5md.dll" ./build/bin
956965
echo "cp oneAPI running time dll files to ./build/bin done"
957966
7z a llama-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip ./build/bin/*
958967
@@ -964,8 +973,45 @@ jobs:
964973
name: llama-bin-win-sycl-x64.zip
965974

966975
windows-latest-cmake-hip:
976+
if: ${{ github.event.inputs.create_release != 'true' }}
977+
runs-on: windows-latest
978+
979+
steps:
980+
- name: Clone
981+
id: checkout
982+
uses: actions/checkout@v4
983+
984+
- name: Install
985+
id: depends
986+
run: |
987+
$ErrorActionPreference = "Stop"
988+
write-host "Downloading AMD HIP SDK Installer"
989+
Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
990+
write-host "Installing AMD HIP SDK"
991+
Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
992+
write-host "Completed AMD HIP SDK installation"
993+
994+
- name: Verify ROCm
995+
id: verify
996+
run: |
997+
& 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version
998+
999+
- name: Build
1000+
id: cmake_build
1001+
run: |
1002+
$env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)
1003+
$env:CMAKE_PREFIX_PATH="${env:HIP_PATH}"
1004+
cmake -G "Unix Makefiles" -B build -S . -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" -DGGML_HIPBLAS=ON -DCMAKE_BUILD_TYPE=Release -DGGML_RPC=ON
1005+
cmake --build build -j ${env:NUMBER_OF_PROCESSORS}
1006+
1007+
windows-latest-cmake-hip-release:
1008+
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
9671009
runs-on: windows-latest
9681010

1011+
strategy:
1012+
matrix:
1013+
gpu_target: [gfx1100, gfx1101, gfx1030]
1014+
9691015
steps:
9701016
- name: Clone
9711017
id: checkout
@@ -991,8 +1037,36 @@ jobs:
9911037
run: |
9921038
$env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)
9931039
$env:CMAKE_PREFIX_PATH="${env:HIP_PATH}"
994-
cmake -G "Unix Makefiles" -B build -S . -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" -DGGML_HIPBLAS=ON
995-
cmake --build build --config Release
1040+
cmake -G "Unix Makefiles" -B build -S . -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" -DGGML_HIPBLAS=ON -DCMAKE_BUILD_TYPE=Release -DAMDGPU_TARGETS=${{ matrix.gpu_target }} -DGGML_RPC=ON
1041+
cmake --build build -j ${env:NUMBER_OF_PROCESSORS}
1042+
md "build\bin\rocblas\library\"
1043+
cp "${env:HIP_PATH}\bin\hipblas.dll" "build\bin\"
1044+
cp "${env:HIP_PATH}\bin\rocblas.dll" "build\bin\"
1045+
cp "${env:HIP_PATH}\bin\rocblas\library\*" "build\bin\rocblas\library\"
1046+
1047+
- name: Determine tag name
1048+
id: tag
1049+
shell: bash
1050+
run: |
1051+
BUILD_NUMBER="$(git rev-list --count HEAD)"
1052+
SHORT_HASH="$(git rev-parse --short=7 HEAD)"
1053+
if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then
1054+
echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT
1055+
else
1056+
SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-')
1057+
echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT
1058+
fi
1059+
1060+
- name: Pack artifacts
1061+
id: pack_artifacts
1062+
run: |
1063+
7z a llama-${{ steps.tag.outputs.name }}-bin-win-hip-x64-${{ matrix.gpu_target }}.zip .\build\bin\*
1064+
1065+
- name: Upload artifacts
1066+
uses: actions/upload-artifact@v4
1067+
with:
1068+
path: llama-${{ steps.tag.outputs.name }}-bin-win-hip-x64-${{ matrix.gpu_target }}.zip
1069+
name: llama-bin-win-hip-x64-${{ matrix.gpu_target }}.zip
9961070

9971071
ios-xcode-build:
9981072
runs-on: macos-latest
@@ -1057,6 +1131,7 @@ jobs:
10571131
- macOS-latest-cmake
10581132
- windows-latest-cmake
10591133
- windows-latest-cmake-cuda
1134+
- windows-latest-cmake-hip-release
10601135
- macOS-latest-cmake-arm64
10611136
- macOS-latest-cmake-x64
10621137

.github/workflows/close-issue.yml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,11 @@ on:
33
schedule:
44
- cron: "42 0 * * *"
55

6+
# Fine-grained permission
7+
# https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
8+
permissions:
9+
issues: write
10+
611
jobs:
712
close-issues:
813
runs-on: ubuntu-latest

0 commit comments

Comments
 (0)