Skip to content

Commit f4a2528

Browse files
committed
Added Docker multi-stage builds
1 parent e52aba5 commit f4a2528

22 files changed

+573
-579
lines changed

.devops/full.Dockerfile renamed to .devops/cpu.Dockerfile

Lines changed: 35 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ RUN cmake -S . -B build -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DGGML_CPU_ALL_VA
1414
mkdir -p /app/lib && \
1515
find build -name "*.so" -exec cp {} /app/lib/ \;
1616

17-
FROM ubuntu:$UBUNTU_VERSION as runtime
17+
FROM ubuntu:$UBUNTU_VERSION as full
1818

1919
WORKDIR /app
2020

@@ -36,3 +36,37 @@ COPY --from=build /app/gguf-py /app/gguf-py
3636
ENV LC_ALL=C.utf8
3737

3838
ENTRYPOINT ["/app/tools.sh"]
39+
40+
41+
FROM ubuntu:$UBUNTU_VERSION AS light
42+
43+
WORKDIR /app
44+
45+
RUN apt-get update && \
46+
apt-get install -y libgomp1
47+
48+
COPY --from=build /app/build/bin/llama-cli /app/
49+
COPY --from=build /app/lib/ /app/
50+
51+
ENV LC_ALL=C.utf8
52+
53+
ENTRYPOINT [ "/app/llama-cli" ]
54+
55+
56+
FROM ubuntu:$UBUNTU_VERSION AS server
57+
58+
WORKDIR /app
59+
60+
RUN apt-get update && \
61+
apt-get install -y libcurl4-openssl-dev libgomp1 curl
62+
63+
COPY --from=build /app/build/bin/llama-server /app/
64+
COPY --from=build /app/lib/ /app/
65+
66+
ENV LC_ALL=C.utf8
67+
# Must be set to 0.0.0.0 so it can listen to requests from host machine
68+
ENV LLAMA_ARG_HOST=0.0.0.0
69+
70+
HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
71+
72+
ENTRYPOINT [ "/app/llama-server" ]

.devops/cuda.Dockerfile

Lines changed: 93 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,93 @@
ARG UBUNTU_VERSION=22.04
# This needs to generally match the container host's environment.
ARG CUDA_VERSION=12.6.0
# Target the CUDA build image
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
# Target the (much smaller) CUDA runtime image for the light/server stages
ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

FROM ${BASE_CUDA_DEV_CONTAINER} AS build

# CUDA architecture to build for (defaults to all supported archs)
ARG CUDA_DOCKER_ARCH=default

RUN apt-get update && \
    apt-get install -y build-essential cmake python3 python3-pip git libcurl4-openssl-dev libgomp1

WORKDIR /app

COPY . .

# Only pin CMAKE_CUDA_ARCHITECTURES when a specific arch was requested;
# the "default" value lets CMake build for all supported architectures.
RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
        export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
    fi && \
    cmake -B build -DGGML_NATIVE=OFF -DGGML_CUDA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
    cmake --build build --config Release -j$(nproc) && \
    cp build/bin/* .

# Collect the shared libraries the binaries load at runtime so the
# runtime stages can copy them without pulling in the whole build tree.
RUN mkdir -p /app/lib && \
    find build -name "*.so" -exec cp {} /app/lib \;

## Full image: every tool plus the Python conversion scripts (tools.sh dispatcher).
FROM ${BASE_CUDA_DEV_CONTAINER} AS full
COPY --from=build /app /app

WORKDIR /app

# Install toolchain + Python deps, then purge caches in the SAME layer so
# the cleanup actually shrinks the image. Note: apt-get (not apt) is the
# scripting-stable CLI (hadolint DL3027); --no-cache-dir keeps pip's wheel
# cache out of the layer (DL3042).
RUN apt-get update \
    && apt-get install -y \
        build-essential \
        cmake \
        git \
        libcurl4-openssl-dev \
        libgomp1 \
        python3 \
        python3-pip \
    && pip install --upgrade pip setuptools wheel \
    && pip install --no-cache-dir -r requirements.txt \
    && apt-get autoremove -y \
    && apt-get clean -y \
    && rm -rf /tmp/* /var/tmp/* \
    && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
    && find /var/cache -type f -delete

ENTRYPOINT ["/app/.devops/tools.sh"]

## Light image: llama-cli only, on the slim CUDA runtime base.
FROM ${BASE_CUDA_RUN_CONTAINER} AS light

RUN apt-get update \
    && apt-get install -y libgomp1 \
    && apt-get autoremove -y \
    && apt-get clean -y \
    && rm -rf /tmp/* /var/tmp/* \
    && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
    && find /var/cache -type f -delete

COPY --from=build /app/lib/ /app/
COPY --from=build /app/build/bin/llama-cli /app/

WORKDIR /app

ENTRYPOINT [ "/app/llama-cli" ]

## Server image: llama-server only; curl is needed for the health check.
FROM ${BASE_CUDA_RUN_CONTAINER} AS server

RUN apt-get update \
    && apt-get install -y libgomp1 curl \
    && apt-get autoremove -y \
    && apt-get clean -y \
    && rm -rf /tmp/* /var/tmp/* \
    && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
    && find /var/cache -type f -delete

COPY --from=build /app/lib/ /app/
COPY --from=build /app/build/bin/llama-server /app/

WORKDIR /app

# Must be set to 0.0.0.0 so it can listen to requests from host machine
ENV LLAMA_ARG_HOST=0.0.0.0

HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]

ENTRYPOINT [ "/app/llama-server" ]

.devops/full-cuda.Dockerfile

Lines changed: 0 additions & 33 deletions
This file was deleted.

.devops/full-musa.Dockerfile

Lines changed: 0 additions & 33 deletions
This file was deleted.

.devops/full-rocm.Dockerfile

Lines changed: 0 additions & 50 deletions
This file was deleted.

.devops/intel.Dockerfile

Lines changed: 90 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,90 @@
ARG ONEAPI_VERSION=2025.0.0-0-devel-ubuntu22.04

## Build Image

FROM intel/oneapi-basekit:$ONEAPI_VERSION AS build

# Build the SYCL backend with fp16 kernels when set to ON.
ARG GGML_SYCL_F16=OFF
RUN apt-get update && \
    apt-get install -y git libcurl4-openssl-dev

WORKDIR /app

COPY . .

# Opt into fp16 only when requested; OPT_SYCL_F16 stays empty otherwise
# and expands to nothing on the cmake command line.
RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
        echo "GGML_SYCL_F16 is set" && \
        export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
    fi && \
    echo "Building with dynamic libs" && \
    cmake -B build -DGGML_NATIVE=OFF -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \
    cmake --build build --config Release -j$(nproc) && \
    cp build/bin/* .

# Collect the shared libraries the binaries load at runtime so the
# runtime stages can copy them without pulling in the whole build tree.
RUN mkdir -p /app/lib && \
    find build -name "*.so" -exec cp {} /app/lib \;

## Full image: every tool plus the Python conversion scripts (tools.sh dispatcher).
FROM intel/oneapi-basekit:$ONEAPI_VERSION AS full
COPY --from=build /app /app

WORKDIR /app

# Install toolchain + Python deps, then purge caches in the SAME layer so
# the cleanup actually shrinks the image. Note: apt-get (not apt) is the
# scripting-stable CLI (hadolint DL3027); --no-cache-dir keeps pip's wheel
# cache out of the layer (DL3042).
RUN apt-get update \
    && apt-get install -y \
        build-essential \
        cmake \
        git \
        libcurl4-openssl-dev \
        libgomp1 \
        python3 \
        python3-pip \
    && pip install --upgrade pip setuptools wheel \
    && pip install --no-cache-dir -r requirements.txt \
    && apt-get autoremove -y \
    && apt-get clean -y \
    && rm -rf /tmp/* /var/tmp/* \
    && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
    && find /var/cache -type f -delete

ENTRYPOINT ["/app/.devops/tools.sh"]

## Light image: llama-cli only.
FROM intel/oneapi-basekit:$ONEAPI_VERSION AS light

RUN apt-get update \
    && apt-get install -y libgomp1 \
    && apt-get autoremove -y \
    && apt-get clean -y \
    && rm -rf /tmp/* /var/tmp/* \
    && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
    && find /var/cache -type f -delete

COPY --from=build /app/lib/ /app/
COPY --from=build /app/build/bin/llama-cli /app/

WORKDIR /app

ENTRYPOINT [ "/app/llama-cli" ]

## Server image: llama-server only; curl is needed for the health check.
FROM intel/oneapi-basekit:$ONEAPI_VERSION AS server

RUN apt-get update \
    && apt-get install -y libgomp1 curl \
    && apt-get autoremove -y \
    && apt-get clean -y \
    && rm -rf /tmp/* /var/tmp/* \
    && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
    && find /var/cache -type f -delete

COPY --from=build /app/lib/ /app/
COPY --from=build /app/build/bin/llama-server /app/

WORKDIR /app

# Must be set to 0.0.0.0 so it can listen to requests from host machine
ENV LLAMA_ARG_HOST=0.0.0.0

HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]

ENTRYPOINT [ "/app/llama-server" ]

.devops/llama-cli-cuda.Dockerfile

Lines changed: 0 additions & 38 deletions
This file was deleted.

0 commit comments

Comments
 (0)