Skip to content

Commit bc4f6cb

Browse files
committed
update cuda & musa dockerfiles
1 parent e0b321b commit bc4f6cb

File tree

4 files changed

+17
-13
lines changed

4 files changed

+17
-13
lines changed

.devops/llama-cli-cuda.Dockerfile

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -23,15 +23,16 @@ RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
2323
export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
2424
fi && \
2525
cmake -B build -DGGML_CUDA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
26-
cmake --build build --config Release --target llama-cli -j$(nproc)
26+
cmake --build build --config Release --target llama-cli -j$(nproc) && \
27+
mkdir -p /app/lib && \
28+
find build -name "*.so" -exec cp {} /app/lib \;
2729

2830
FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime
2931

3032
RUN apt-get update && \
3133
apt-get install -y libgomp1
3234

33-
COPY --from=build /app/build/ggml/src/libggml.so /libggml.so
34-
COPY --from=build /app/build/src/libllama.so /libllama.so
35-
COPY --from=build /app/build/bin/llama-cli /llama-cli
35+
COPY --from=build /app/lib/ /
36+
COPY --from=build /app/build/bin/llama-cli /
3637

3738
ENTRYPOINT [ "/llama-cli" ]

.devops/llama-cli-musa.Dockerfile

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,15 +16,16 @@ WORKDIR /app
1616
COPY . .
1717

1818
RUN cmake -B build -DGGML_MUSA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
19-
cmake --build build --config Release --target llama-cli -j$(nproc)
19+
cmake --build build --config Release --target llama-cli -j$(nproc) && \
20+
mkdir -p /app/lib && \
21+
find build -name "*.so" -exec cp {} /app/lib \;
2022

2123
FROM ${BASE_MUSA_RUN_CONTAINER} AS runtime
2224

2325
RUN apt-get update && \
2426
apt-get install -y libgomp1
2527

26-
COPY --from=build /app/build/ggml/src/libggml.so /libggml.so
27-
COPY --from=build /app/build/src/libllama.so /libllama.so
28+
COPY --from=build /app/lib/ /
2829
COPY --from=build /app/build/bin/llama-cli /llama-cli
2930

3031
ENTRYPOINT [ "/llama-cli" ]

.devops/llama-server-cuda.Dockerfile

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,15 +23,16 @@ RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
2323
export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
2424
fi && \
2525
cmake -B build -DGGML_CUDA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
26-
cmake --build build --config Release --target llama-server -j$(nproc)
26+
cmake --build build --config Release --target llama-server -j$(nproc) && \
27+
mkdir -p /app/lib && \
28+
find build -name "*.so" -exec cp {} /app/lib \;
2729

2830
FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime
2931

3032
RUN apt-get update && \
3133
apt-get install -y libcurl4-openssl-dev libgomp1 curl
3234

33-
COPY --from=build /app/build/ggml/src/libggml.so /libggml.so
34-
COPY --from=build /app/build/src/libllama.so /libllama.so
35+
COPY --from=build /app/lib/ /
3536
COPY --from=build /app/build/bin/llama-server /llama-server
3637

3738
# Must be set to 0.0.0.0 so it can listen to requests from host machine

.devops/llama-server-musa.Dockerfile

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,15 +16,16 @@ WORKDIR /app
1616
COPY . .
1717

1818
RUN cmake -B build -DGGML_MUSA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
19-
cmake --build build --config Release --target llama-server -j$(nproc)
19+
cmake --build build --config Release --target llama-server -j$(nproc) && \
20+
mkdir -p /app/lib && \
21+
find build -name "*.so" -exec cp {} /app/lib \;
2022

2123
FROM ${BASE_MUSA_RUN_CONTAINER} AS runtime
2224

2325
RUN apt-get update && \
2426
apt-get install -y libcurl4-openssl-dev libgomp1 curl
2527

26-
COPY --from=build /app/build/ggml/src/libggml.so /libggml.so
27-
COPY --from=build /app/build/src/libllama.so /libllama.so
28+
COPY --from=build /app/lib/ /
2829
COPY --from=build /app/build/bin/llama-server /llama-server
2930

3031
# Must be set to 0.0.0.0 so it can listen to requests from host machine

0 commit comments

Comments (0)