Dockerfile.llamacpp
# Optional: multi-stage build of the llama.cpp HTTP server (llama-server)
# Production note: keep a small GGUF model (e.g., TinyLlama) at ./models/model.gguf
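# Build-usage sketch (image tag and model URL are placeholders, adjust to your setup):
#   docker build -f Dockerfile.llamacpp -t llamacpp-server .
#   # or bake a model into the image at build time:
#   docker build -f Dockerfile.llamacpp --build-arg MODEL_URL=<gguf-url> -t llamacpp-server .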
FROM debian:bookworm-slim AS build
RUN apt-get update && apt-get install -y --no-install-recommends \
git build-essential cmake curl ca-certificates && rm -rf /var/lib/apt/lists/*
WORKDIR /src
RUN git clone --depth=1 https://github.com/ggerganov/llama.cpp.git
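# Reproducibility note: master moves fast; to pin a known-good revision, swap the
# clone above for a tagged checkout (tag name is a placeholder):
#   git clone --depth=1 --branch <release-tag> https://github.com/ggerganov/llama.cpp.git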
WORKDIR /src/llama.cpp
# Build only the server target; static ggml/llama libs let the binary be copied
# into a clean runtime image on its own. LLAMA_CURL is disabled to avoid pulling
# in a libcurl build dependency this image does not need.
RUN cmake -S . -B build -DLLAMA_BUILD_SERVER=ON -DLLAMA_CURL=OFF -DBUILD_SHARED_LIBS=OFF \
    && cmake --build build -j --target llama-server
FROM debian:bookworm-slim
ARG MODEL_URL=""
# libgomp1 provides the OpenMP runtime the llama.cpp binary links against
RUN apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates curl libgomp1 && rm -rf /var/lib/apt/lists/*
WORKDIR /app
# Recent llama.cpp builds name the server binary llama-server (formerly server)
COPY --from=build /src/llama.cpp/build/bin/llama-server /app/llama-server
# Optionally bake a model into the image if MODEL_URL is provided
RUN mkdir -p /models \
&& if [ -n "$MODEL_URL" ]; then echo "Fetching model: $MODEL_URL" && curl -L --fail --retry 3 -C - "$MODEL_URL" -o /models/model.gguf; else echo "No MODEL_URL provided; expecting host volume /models"; fi
EXPOSE 8080
ENTRYPOINT ["/app/llama-server", "--model", "/models/model.gguf", "--host", "0.0.0.0", "--port", "8080", "--no-warmup"]
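# Run-usage sketch (host paths are placeholders): mount a model directory and
# probe llama-server's /health endpoint once the container is up:
#   docker run --rm -p 8080:8080 -v "$PWD/models:/models" llamacpp-server
#   curl http://localhost:8080/health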