forked from nuance1979/llama-server
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathDockerfile
More file actions
57 lines (42 loc) · 1.5 KB
/
Dockerfile
File metadata and controls
57 lines (42 loc) · 1.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
# syntax=docker/dockerfile:1
ARG PYTHON_VERSION=3.9
FROM python:${PYTHON_VERSION}-slim

WORKDIR /app

# git is required to pip-install straight from GitHub; build-essential and
# cmake compile pyllamacpp's native extension. Skip recommended packages and
# remove the apt lists in the SAME layer — `apt-get clean` alone leaves
# /var/lib/apt/lists/* baked into the image.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        git \
    && rm -rf /var/lib/apt/lists/*

# Install with --no-cache-dir: a `pip cache purge` in a later RUN cannot
# shrink layers that were already written, so the cache must never be
# created in the first place.
RUN python -m pip install --no-cache-dir --upgrade pip && \
    python -m pip install --no-cache-dir \
        git+https://github.com/AwMalka/llama-server.git && \
    python -m pip install --no-cache-dir --upgrade \
        git+https://github.com/AwMalka/pyllamacpp.git

# Drop root for runtime; the package installs above are the only steps that
# need it. Stable numeric UID so runtimes enforcing runAsNonRoot can verify.
# NOTE(review): the /models volume mounted at run time must be readable by
# uid 10001 — confirm against your deployment.
RUN useradd --system --create-home --uid 10001 app
USER app

# Documentation only — publish with `docker run -p 8000:8000`.
EXPOSE 8000

# Exec form: llama-server is PID 1 and receives SIGTERM from `docker stop`.
CMD ["llama-server", "--models-yml", "/models/models.yml", "--model-id", "gpt4all"]
# ARG PYTHON_VERSION=3.9
# FROM python:${PYTHON_VERSION}-slim
# WORKDIR /app
# # Install git
# RUN apt-get update && \
# apt-get install -y git && \
# apt-get clean
# # Copy the contents of the current folder into the Docker container
# COPY . /app
# RUN python -m pip install --upgrade pip
# # Install the llama-server package from the local directory
# RUN python -m pip install . && \
# python -m pip cache purge
# # Expose the port the app will run on
# EXPOSE 8000
# # Start the application
# CMD ["llama-server", "--models-yml", "/models/models.yml", "--model-id", "gpt4all"]
# ARG PYTHON_VERSION=3.9
# FROM python:${PYTHON_VERSION}-slim
# WORKDIR /app
# RUN python -m pip install --upgrade pip
# RUN python -m pip install llama-server && \
# python -m pip cache purge
# # Expose the port the app will run on
# EXPOSE 8000
# # Start the application
# CMD ["llama-server", "--models-yml", "/models/models.yml", "--model-id", "gpt4all"]