-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathdeployment.yaml
More file actions
88 lines (88 loc) · 1.76 KB
/
deployment.yaml
File metadata and controls
88 lines (88 loc) · 1.76 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
---
# llama.cpp inference server (GGUF model served over HTTP on :8080).
# Single replica with Recreate strategy so a rollout never has two pods
# contending for the same GPUs.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: llama-server
  namespace: default
  labels:
    app: llama-server
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: llama-server
  template:
    metadata:
      labels:
        app: llama-server
    spec:
      containers:
        - name: llama-server
          image: localhost:5000/llama-server:latest
          args:
            - --model
            - /workspace/models/Qwen3.5-35B-A3B-GGUF/Qwen3.5-35B-A3B-Q8_0.gguf
            - --port
            - "8080"
            - --host
            - "0.0.0.0"
            # Sampling parameters.
            - --temp
            - "0.6"
            - --top-p
            - "0.95"
            - --top-k
            - "20"
            # Quantize the KV cache to q8_0 to fit the large context window.
            - --cache-type-k
            - q8_0
            - --cache-type-v
            - q8_0
            - --ctx-size
            - "262144"
            # Offload (up to) 99 layers to GPU; split tensors 32:8 across
            # the two visible devices.
            - -ngl
            - "99"
            - --tensor-split
            - "32,8"
          env:
            # NOTE(review): kubectl does not expand ${GPU_UUID} — this value
            # reaches the container literally unless the manifest is piped
            # through envsubst (or similar) before apply. Confirm the deploy
            # pipeline performs that substitution.
            - name: NVIDIA_VISIBLE_DEVICES
              value: "${GPU_UUID}"
            - name: CUDA_VISIBLE_DEVICES
              value: "0,1"
          resources:
            limits:
              nvidia.com/gpu: "2"
              memory: 12Gi
            requests:
              nvidia.com/gpu: "2"
              memory: 8Gi
          ports:
            - containerPort: 8080
              name: http
          volumeMounts:
            - name: workspace
              mountPath: /workspace
            # Memory-backed /dev/shm; the default 64Mi shm is too small for
            # some inference workloads.
            - name: dshm
              mountPath: /dev/shm
      volumes:
        - name: workspace
          hostPath:
            path: /home/akshay/llama-workspace
            type: DirectoryOrCreate
        - name: dshm
          emptyDir:
            medium: Memory
            sizeLimit: 1Gi
---
# Exposes the llama-server pods on a fixed NodePort (30080) so the HTTP
# API is reachable from outside the cluster without an ingress/LB.
apiVersion: v1
kind: Service
metadata:
  name: llama-server
  namespace: default
spec:
  type: NodePort
  selector:
    app: llama-server
  ports:
    - port: 8080
      targetPort: 8080
      nodePort: 30080