-
Notifications
You must be signed in to change notification settings - Fork 382
Expand file tree
/
Copy pathvalues.yaml
More file actions
883 lines (775 loc) · 34.6 KB
/
values.yaml
File metadata and controls
883 lines (775 loc) · 34.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
# -- Default values for llmstack helm chart
# -- Declare variables to be passed into your templates.
# -- Serving engine configuration
servingEngineSpec:
enableEngine: true
# -- Customized labels for the serving engine deployment
labels:
environment: "test"
release: "test"
# -- Extra service ports for models
extraPorts: []
# vllmApiKey: (optional) api key for securing the vLLM models. Can be either:
# - A string containing the token directly (will be stored in a generated secret)
# - An object referencing an existing secret:
# secretName: "my-existing-secret"
# secretKey: "vllm-api-key"
#
# modelSpec - configuring multiple serving engines deployments that runs different models
# Each entry in the modelSpec array should contain the following fields:
# - annotations: (Optional, map) The annotations to add to the deployment, e.g., {model: "opt125m"}
# - serviceAccountName: (Optional, string) The name of the service account to use for the deployment, e.g., "vllm-service-account"
# - priorityClassName: (Optional, string) The name of the priority class name for the deployment, e.g., "high-priority"
# - runtimeClassName: (Optional, string) Runtime class for the pod, e.g., "nvidia". If not specified, falls back to servingEngineSpec.runtimeClassName
# - podAnnotations: (Optional, map) The annotations to add to the pod, e.g., {model: "opt125m"}
# - name: (string) The name of the model, e.g., "example-model"
# - repository: (string) The repository of the model, e.g., "vllm/vllm-openai"
# - tag: (string) The tag of the model, e.g., "latest"
# - imagePullSecret: (Optional, string) Name of secret with credentials to private container repository, e.g. "secret"
# - modelURL: (string) The URL of the model, e.g., "facebook/opt-125m"
# - chatTemplate: (Optional, string) Chat template (Jinja2) specifying tokenizer configuration, e.g. "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ 'Question:\n' + message['content'] + '\n\n' }}{% elif message['role'] == 'system' %}\n{{ 'System:\n' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Answer:\n' + message['content'] + '\n\n' }}{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ 'Answer:\n' }}{% endif %}{% endfor %}"
#
# - replicaCount: (int) The number of replicas for the model, e.g. 1
# - resources: (object) a resources block containing requests and limits. If specified, this takes priority over simplified resource fields (requestCPU, etc.).
# - requestCPU: (int) The number of CPUs requested for the model, e.g. 6
# - requestMemory: (string) The amount of memory requested for the model, e.g., "16Gi"
# - requestGPU: (int) The number of GPUs requested for the model, e.g., 1
# - requestGPUType: (Optional, string) The type of GPU requested, e.g., "nvidia.com/mig-4g.71gb". If not specified, defaults to "nvidia.com/gpu"
# - requestGPUMem: (Optional, string) Requires HAMi. The amount of GPU memory requested, e.g., 3000. Each unit equals to 1M. https://project-hami.io/docs/userguide/NVIDIA-device/specify-device-memory-usage
# - requestGPUMemPercentage: (Optional, string) Requires HAMi. The percentage of GPU memory requested, e.g., "80". It cannot be used together with requestGPUMem.
# - requestGPUCores: (Optional, string) Requires HAMi. The percentage of GPU cores requested, e.g., "10". Each unit equals to 1% device cores. https://project-hami.io/docs/userguide/NVIDIA-device/specify-device-core-usage
# - limitCPU: (Optional, string) The CPU limit for the model, e.g., "8"
# - limitMemory: (Optional, string) The memory limit for the model, e.g., "32Gi"
# Note: If limitCPU and limitMemory are not specified, only GPU resources will have limits set equal to their requests.
# - limitGPUMem: (Optional, string) Requires HAMi. The limit of GPU memory, e.g., 3000. Each unit equals to 1M. https://project-hami.io/docs/userguide/NVIDIA-device/specify-device-memory-usage
# - limitGPUMemPercentage: (Optional, string) Requires HAMi. The limit of GPU memory of GPU, e.g., "80"
# - limitGPUCores: (Optional, string) Requires HAMi. The limit of GPU cores, e.g., "10". Each unit equals to 1% device cores. https://project-hami.io/docs/userguide/NVIDIA-device/specify-device-core-usage
# - pvcStorage: (Optional, string) The amount of storage requested for the model, e.g., "50Gi".
# - pvcAccessMode: (Optional, list) The access mode policy for the mounted volume, e.g., ["ReadWriteOnce"]
# - storageClass: (Optional, string) The storage class of the PVC e.g., "", default is ""
# - pvcMatchLabels: (Optional, map) The labels to match the PVC, e.g., {model: "opt125m"}
# - pvcLabels: (Optional, map) The labels to add to the PVC, e.g., {label_excluded_from_alerts: "true"}
# - pvcAnnotations: (Optional, map) The annotations to add to the PVC
# - extraVolumes: (Optional, list) Additional volumes to add to the pod, in Kubernetes volume format. https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#volume-v1-core
# Example for an emptyDir volume:
# extraVolumes:
# - name: tmp-volume
# emptyDir:
# medium: ""
# sizeLimit: 5Gi
# - extraVolumeMounts: (Optional, list) Additional volume mounts to add to the container, in Kubernetes volumeMount format. https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#volumemount-v1-core
# Example for mounting the tmp-volume to /tmp:
# extraVolumeMounts:
# - name: tmp-volume
# mountPath: /tmp
# - initContainer: (optional, list of objects) The configuration for the init container to be run before the main container.
# - name: (string) The name of the init container, e.g., "init"
# - image: (string) The Docker image for the init container, e.g., "busybox:latest"
# - command: (optional, list) The command to run in the init container, e.g., ["sh", "-c"]
# - args: (optional, list) Additional arguments to pass to the command, e.g., ["ls"]
# - env: (optional, list) List of environment variables to set in the container, each being a map with:
# - resources: (optional, map) The resource requests and limits for the container:
# - mountPvcStorage: (optional, bool) Whether to mount the model's volume.
# - extraVolumeMounts: (Optional, list) Additional volume mounts to add to the init container, in Kubernetes volumeMount format. https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#volumemount-v1-core
#
# - vllmConfig: (optional, map) The configuration for the VLLM model, supported options are:
# - v0: (optional, int) Set to 1 to use vLLM v0, otherwise vLLM v1 is used. e.g., 0
# - enablePrefixCaching: (optional, bool) Enable prefix caching, e.g., false
# - enableChunkedPrefill: (optional, bool) Enable chunked prefill, e.g., false
# - maxModelLen: (optional, int) The maximum model length, e.g., 16384
# - dtype: (optional, string) The data type, e.g., "bfloat16"
# - tensorParallelSize: (optional, int) The degree of tensor parallelism, e.g., 2
# - maxNumSeqs: (optional, int) Maximum number of sequences to be processed in a single iteration., e.g., 32
# - maxLoras: (optional, int) The maximum number of LoRA models to be loaded in a single batch, e.g., 4
# - gpuMemoryUtilization: (optional, float) The fraction of GPU memory to be used for the model executor, which can range from 0 to 1. e.g., 0.95
# - runner: (optional, string) The runner type for the model, can be "auto" or "pooling". e.g., "pooling"
# - convert: (optional, string) The conversion type for the model, can be "token_embed", "embed", "token_classify", "classify", or "score". e.g., "embed"
# - extraArgs: (optional, list) Extra command line arguments to pass to vLLM, e.g., ["--gpu-memory-utilization", "0.4"]
#
# - lmcacheConfig: (optional, map) The configuration of the LMCache for KV offloading, supported options are:
# - enabled: (optional, bool) Enable LMCache, e.g., true
# - cpuOffloadingBufferSize: (optional, string) The CPU offloading buffer size, e.g., "30"
# - logLevel: (optional, string) The log level for LMCache, e.g., "DEBUG", "INFO", "WARNING", "ERROR"
#
# - hf_token: (optional) Hugging Face token configuration. Can be either:
# - A string containing the token directly (will be stored in a generated secret)
# - An object referencing an existing secret:
# secretName: "my-existing-secret"
# secretKey: "hf-token-key"
#
# - envFromSecret: (optional) Reference to an existing Kubernetes Secret from which
# all key/value pairs will be loaded as environment variables into the container.
# Example:
# envFromSecret:
# name: s3-registry
#
# The referenced Secret could look like:
# apiVersion: v1
# kind: Secret
# metadata:
# name: s3-registry
# namespace: vllm
# data:
# AWS_ACCESS_KEY_ID: <base64-encoded-value>
# AWS_SECRET_ACCESS_KEY: <base64-encoded-value>
# AWS_ENDPOINT_URL: <base64-encoded-value>
#
# - env: (optional, list) The environment variables to set in the container, e.g., your HF_TOKEN
#
# - affinity: (optional, map) Affinity configuration
# - nodeSelectorTerms: (optional, list) The node selector terms to match the nodes. When both affinity and nodeSelectorTerms are defined, nodeSelectorTerms will be ignored.
# - nodeName: (optional) Directly assigns a pod to a specific node (e.g., "192.168.56.5"). When both nodeName and nodeSelectorTerms are defined, the preference is given to nodeName.
# - shmSize: (optional, string) The size of the shared memory, e.g., "20Gi"
# - enableLoRA: (optional, bool) Whether to enable LoRA, e.g., true
#
# - keda: (optional, map) KEDA autoscaling configuration for this model deployment. Requires KEDA to be installed in the cluster.
# - enabled: (optional, bool) Whether to enable KEDA autoscaling for this model, e.g., true
# - minReplicaCount: (optional, int) Minimum number of replicas (supports 0 for scale-to-zero), e.g., 1
# - maxReplicaCount: (optional, int) Maximum number of replicas, e.g., 5
# - pollingInterval: (optional, int) How often KEDA should check the metrics (in seconds), e.g., 15
# - cooldownPeriod: (optional, int) How long to wait before scaling down after scaling up (in seconds), e.g., 360
# - idleReplicaCount: (optional, int) Number of replicas to scale to when no triggers are active, e.g., 0
# - initialCooldownPeriod: (optional, int) Initial cooldown period in seconds before scaling down after creation, e.g., 60
# - fallback: (optional, map) Fallback configuration when scaler fails
# - failureThreshold: (int) Number of consecutive failures before fallback, e.g., 3
# - replicas: (int) Number of replicas to scale to in fallback, e.g., 2
# - triggers: (optional, list) List of KEDA trigger configurations
# - type: (string) Trigger type, e.g., "prometheus"
# - metadata: (map) Trigger metadata
# - serverAddress: (string) Prometheus server address, e.g., "http://prometheus-operated.monitoring.svc:9090"
# - metricName: (string) Name of the metric to monitor, e.g., "vllm:num_requests_waiting"
# - query: (string) Prometheus query to fetch the metric, e.g., "vllm:num_requests_waiting"
# - threshold: (string) Threshold value that triggers scaling, e.g., "5"
# - advanced: (optional, map) Advanced KEDA configuration
# - restoreToOriginalReplicaCount: (optional, bool) Restore original replica count when ScaledObject is deleted, e.g., false
# - horizontalPodAutoscalerConfig: (optional, map) HPA-specific configuration
# - name: (optional, string) Custom name for the HPA resource, default: "keda-hpa-{scaled-object-name}"
# - behavior: (optional, map) HPA scaling behavior configuration, see https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
# - scalingModifiers: (optional, map) Scaling modifiers for composite metrics
# - target: (string) Target value for the composed metric
# - activationTarget: (optional, string) Activation target for the composed metric
# - metricType: (optional, string) Metric type (AverageValue or Value), default: "AverageValue"
# - formula: (string) Formula to compose metrics together
#
# Example:
# vllmApiKey: "vllm_xxxxxxxxxxxxx"
# modelSpec:
# - name: "mistral"
# annotations:
# model: "mistral"
# podAnnotations:
# model: "mistral"
# serviceAccountName: "vllm-service-account"
# repository: "lmcache/vllm-openai"
# tag: "latest"
# modelURL: "mistralai/Mistral-7B-Instruct-v0.2"
# replicaCount: 1
# pdb:
# enabled: false
# labels: {}
# annotations: {}
# minAvailable: ""
# maxUnavailable: ""
# imagePullPolicy: "Always" # or "IfNotPresent"
#
# requestCPU: 10
# requestMemory: "64Gi"
# requestGPU: 1
#
# pvcStorage: "50Gi"
# pvcLabels:
# label_excluded_from_alerts: "true"
# pvcAnnotations:
# annotation_example: "value"
# pvcAccessMode:
# - ReadWriteOnce
# pvcMatchLabels:
# model: "mistral"
# initContainer:
# name: my-container
# image: busybox
# command: ["sh"]
# env: {}
# args: []
# resources: {}
# mountPvcStorage: true
#
# vllmConfig:
# enableChunkedPrefill: false
# enablePrefixCaching: false
# maxModelLen: 16384
# dtype: "bfloat16"
# maxNumSeqs: 32
# gpuMemoryUtilization: 0.95
# maxLoras: 4
# extraArgs: ["--trust-remote-code"]
#
# lmcacheConfig:
# enabled: true
# cpuOffloadingBufferSize: "30"
# logLevel: "INFO"
#
# hf_token: "hf_xxxxxxxxxxxxx"
#
#
# nodeSelectorTerms:
# - matchExpressions:
# - key: nvidia.com/gpu.product
# operator: "In"
# values:
# - "NVIDIA-RTX-A6000"
#
# keda:
# enabled: true
# minReplicaCount: 1
# maxReplicaCount: 3
# pollingInterval: 15
# cooldownPeriod: 360
# triggers:
# - type: prometheus
# metadata:
# serverAddress: http://prometheus-operated.monitoring.svc:9090
# metricName: vllm:num_requests_waiting
# query: vllm:num_requests_waiting
# threshold: '5'
# extraVolumes:
# - name: dev-fuse
# hostPath:
# path: /dev/fuse
# type: CharDevice
# - name: cache-dir
# hostPath:
# path: /var/cache/vllm
# extraVolumeMounts:
# - name: dev-fuse
# mountPath: /dev/fuse
# readOnly: true
# - name: cache-dir
# mountPath: /cache
# readOnly: false
modelSpec: []
# -- Container port
containerPort: 8000
# -- Service port
servicePort: 80
# -- Set other environment variables from config map
configs: {}
# -- deployment strategy
strategy: {}
# -- Readiness probe configuration
startupProbe:
# -- Number of seconds after the container has started before startup probe is initiated
initialDelaySeconds: 15
# -- How often (in seconds) to perform the startup probe
periodSeconds: 10
# -- Number of times after which if a probe fails in a row, Kubernetes considers that the overall check has failed: the container is not ready
failureThreshold:
60
# -- Configuration of the Kubelet http request on the server
httpGet:
# -- Path to access on the HTTP server
path: /health
# -- Name or number of the port to access on the container, on which the server is listening
port: 8000
# -- Liveness probe configuration
livenessProbe:
# -- Number of seconds after the container has started before liveness probe is initiated
initialDelaySeconds: 15
# -- Number of times after which if a probe fails in a row, Kubernetes considers that the overall check has failed: the container is not alive
failureThreshold: 3
# -- How often (in seconds) to perform the liveness probe
periodSeconds: 10
# -- Configuration of the Kubelet http request on the server
httpGet:
# -- Path to access on the HTTP server
path: /health
# -- Name or number of the port to access on the container, on which the server is listening
port: 8000
# -- Readiness probe configuration
readinessProbe:
# -- Number of seconds after the container has started before readiness probe is initiated
initialDelaySeconds: 15
    # -- Number of times after which if a probe fails in a row, Kubernetes considers that the overall check has failed: the container is not ready
failureThreshold: 3
# -- How often (in seconds) to perform the readiness probe
periodSeconds: 5
# -- Configuration of the Kubelet http request on the server
httpGet:
# -- Path to access on the HTTP server
path: /health
# -- Name or number of the port to access on the container, on which the server is listening
port: 8000
# -- Disruption Budget Configuration
maxUnavailablePodDisruptionBudget: ""
# -- Tolerations configuration (when there are taints on nodes)
# Example:
# tolerations:
# - key: "node-role.kubernetes.io/control-plane"
# operator: "Exists"
# effect: "NoSchedule"
tolerations: []
# -- RuntimeClassName configuration, set to "nvidia" if the model requires GPU
runtimeClassName: "nvidia"
# -- SchedulerName configuration
schedulerName: ""
# -- Pod-level security context configuration. https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#podsecuritycontext-v1-core
securityContext: {}
# -- Run as a non-root user ID
# runAsUser: 1000
# -- Run with a non-root group ID
# runAsGroup: 1000
# -- Run as non-root
# runAsNonRoot: true
# -- Set the seccomp profile
# seccompProfile:
# type: RuntimeDefault
# -- Drop all capabilities
# capabilities:
# drop:
# - ALL
# -- Set the file system group ID for all containers
# fsGroup: 1000
# -- Container-level security context configuration. https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#securitycontext-v1-core
containerSecurityContext:
# -- Run as non-root
runAsNonRoot: false
# -- Don't allow privilege escalation
# allowPrivilegeEscalation: false
# -- Drop all capabilities
# capabilities:
# drop:
# - ALL
# -- Read-only root filesystem
# readOnlyRootFilesystem: true
# -- Sidecar configuration
sidecar:
image: "lmcache/lmstack-sidecar:latest"
imagePullPolicy: "Always"
serviceMonitor:
# -- Specifies whether to create a ServiceMonitor resource for collecting Prometheus metrics
enabled: false
# -- Additional labels
additionalLabels: {}
# -- Interval to scrape metrics
interval: 30s
# -- Timeout if metrics can't be retrieved in given time interval
scrapeTimeout: 25s
# -- Let prometheus add an exported_ prefix to conflicting labels
honorLabels: false
# -- Metric relabel configs to apply to samples before ingestion. [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs)
metricRelabelings: []
# - action: replace
# regex: (.*)
# replacement: $1
# sourceLabels:
# - exported_namespace
# targetLabel: namespace
# -- Relabel configs to apply to samples before ingestion. [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config)
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
# Optional cache server configuration.
# When set, resources accepts any Kubernetes resource keys (for example `rdma/ib`).
#
# Example:
# cacheserverSpec:
# repository: "lmcache/vllm-openai"
# tag: "latest-nightly"
# containerPort: 8000
# servicePort: 80
# serde: "naive"
# resources:
# requests:
# cpu: "4"
# memory: "8Gi"
# rdma/ib: "1"
# limits:
# cpu: "4"
# memory: "10Gi"
# rdma/ib: "1"
routerSpec:
# -- Whether to enable the router service
enableRouter: true
# -- The docker image of the router. The following values are defaults:
repository: "lmcache/lmstack-router"
tag: "latest"
imagePullPolicy: "Always"
# -- Pod-level security context configuration. https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#podsecuritycontext-v1-core
securityContext: {}
# -- Container-level security context configuration. https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#securitycontext-v1-core
containerSecurityContext: {}
# -- Image pull secrets for private container registries
# Example:
# imagePullSecrets:
# - name: my-registry-secret
imagePullSecrets: []
# -- Number of replicas
replicaCount: 1
# -- autoscaling configuration
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 3
targetCPUUtilizationPercentage: 80
# -- Router Pod Disruption Budget
# -- Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
pdb:
# -- Deploy a PodDisruptionBudget for the router deployment
enabled: false
# -- Labels to be added to router pdb
labels: {}
# -- Annotations to be added to router pdb
annotations: {}
# -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
minAvailable: ""
# -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
maxUnavailable: ""
# -- Priority Class
priorityClassName: ""
# -- Container port
containerPort: 8000
# -- Service type
serviceType: ClusterIP
# -- Service annotations if you use a LoadBalancer or NodePort service type
serviceAnnotations: {}
# -- Fixed NodePort for the router service when serviceType is NodePort.
# -- If not set, Kubernetes assigns a random port (range 30000-32767).
# nodePort: 30080
# -- Service port
servicePort: 80
# -- Service discovery mode, supports "k8s" or "static". Defaults to "k8s" if not set.
serviceDiscovery: "k8s"
# -- Service discovery mode type, supports "pod-ip" or "service-name". Defaults to "pod-ip" if not set.
k8sServiceDiscoveryType: "pod-ip"
# -- If serviceDiscovery is set to "static", the comma-separated values below are required. There needs to be the same number of backends and models
staticBackends: ""
staticModels: ""
# -- routing logic, supports "roundrobin", "session", "prefixaware", or "kvaware"
routingLogic: "roundrobin"
# -- session key if using "session" routing logic
sessionKey: ""
# -- lmcache controller port, used when routingLogic is "kvaware"
lmcacheControllerPort: ""
# -- FastAPI root path for hosting under a subpath (e.g. /vllm)
rootPath: ""
# -- extra router commandline arguments
extraArgs: []
# -- extra service ports
extraPorts: []
# -- Interval in seconds to scrape the serving engine metrics
engineScrapeInterval: 15
# -- Window size in seconds to calculate the request statistics
requestStatsWindow: 60
# -- OpenTelemetry tracing configuration
# When otelEndpoint is set, tracing is automatically enabled
otel:
# -- OTLP endpoint for tracing (e.g., "localhost:4317" or "otel-collector:4317")
endpoint: ""
# -- Service name for traces (default: "vllm-router")
serviceName: "vllm-router"
# -- Use secure (TLS) connection for OTLP exporter (default: false, i.e., insecure)
secure: false
# -- deployment strategy
strategy: {}
# vllmApiKey: (optional) api key for securing the vLLM models. Must be an object referencing an existing secret
# secretName: "my-existing-secret"
# secretKey: "vllm-api-key"
# -- router resource requests and limits
resources:
requests:
cpu: 400m
memory: 1000Mi
limits:
memory: 1000Mi
# -- Customized labels for the router deployment
labels:
environment: "router"
release: "router"
# -- Customized pod annotations for the router pods
podAnnotations: {}
ingress:
# -- Enable ingress controller resource
enabled: false
# -- IngressClass that will be used to implement the Ingress
className: ""
# -- Additional annotations for the Ingress resource
annotations:
{}
# kubernetes.io/ingress.class: alb
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# The list of hostnames to be covered with this ingress record.
hosts:
- host: vllm-router.local
paths:
- path: /
pathType: Prefix
# -- The tls configuration for hostnames to be covered with this ingress record.
tls: []
# - secretName: vllm-router-tls
# hosts:
# - vllm-router.local
# -- Expose the service via gateway-api HTTPRoute
# More routes can be added by adding a dictionary key like the 'main' route.
# Requires Gateway API resources and suitable controller installed within the cluster
# (see: https://gateway-api.sigs.k8s.io/guides/)
route:
main:
# -- Enables or disables the route
enabled: false
# -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
# -- Set the route kind
# Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute
kind: HTTPRoute
annotations: {}
labels: {}
hostnames: []
# - my-filter.example.com
parentRefs: []
# - name: acme-gw
# -- create http route for redirect (https://gateway-api.sigs.k8s.io/guides/http-redirect-rewrite/#http-to-https-redirects)
## Take care that you only enable this on the http listener of the gateway to avoid an infinite redirect.
## matches, filters and additionalRules will be ignored if this is set to true.
httpsRedirect: false
matches:
- path:
type: PathPrefix
value: /
## Filters define the filters that are applied to requests that match this rule.
filters: []
## Additional custom rules that can be added to the route
additionalRules: []
# Affinity configuration
affinity: {}
# The node selector terms to match the nodes. Will be ignored if affinity is configured.
# Example:
# nodeSelectorTerms:
# - matchExpressions:
# - key: nvidia.com/gpu.product
# operator: "In"
# values:
# - "NVIDIA-RTX-A6000"
nodeSelectorTerms: []
# Router probes configuration
# <probe>.httpGet.port will automatically be set to .Values.routerSpec.containerPort
# Liveness probe configuration.
livenessProbe:
initialDelaySeconds: 30
periodSeconds: 5
failureThreshold: 3
httpGet:
path: /health
# Startup probe configuration.
startupProbe:
initialDelaySeconds: 5
periodSeconds: 5
failureThreshold: 3
httpGet:
path: /health
# Readiness probe configuration.
readinessProbe:
initialDelaySeconds: 30
periodSeconds: 5
failureThreshold: 3
httpGet:
path: /health
  # -- TODO: Startup probe configuration
#startupProbe:
# # -- Number of seconds after the container has started before startup probe is initiated
# initialDelaySeconds: 5
# # -- How often (in seconds) to perform the startup probe
# periodSeconds: 5
# # -- Number of times after which if a probe fails in a row, Kubernetes considers that the overall check has failed: the container is not ready
# failureThreshold: 100
# # -- Configuration of the Kubelet http request on the server
# httpGet:
# # -- Path to access on the HTTP server
#
serviceMonitor:
# -- Specifies whether to create a ServiceMonitor resource for collecting Prometheus metrics
enabled: false
# -- Additional labels
additionalLabels: {}
# -- Interval to scrape metrics
interval: 30s
# -- Timeout if metrics can't be retrieved in given time interval
scrapeTimeout: 25s
# -- Let prometheus add an exported_ prefix to conflicting labels
honorLabels: false
# -- Metric relabel configs to apply to samples before ingestion. [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs)
metricRelabelings: []
# - action: replace
# regex: (.*)
# replacement: $1
# sourceLabels:
# - exported_namespace
# targetLabel: namespace
# -- Relabel configs to apply to samples before ingestion. [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config)
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
# -- LoRA Adapter Configuration
loraAdapters: []
# -- LoRA adapter instances to deploy
# Each instance should contain:
# - name: (string) The name of the LoRA adapter instance
# - baseModel: (string) The name of the base model this adapter is for
# - vllmApiKey: (optional) API key configuration for vLLM authentication
# - secretRef: Reference to a secret containing the API key
# - secretName: (string) Name of the secret
# - secretKey: (string) Key in the secret containing the API key
# - value: (string) Direct API key value
# - adapterSource: (object) Configuration for the adapter source
# - type: (string) Type of adapter source (local, s3, http, huggingface)
# - adapterName: (string) Name of the adapter to apply
# - adapterPath: (optional, string) Path to the LoRA adapter weights
# - repository: (optional, string) Repository to get the LoRA adapter from
# - pattern: (optional, string) Pattern to use for the adapter name
# - maxAdapters: (optional, int) Maximum number of adapters to load
# - credentials: (optional, object) Reference to secret with storage credentials
# - secretName: (string) Name of the secret
# - secretKey: (string) Key in the secret containing the credentials
# - loraAdapterDeploymentConfig: (object) Configuration for adapter deployment
# - algorithm: (string) Placement algorithm to use (default, ordered, equalized)
# - replicas: (optional, int) Number of replicas that should load this adapter
# - labels: (optional, map) Additional labels for the LoRA adapter
#
# Example:
# loraAdapters:
# - name: "llama3-nemoguard-adapter"
# baseModel: "llama3-8b-instr"
# vllmApiKey:
# secretRef:
# secretName: "vllm-api-key"
# secretKey: "VLLM_API_KEY"
# adapterSource:
# type: "local"
# adapterName: "llama-3.1-nemoguard-8b-topic-control"
# adapterPath: "/data/lora-adapters/llama-3.1-nemoguard-8b-topic-control"
# loraAdapterDeploymentConfig:
# algorithm: "default"
# replicas: 1
# labels:
# environment: "production"
# model: "llama3-nemoguard"
# -- lora controller Configuration
loraController:
enableLoraController: false
# -- kubernetes cluster domain
kubernetesClusterDomain: "cluster.local"
# -- Number of lora controller replicas
replicaCount: 1
# -- lora controller image configuration
image:
repository: "lmcache/lmstack-lora-controller"
tag: "latest"
pullPolicy: "IfNotPresent"
# -- Image pull secrets
imagePullSecrets: []
# -- Deployment annotations
annotations: {}
# -- Deployment labels
labels: {}
# -- Pod annotations
podAnnotations: {}
# -- Pod labels
podLabels: {}
# -- Pod security context
podSecurityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
# -- Container security context
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
# -- lora controller resources
resources: {}
# -- Node selector
nodeSelector: {}
# -- Affinity
affinity: {}
# -- Tolerations
tolerations: []
# -- Environment variables
env: []
# -- Extra arguments for the lora controller
extraArgs: []
# -- Expose lora controller metrics
metrics:
enabled: true
# -- Webhook configuration
webhook:
enabled: false
# -- loraController Pod Disruption Budget
# -- Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
pdb:
# -- Deploy a PodDisruptionBudget for the loraController deployment
enabled: false
# -- Labels to be added to loraController pdb
labels: {}
# -- Annotations to be added to loraController pdb
annotations: {}
# -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
minAvailable: ""
# -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
maxUnavailable: ""
# -- Array of extra K8s manifests to deploy. Supports use of custom Helm templates
extraObjects: []
# -- Set to true to deploy dashboards stored in the "dashboards" directory as configmaps. This requires the kube-prometheus-stack to be deployed with Grafana enabled and properly configured to pick up dashboards from configmaps.
grafanaDashboards:
enabled: false
# -- Additional annotations to add to the dashboard configmaps
annotations: {}
# -- Additional labels to add to the dashboard configmaps
labels:
grafana_dashboard: "1"
# -- Kube Prometheus Stack dependency chart configuration. More values can be found [here](https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml).
kube-prometheus-stack:
enabled: false
# -- Configure the Prometheus Operator to automatically discover and scrape metrics from the vLLM chart using ServiceMonitors.
prometheus:
prometheusSpec:
serviceMonitorSelector:
matchLabels:
app.kubernetes.io/part-of: vllm-stack
# -- Prometheus Adapter dependency chart configuration. More values can be found [here](https://github.com/prometheus-community/helm-charts/blob/main/charts/prometheus-adapter/values.yaml).
prometheus-adapter:
enabled: false
prometheus:
url: http://{{ .Release.Name }}-kube-prometheus-stack-prometheus
port: 9090
# rules:
# default: true
# custom:
# # Example metric to export for HPA
# - seriesQuery: '{__name__=~"^vllm:num_requests_waiting$"}'
# resources:
# overrides:
# namespace:
# resource: "namespace"
# name:
# matches: ""
# as: "vllm_num_requests_waiting"
# metricsQuery: sum by(namespace) (vllm:num_requests_waiting)
# # Export num_incoming_requests_total by model name
# - seriesQuery: '{__name__=~"^vllm:num_incoming_requests_total$"}'
# resources:
# overrides:
# namespace:
# resource: "namespace"
# name:
# matches: ""
# as: "vllm_num_incoming_requests_total"
# metricsQuery: sum by(namespace, model) (vllm:num_incoming_requests_total)