diff --git a/charts/.gitignore b/charts/.gitignore index 264930c8..8f77c54f 100644 --- a/charts/.gitignore +++ b/charts/.gitignore @@ -2,3 +2,5 @@ values.yaml values.*.yaml k8s_hosts.ini helmfile.y?ml + +*.tgz diff --git a/charts/adminer/templates/deployment.yaml b/charts/adminer/templates/deployment.yaml index c687ad41..a084cd8f 100644 --- a/charts/adminer/templates/deployment.yaml +++ b/charts/adminer/templates/deployment.yaml @@ -13,9 +13,9 @@ spec: {{- include "adminer.selectorLabels" . | nindent 6 }} template: metadata: - {{- with .Values.podAnnotations }} + {{- if .Values.podAnnotations }} annotations: - {{- toYaml . | nindent 8 }} + {{- tpl (toYaml .Values.podAnnotations) . | nindent 8 }} {{- end }} labels: {{- include "adminer.labels" . | nindent 8 }} diff --git a/charts/adminer/templates/networkpolicy.yaml b/charts/adminer/templates/networkpolicy.yaml new file mode 100644 index 00000000..d39ed2ec --- /dev/null +++ b/charts/adminer/templates/networkpolicy.yaml @@ -0,0 +1,26 @@ +apiVersion: projectcalico.org/v3 +kind: NetworkPolicy +metadata: + name: adminer-network-policy + labels: + {{- include "adminer.labels" . 
| nindent 4 }} +spec: + selector: app.kubernetes.io/instance == "{{ .Release.Name }}" + ingress: + - action: Allow + protocol: TCP + destination: + ports: + - {{ .Values.service.port }} + egress: + - action: Allow + protocol: TCP + destination: + ports: + - 5432 + # allow dns requests to public dns servers + - action: Allow + protocol: UDP + destination: + ports: + - 53 diff --git a/charts/adminer/values.yaml.gotmpl b/charts/adminer/values.yaml.gotmpl index fb419e49..4be1bc89 100644 --- a/charts/adminer/values.yaml.gotmpl +++ b/charts/adminer/values.yaml.gotmpl @@ -25,7 +25,9 @@ serviceAccount: # If not set and create is true, a name is generated using the fullname template name: "" -podAnnotations: {} +podAnnotations: + # automatically restart pod on network policy change (to be sure new rules are applied) + checksum/networkpolicy: '{{`{{ include (print $.Template.BasePath "/networkpolicy.yaml") . | sha256sum }}`}}' podLabels: {} podSecurityContext: diff --git a/charts/calico-configuration/.helmignore b/charts/calico-configuration/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/charts/calico-configuration/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/calico-configuration/Chart.yaml b/charts/calico-configuration/Chart.yaml new file mode 100644 index 00000000..81560549 --- /dev/null +++ b/charts/calico-configuration/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: calico-configuration +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. 
+# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.0.1 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "3.26.4" diff --git a/charts/calico-configuration/README.md b/charts/calico-configuration/README.md new file mode 100644 index 00000000..c722ade6 --- /dev/null +++ b/charts/calico-configuration/README.md @@ -0,0 +1,49 @@ +## How to add network policy (local deployment) + +How to discover ports / networks that are used by the application +* observe existing traffic (see `Debug network policies` below) +* add staged policies to make sure all cases are included https://docs.tigera.io/calico/3.30/network-policy/staged-network-policies + - make sure deployed calico version supports it +* based on observations, create a needed network policy + +## Debug network policies + +if calico version 3.30+ is installed +* observe traffic and check `policies` field in whisker logs + - https://docs.tigera.io/calico/3.30/observability/enable-whisker + - https://docs.tigera.io/calico/3.30/observability/view-flow-logs + +if calico version <= 3.29 +* create network policy with action log + ```yaml + apiVersion: projectcalico.org/v3 + kind: NetworkPolicy + metadata: + name: log-ingress-requests + spec: + selector: app == 'db' + ingress: + - action: Log + ``` +* apply policy and see logs via journalctl (you can grep with `calico-packet`) + +## Known issues + +If network policy is created after pod, pod **MUST** be restarted for policy to take effect.
Read more https://github.com/projectcalico/calico/issues/10753#issuecomment-3140717418 +* To automate this, we can add annotations with network policy checksum to pods (see https://stackoverflow.com/questions/58602311/will-helm-upgrade-restart-pods-even-if-they-are-not-affected-by-upgrade) + +## How to view existing policies + +via kubectl: +* `kubectl get networkpolicies.crd.projectcalico.org -n adminer` +* `kubectl describe networkpolicies.crd.projectcalico.org -n adminer default.adminer-network-policy` + +via calicoctl: +* `calicoctl get networkpolicy -n adminer -o yaml` + +Note: +* global network policies and network policies are separate resources for calico +* To see all resources execute `kubectl get crd | grep calico` or `calicoctl get --help` + +Warning: +* Network policy updates are only applied to "new connections". For them to take effect, one may need to restart affected applications (pods) diff --git a/charts/calico-configuration/templates/NOTES.txt b/charts/calico-configuration/templates/NOTES.txt new file mode 100644 index 00000000..d08265bf --- /dev/null +++ b/charts/calico-configuration/templates/NOTES.txt @@ -0,0 +1,3 @@ +This chart configures Calico but does not deploy Calico itself. Calico is deployed during the Kubernetes cluster creation. + +Note: to make sure network policies are applied correctly, you may need to restart targeted application pods.
diff --git a/charts/calico-configuration/templates/globalpolicy.yaml b/charts/calico-configuration/templates/globalpolicy.yaml new file mode 100644 index 00000000..8c8973e0 --- /dev/null +++ b/charts/calico-configuration/templates/globalpolicy.yaml @@ -0,0 +1,30 @@ +# Source: https://docs.tigera.io/calico/3.30/network-policy/get-started/kubernetes-default-deny +apiVersion: projectcalico.org/v3 +kind: GlobalNetworkPolicy +metadata: + name: default-global-deny-network-policy +spec: + # "kube-public", "kube-system", "kube-node-lease" -- system namespaces + # "calico-system", "calico-apiserver", "tigera-operator" -- calico namespaces (when installed via scripts [local deployment]) + # TODO: other namespaces are to be removed from this list (once appropriate network policies are created) + namespaceSelector: + kubernetes.io/metadata.name not in {"kube-public", "kube-system", "kube-node-lease", "calico-system", "calico-apiserver", "tigera-operator", "simcore", "cert-manager", "reflector", "traefik", "victoria-logs", "csi-s3", "portainer", "topolvm", "local-path-storage", "longhorn"} + types: + - Ingress + - Egress + egress: + # allow all namespaces to communicate to DNS pods + # this will also apply to pods that have network policy defined + # so that we don't need to define DNS policy for each pod + - action: Allow + protocol: UDP + destination: + selector: 'k8s-app == "kube-dns"' + ports: + - 53 + - action: Allow + protocol: TCP + destination: + selector: 'k8s-app == "kube-dns"' + ports: + - 53 diff --git a/charts/portainer/Chart.lock b/charts/portainer/Chart.lock new file mode 100644 index 00000000..dc3e362d --- /dev/null +++ b/charts/portainer/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: portainer + repository: https://portainer.github.io/k8s/ + version: 1.0.54 +digest: sha256:bafe4182881aee8c6df3d3c6f8c523a1bd7577bed04942ad3d9b857a5437d96f +generated: "2025-07-29T11:07:15.39037387+02:00" diff --git a/charts/portainer/Chart.yaml b/charts/portainer/Chart.yaml 
new file mode 100644 index 00000000..7a7b4f48 --- /dev/null +++ b/charts/portainer/Chart.yaml @@ -0,0 +1,29 @@ +apiVersion: v2 +name: portainer +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 1.0.54 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "2.21.2" + +dependencies: + - name: portainer + version: 1.0.54 + repository: "https://portainer.github.io/k8s/" diff --git a/charts/portainer/templates/NOTES.txt b/charts/portainer/templates/NOTES.txt new file mode 100644 index 00000000..48340702 --- /dev/null +++ b/charts/portainer/templates/NOTES.txt @@ -0,0 +1 @@ +Wrapper around portainer helm chart https://github.com/portainer/k8s diff --git a/charts/portainer/templates/networkpolicy.yaml b/charts/portainer/templates/networkpolicy.yaml new file mode 100644 index 00000000..6b21b510 --- /dev/null +++ b/charts/portainer/templates/networkpolicy.yaml @@ -0,0 +1,36 @@ +apiVersion: projectcalico.org/v3 +kind: NetworkPolicy +metadata: + name: portainer-network-policy +spec: + selector: app.kubernetes.io/instance == "portainer" + types: + - Ingress + - Egress + egress: + - action: Allow + protocol: TCP + # connect to the Kubernetes API server + destination: + ports: + - 6443 + nets: + - 10.0.0.0/8 + - 172.16.0.0/12 + - 192.168.0.0/16 + # coredns 53 allow from kube system + - action: Allow + protocol: UDP + destination: + # `selector: 'k8s-app == "kube-dns"'` does not work (so global policy default dns allow does not work) + # manually allow dns and use different selector that works.
+ namespaceSelector: kubernetes.io/metadata.name == "kube-system" + ports: + - 53 + ingress: + - action: Allow + # allow traffic to portainer GUI + protocol: TCP + destination: + ports: + - {{ .Values.servicePort }} diff --git a/charts/portainer/values.ebs-pv.yaml.gotmpl b/charts/portainer/values.ebs-pv.yaml.gotmpl index b31010f6..bae9c97b 100644 --- a/charts/portainer/values.ebs-pv.yaml.gotmpl +++ b/charts/portainer/values.ebs-pv.yaml.gotmpl @@ -1,4 +1,5 @@ -persistence: - enabled: true - size: "1Gi" # minimal size for gp3 is 1Gi - storageClass: "{{ .Values.ebsStorageClassName }}" +portainer: + persistence: + enabled: true + size: "1Gi" # minimal size for gp3 is 1Gi + storageClass: "{{ .Values.ebsStorageClassName }}" diff --git a/charts/portainer/values.longhorn-pv.yaml.gotmpl b/charts/portainer/values.longhorn-pv.yaml.gotmpl index 4eae1b7b..f5d9f717 100644 --- a/charts/portainer/values.longhorn-pv.yaml.gotmpl +++ b/charts/portainer/values.longhorn-pv.yaml.gotmpl @@ -1,4 +1,5 @@ -persistence: - enabled: true - size: "300Mi" # cannot be lower https://github.com/longhorn/longhorn/issues/8488 - storageClass: "{{ .Values.longhornStorageClassName }}" +portainer: + persistence: + enabled: true + size: "300Mi" # cannot be lower https://github.com/longhorn/longhorn/issues/8488 + storageClass: "{{ .Values.longhornStorageClassName }}" diff --git a/charts/portainer/values.s3-pv.yaml.gotmpl b/charts/portainer/values.s3-pv.yaml.gotmpl index e7b6b460..11d51e48 100644 --- a/charts/portainer/values.s3-pv.yaml.gotmpl +++ b/charts/portainer/values.s3-pv.yaml.gotmpl @@ -1,4 +1,5 @@ -persistence: - enabled: true - size: "1Gi" - storageClass: "csi-s3" +portainer: + persistence: + enabled: true + size: "1Gi" + storageClass: "csi-s3" diff --git a/charts/portainer/values.yaml.gotmpl b/charts/portainer/values.yaml.gotmpl index 8cdd2dfc..303d084e 100644 --- a/charts/portainer/values.yaml.gotmpl +++ b/charts/portainer/values.yaml.gotmpl @@ -1,69 +1,71 @@ -# Default values for adminer.
-# This is a YAML-formatted file. -# Declare variables to be passed into your templates. +servicePort: &servicePort 9000 -replicaCount: 1 +portainer: + replicaCount: 1 -image: - repository: portainer/portainer-ce - pullPolicy: IfNotPresent + image: + repository: portainer/portainer-ce + pullPolicy: IfNotPresent -imagePullSecrets: [] -nameOverride: "" -fullnameOverride: "" + imagePullSecrets: [] + nameOverride: "" + fullnameOverride: "" -serviceAccount: - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: portainer-sa-clusteradmin + serviceAccount: + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: portainer-sa-clusteradmin -persistence: {} + persistence: {} -podAnnotations: {} -podLabels: {} + # podAnnotations: {} + # Not implemented in portainer chart (see https://github.com/portainer/k8s/pull/183) + # Once implemented, we can use it to add checksum of network policy like in adminer -podSecurityContext: - {} + podLabels: {} -securityContext: - {} + podSecurityContext: + {} -service: - type: "ClusterIP" - port: 9000 + securityContext: + {} -ingress: - enabled: true - className: "" - annotations: - namespace: {{ .Release.Namespace }} - cert-manager.io/cluster-issuer: "cert-issuer" - traefik.ingress.kubernetes.io/router.entrypoints: websecure - traefik.ingress.kubernetes.io/router.middlewares: traefik-traefik-basic-auth@kubernetescrd,traefik-portainer-strip-prefix@kubernetescrd # namespace + middleware name - tls: - - hosts: - - {{ requiredEnv "K8S_MONITORING_FQDN" }} - secretName: monitoring-tls - hosts: - - host: {{ requiredEnv "K8S_MONITORING_FQDN" }} - paths: - - path: /portainer - pathType: Prefix - backend: - service: - name: portainer - port: - number: 9000 + service: + 
type: "ClusterIP" + port: *servicePort -resources: - limits: - cpu: 2 - memory: 1024Mi - requests: - cpu: 0.1 - memory: 128Mi + ingress: + enabled: true + className: "" + annotations: + namespace: {{ .Release.Namespace }} + cert-manager.io/cluster-issuer: "cert-issuer" + traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.middlewares: traefik-traefik-basic-auth@kubernetescrd,traefik-portainer-strip-prefix@kubernetescrd # namespace + middleware name + tls: + - hosts: + - {{ requiredEnv "K8S_MONITORING_FQDN" }} + secretName: monitoring-tls + hosts: + - host: {{ requiredEnv "K8S_MONITORING_FQDN" }} + paths: + - path: /portainer + pathType: Prefix + backend: + service: + name: portainer + port: + number: *servicePort -nodeSelector: - ops: "true" + resources: + limits: + cpu: 2 + memory: 1024Mi + requests: + cpu: 0.1 + memory: 128Mi + + nodeSelector: + ops: "true"