diff --git a/.github/workflows/helm-lint.yaml b/.github/workflows/helm-lint.yaml
new file mode 100644
index 0000000..345ee12
--- /dev/null
+++ b/.github/workflows/helm-lint.yaml
@@ -0,0 +1,44 @@
+# NOTE: This workflow can be run locally using https://github.com/nektos/act with:
+#   act -W .github/workflows/helm-lint.yaml workflow_call -s GITHUB_TOKEN=$(gh auth token)
+name: Helm Lint
+on:
+  workflow_call:
+    inputs:
+      ref:
+        type: string
+        description: The Git ref under test.
+        required: true
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          ref: ${{ inputs.ref }}
+          fetch-depth: 0
+
+      - name: Set up Helm
+        uses: azure/setup-helm@v4
+        with:
+          version: v3.15.3
+
+      - name: Set up chart-testing
+        uses: helm/chart-testing-action@v2
+
+      - name: Run chart-testing (lint)
+        run: |-
+          ct lint \
+            --target-branch ${{ github.event.repository.default_branch }} \
+            --charts charts/operator \
+            --validate-maintainers=false
+
+      - name: Run template validation
+        run: |-
+          helm template foo charts/operator \
+            | docker run -i --rm ghcr.io/yannh/kubeconform:latest \
+              --strict --summary
+
+      - name: Run manifest snapshot test
+        run: docker run -i --rm -v $(pwd):/apps helmunittest/helm-unittest charts/operator
diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml
index 4ca021b..f0a6e11 100644
--- a/.github/workflows/main.yaml
+++ b/.github/workflows/main.yaml
@@ -15,8 +15,13 @@ jobs:
     with:
       ref: ${{ github.ref }}
 
+  helm_lint:
+    uses: ./.github/workflows/helm-lint.yaml
+    with:
+      ref: ${{ github.ref }}
+
   publish_images:
-    needs: [unit_tests]
+    needs: [unit_tests, helm_lint]
     uses: ./.github/workflows/publish-images.yaml
     with:
       ref: ${{ github.ref }}
diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml
index 7bc9ff1..1454b7c 100644
--- a/.github/workflows/pr.yaml
+++ b/.github/workflows/pr.yaml
@@ -21,9 +21,14 @@ jobs:
     with:
       ref: ${{ github.event.pull_request.head.sha }}
 
+  helm_lint:
+    uses: ./.github/workflows/helm-lint.yaml
+    with:
+      ref: ${{ github.event.pull_request.head.sha }}
+
   # When the PR is from a branch of the main repo, publish images and charts
   publish_images:
-    needs: [unit_tests]
+    needs: [unit_tests, helm_lint]
     uses: ./.github/workflows/publish-images.yaml
     with:
       ref: ${{ github.event.pull_request.head.sha }}
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..4c25346
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,22 @@
+# Contributing
+
+We welcome contributions and suggestions for improvements to this codebase.
+Please check for relevant issues and PRs before opening a new one of your own.
+
+## Making a contribution
+
+### Helm template snapshots
+
+The CI in this repository uses the Helm
+[unittest](https://github.com/helm-unittest/helm-unittest) plugin's
+snapshotting functionality to check PRs for changes to the templated manifests.
+If your PR changes the manifest templates or values, you will therefore need
+to update the saved snapshots for your changes to pass the automated tests.
+The easiest way to do this is to run the `helm unittest` command in a Docker
+container from the repo root:
+
+```
+docker run -i --rm -v $(pwd):/apps helmunittest/helm-unittest charts/operator -u
+```
+
+where the `-u` option updates the existing snapshots.
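For contributors without Docker, the same tests can also be run through the helm-unittest plugin directly. A minimal sketch, assuming Helm 3 is installed locally (the install URL is the upstream helm-unittest repository):

```
# One-time install of the unittest plugin from the upstream repo
helm plugin install https://github.com/helm-unittest/helm-unittest.git

# Run the suite against the saved snapshots
helm unittest charts/operator

# Regenerate the snapshots after changing templates or values
helm unittest -u charts/operator
```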
diff --git a/charts/operator/.helmignore b/charts/operator/.helmignore
index 0e8a0eb..10a0506 100644
--- a/charts/operator/.helmignore
+++ b/charts/operator/.helmignore
@@ -21,3 +21,5 @@
 .idea/
 *.tmproj
 .vscode/
+# Helm unit test files
+tests/
diff --git a/charts/operator/tests/__snapshot__/snapshot_test.yaml.snap b/charts/operator/tests/__snapshot__/snapshot_test.yaml.snap
new file mode 100644
index 0000000..5b8d1b4
--- /dev/null
+++ b/charts/operator/tests/__snapshot__/snapshot_test.yaml.snap
@@ -0,0 +1,211 @@
+templated manifests should match snapshot:
+  1: |
+    apiVersion: rbac.authorization.k8s.io/v1
+    kind: ClusterRole
+    metadata:
+      labels:
+        app.kubernetes.io/instance: RELEASE-NAME
+        app.kubernetes.io/managed-by: Helm
+        app.kubernetes.io/name: azimuth-schedule-operator
+        app.kubernetes.io/version: main
+        helm.sh/chart: azimuth-schedule-operator-0.1.0
+        rbac.authorization.k8s.io/aggregate-to-admin: "true"
+        rbac.authorization.k8s.io/aggregate-to-edit: "true"
+        rbac.authorization.k8s.io/aggregate-to-view: "true"
+      name: release-name-azimuth-schedule-operator:edit
+    rules:
+      - apiGroups:
+          - scheduling.azimuth.stackhpc.com
+        resources:
+          - '*'
+        verbs:
+          - '*'
+  2: |
+    apiVersion: rbac.authorization.k8s.io/v1
+    kind: ClusterRole
+    metadata:
+      labels:
+        app.kubernetes.io/instance: RELEASE-NAME
+        app.kubernetes.io/managed-by: Helm
+        app.kubernetes.io/name: azimuth-schedule-operator
+        app.kubernetes.io/version: main
+        helm.sh/chart: azimuth-schedule-operator-0.1.0
+      name: release-name-azimuth-schedule-operator:controller
+    rules:
+      - apiGroups:
+          - apiextensions.k8s.io
+        resources:
+          - customresourcedefinitions
+        verbs:
+          - '*'
+      - apiGroups:
+          - ""
+        resources:
+          - namespaces
+        verbs:
+          - list
+          - watch
+      - apiGroups:
+          - ""
+          - events.k8s.io
+        resources:
+          - events
+        verbs:
+          - create
+      - apiGroups:
+          - scheduling.azimuth.stackhpc.com
+        resources:
+          - '*'
+        verbs:
+          - '*'
+      - apiGroups:
+          - ""
+        resources:
+          - secrets
+        verbs:
+          - get
+          - delete
+      - apiGroups:
+          - caas.azimuth.stackhpc.com
+        resources:
+          - clusters
+        verbs:
+          - get
+          - delete
+      - apiGroups:
+          - azimuth.stackhpc.com
+        resources:
+          - clusters
+        verbs:
+          - get
+          - delete
+  3: |
+    apiVersion: rbac.authorization.k8s.io/v1
+    kind: ClusterRole
+    metadata:
+      labels:
+        app.kubernetes.io/instance: RELEASE-NAME
+        app.kubernetes.io/managed-by: Helm
+        app.kubernetes.io/name: azimuth-schedule-operator
+        app.kubernetes.io/version: main
+        helm.sh/chart: azimuth-schedule-operator-0.1.0
+        rbac.authorization.k8s.io/aggregate-to-view: "true"
+      name: release-name-azimuth-schedule-operator:view
+    rules:
+      - apiGroups:
+          - scheduling.azimuth.stackhpc.com
+        resources:
+          - '*'
+        verbs:
+          - get
+          - list
+          - watch
+  4: |
+    apiVersion: rbac.authorization.k8s.io/v1
+    kind: ClusterRoleBinding
+    metadata:
+      labels:
+        app.kubernetes.io/instance: RELEASE-NAME
+        app.kubernetes.io/managed-by: Helm
+        app.kubernetes.io/name: azimuth-schedule-operator
+        app.kubernetes.io/version: main
+        helm.sh/chart: azimuth-schedule-operator-0.1.0
+      name: release-name-azimuth-schedule-operator
+    roleRef:
+      apiGroup: rbac.authorization.k8s.io
+      kind: ClusterRole
+      name: release-name-azimuth-schedule-operator:controller
+    subjects:
+      - kind: ServiceAccount
+        name: release-name-azimuth-schedule-operator
+        namespace: NAMESPACE
+  5: |
+    apiVersion: apps/v1
+    kind: Deployment
+    metadata:
+      labels:
+        app.kubernetes.io/instance: RELEASE-NAME
+        app.kubernetes.io/managed-by: Helm
+        app.kubernetes.io/name: azimuth-schedule-operator
+        app.kubernetes.io/version: main
+        helm.sh/chart: azimuth-schedule-operator-0.1.0
+      name: release-name-azimuth-schedule-operator
+    spec:
+      replicas: 1
+      selector:
+        matchLabels:
+          app.kubernetes.io/instance: RELEASE-NAME
+          app.kubernetes.io/name: azimuth-schedule-operator
+      strategy:
+        type: Recreate
+      template:
+        metadata:
+          labels:
+            app.kubernetes.io/instance: RELEASE-NAME
+            app.kubernetes.io/name: azimuth-schedule-operator
+        spec:
+          containers:
+            - env:
+                - name: AZIMUTH_SCHEDULE_CHECK_INTERVAL_SECONDS
+                  value: "60"
+                - name: AZIMUTH_LEASE_CHECK_INTERVAL_SECONDS
+                  value: "60"
+                - name: AZIMUTH_LEASE_DEFAULT_GRACE_PERIOD_SECONDS
+                  value: "600"
+                - name: AZIMUTH_LEASE_BLAZAR_ENABLED
+                  value: auto
+              image: ghcr.io/azimuth-cloud/azimuth-schedule-operator:main
+              imagePullPolicy: IfNotPresent
+              name: operator
+              ports:
+                - containerPort: 8080
+                  name: metrics
+                  protocol: TCP
+              resources: {}
+              securityContext:
+                allowPrivilegeEscalation: false
+                capabilities:
+                  drop:
+                    - ALL
+                readOnlyRootFilesystem: true
+              volumeMounts:
+                - mountPath: /tmp
+                  name: tmp
+          securityContext:
+            runAsNonRoot: true
+          serviceAccountName: release-name-azimuth-schedule-operator
+          volumes:
+            - emptyDir: {}
+              name: tmp
+  6: |
+    apiVersion: v1
+    kind: Service
+    metadata:
+      labels:
+        app.kubernetes.io/instance: RELEASE-NAME
+        app.kubernetes.io/managed-by: Helm
+        app.kubernetes.io/name: azimuth-schedule-operator
+        app.kubernetes.io/version: main
+        helm.sh/chart: azimuth-schedule-operator-0.1.0
+      name: release-name-azimuth-schedule-operator
+    spec:
+      ports:
+        - name: metrics
+          port: 8080
+          protocol: TCP
+          targetPort: metrics
+      selector:
+        app.kubernetes.io/instance: RELEASE-NAME
+        app.kubernetes.io/name: azimuth-schedule-operator
+      type: ClusterIP
+  7: |
+    apiVersion: v1
+    kind: ServiceAccount
+    metadata:
+      labels:
+        app.kubernetes.io/instance: RELEASE-NAME
+        app.kubernetes.io/managed-by: Helm
+        app.kubernetes.io/name: azimuth-schedule-operator
+        app.kubernetes.io/version: main
+        helm.sh/chart: azimuth-schedule-operator-0.1.0
+      name: release-name-azimuth-schedule-operator
diff --git a/charts/operator/tests/snapshot_test.yaml b/charts/operator/tests/snapshot_test.yaml
new file mode 100644
index 0000000..262562e
--- /dev/null
+++ b/charts/operator/tests/snapshot_test.yaml
@@ -0,0 +1,7 @@
+# To update the manifest snapshots, run the helm unittest plugin with the -u option:
+#   docker run -i --rm -v $(pwd):/apps helmunittest/helm-unittest -u charts/operator
+suite: Manifest snapshot tests
+tests:
+  - it: templated manifests should match snapshot
+    asserts:
+      - matchSnapshot: {}
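The snapshot suite flags any drift in the rendered manifests, but it cannot express which fields matter most. Targeted assertions could complement it; the sketch below is hypothetical (the `deployment.yaml` template name and the suite file path are assumptions, not confirmed by this diff), using helm-unittest `equal` assertions with values taken from the snapshot above:

```
# charts/operator/tests/deployment_test.yaml (hypothetical companion suite)
suite: Deployment assertions
templates:
  # Assumed template filename; adjust to the chart's actual layout
  - deployment.yaml
tests:
  - it: should run a single replica with the Recreate strategy
    asserts:
      - equal:
          path: spec.replicas
          value: 1
      - equal:
          path: spec.strategy.type
          value: Recreate
  - it: should drop all capabilities in the operator container
    asserts:
      - equal:
          path: spec.template.spec.containers[0].securityContext.capabilities.drop[0]
          value: ALL
```

Unlike the snapshot test, a suite like this keeps passing when unrelated fields change, so it documents intent rather than freezing the whole manifest.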