Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
44 changes: 44 additions & 0 deletions .github/workflows/helm-lint.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
# NOTE: This workflow can be run locally using https://github.com/nektos/act with:
# act -W .github/workflows/helm-lint.yaml workflow_call -s GITHUB_TOKEN=$(gh auth token)
# Reusable workflow that lints and validates the Helm chart in charts/operator.
name: Helm Lint
on:
  workflow_call:
    inputs:
      ref:
        type: string
        description: The Git ref under test.
        required: true

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          ref: ${{ inputs.ref }}
          # Full history so ct can diff charts against the target branch
          fetch-depth: 0

      - name: Set up Helm
        uses: azure/setup-helm@v4
        with:
          version: v3.15.3

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2

      # Lint the chart; maintainer validation is disabled since the chart
      # does not declare maintainers.
      - name: Run chart-testing (lint)
        run: |-
          ct lint \
            --target-branch ${{ github.event.repository.default_branch }} \
            --charts charts/operator \
            --validate-maintainers=false

      # Render the chart with a dummy release name and check the resulting
      # manifests against the Kubernetes schemas using kubeconform.
      - name: Run template validation
        run: |-
          helm template foo charts/operator \
            | docker run -i --rm ghcr.io/yannh/kubeconform:latest \
              --strict --summary

      # Compare rendered manifests against the committed snapshots in
      # charts/operator/tests/__snapshot__ (see CONTRIBUTING.md to update them).
      - name: Run manifest snapshot test
        run: docker run -i --rm -v $(pwd):/apps helmunittest/helm-unittest charts/operator
7 changes: 6 additions & 1 deletion .github/workflows/main.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,13 @@ jobs:
with:
ref: ${{ github.ref }}

helm_lint:
uses: ./.github/workflows/helm-lint.yaml
with:
ref: ${{ github.ref }}

publish_images:
needs: [unit_tests]
needs: [unit_tests, helm_lint]
uses: ./.github/workflows/publish-images.yaml
with:
ref: ${{ github.ref }}
Expand Down
7 changes: 6 additions & 1 deletion .github/workflows/pr.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,14 @@ jobs:
with:
ref: ${{ github.event.pull_request.head.sha }}

helm_lint:
uses: ./.github/workflows/helm-lint.yaml
with:
ref: ${{ github.event.pull_request.head.sha }}

# When the PR is from a branch of the main repo, publish images and charts
publish_images:
needs: [unit_tests]
needs: [unit_tests, helm_lint]
uses: ./.github/workflows/publish-images.yaml
with:
ref: ${{ github.event.pull_request.head.sha }}
Expand Down
22 changes: 22 additions & 0 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# Contributing

We welcome contributions and suggestions for improvements to this code base.
Please check for relevant issues and PRs before opening a new one of your own.

## Making a contribution

### Helm template snapshots

The CI in this repository uses the Helm
[unittest](https://github.com/helm-unittest/helm-unittest) plugin's
snapshotting functionality to check PRs for changes to the templated manifests.
Therefore, if your PR changes the manifest templates or chart values, you
will need to update the saved snapshots for your changes to pass the
automated tests. The easiest way to do this is to run the `helm unittest`
command inside a Docker container from the repository root.

```
docker run -i --rm -v $(pwd):/apps helmunittest/helm-unittest charts/operator -u
```

where the `-u` option is used to update the existing snapshots.
2 changes: 2 additions & 0 deletions charts/operator/.helmignore
Original file line number Diff line number Diff line change
Expand Up @@ -21,3 +21,5 @@
.idea/
*.tmproj
.vscode/
# Helm unit test files
tests/
211 changes: 211 additions & 0 deletions charts/operator/tests/__snapshot__/snapshot_test.yaml.snap
Original file line number Diff line number Diff line change
@@ -0,0 +1,211 @@
templated manifests should match snapshot:
1: |
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: azimuth-schedule-operator
app.kubernetes.io/version: main
helm.sh/chart: azimuth-schedule-operator-0.1.0
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: release-name-azimuth-schedule-operator:edit
rules:
- apiGroups:
- scheduling.azimuth.stackhpc.com
resources:
- '*'
verbs:
- '*'
2: |
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: azimuth-schedule-operator
app.kubernetes.io/version: main
helm.sh/chart: azimuth-schedule-operator-0.1.0
name: release-name-azimuth-schedule-operator:controller
rules:
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- '*'
- apiGroups:
- ""
resources:
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
- events.k8s.io
resources:
- events
verbs:
- create
- apiGroups:
- scheduling.azimuth.stackhpc.com
resources:
- '*'
verbs:
- '*'
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- delete
- apiGroups:
- caas.azimuth.stackhpc.com
resources:
- clusters
verbs:
- get
- delete
- apiGroups:
- azimuth.stackhpc.com
resources:
- clusters
verbs:
- get
- delete
3: |
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: azimuth-schedule-operator
app.kubernetes.io/version: main
helm.sh/chart: azimuth-schedule-operator-0.1.0
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: release-name-azimuth-schedule-operator:view
rules:
- apiGroups:
- scheduling.azimuth.stackhpc.com
resources:
- '*'
verbs:
- get
- list
- watch
4: |
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: azimuth-schedule-operator
app.kubernetes.io/version: main
helm.sh/chart: azimuth-schedule-operator-0.1.0
name: release-name-azimuth-schedule-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: release-name-azimuth-schedule-operator:controller
subjects:
- kind: ServiceAccount
name: release-name-azimuth-schedule-operator
namespace: NAMESPACE
5: |
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: azimuth-schedule-operator
app.kubernetes.io/version: main
helm.sh/chart: azimuth-schedule-operator-0.1.0
name: release-name-azimuth-schedule-operator
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/name: azimuth-schedule-operator
strategy:
type: Recreate
template:
metadata:
labels:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/name: azimuth-schedule-operator
spec:
containers:
- env:
- name: AZIMUTH_SCHEDULE_CHECK_INTERVAL_SECONDS
value: "60"
- name: AZIMUTH_LEASE_CHECK_INTERVAL_SECONDS
value: "60"
- name: AZIMUTH_LEASE_DEFAULT_GRACE_PERIOD_SECONDS
value: "600"
- name: AZIMUTH_LEASE_BLAZAR_ENABLED
value: auto
image: ghcr.io/azimuth-cloud/azimuth-schedule-operator:main
imagePullPolicy: IfNotPresent
name: operator
ports:
- containerPort: 8080
name: metrics
protocol: TCP
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /tmp
name: tmp
securityContext:
runAsNonRoot: true
serviceAccountName: release-name-azimuth-schedule-operator
volumes:
- emptyDir: {}
name: tmp
6: |
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: azimuth-schedule-operator
app.kubernetes.io/version: main
helm.sh/chart: azimuth-schedule-operator-0.1.0
name: release-name-azimuth-schedule-operator
spec:
ports:
- name: metrics
port: 8080
protocol: TCP
targetPort: metrics
selector:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/name: azimuth-schedule-operator
type: ClusterIP
7: |
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: azimuth-schedule-operator
app.kubernetes.io/version: main
helm.sh/chart: azimuth-schedule-operator-0.1.0
name: release-name-azimuth-schedule-operator
7 changes: 7 additions & 0 deletions charts/operator/tests/snapshot_test.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
# Helm unittest suite that snapshots all templated manifests of the chart.
# To update manifest snapshots run the helm unittest plugin with the -u option
# from the repo root (same command as CI and CONTRIBUTING.md):
# docker run -i --rm -v $(pwd):/apps helmunittest/helm-unittest charts/operator -u
suite: Manifest snapshot tests
tests:
  - it: templated manifests should match snapshot
    asserts:
      # Compares rendered output to __snapshot__/snapshot_test.yaml.snap
      - matchSnapshot: {}
Loading