From 404e1afbfbf81736326edee29425c82cd560f743 Mon Sep 17 00:00:00 2001
From: Martin Jackson
Date: Tue, 25 Mar 2025 09:34:26 -0500
Subject: [PATCH] Shift to fix dataimportcrons strategy

Add datasources to RBAC
Add dataimportcrons to RBAC
Update changelog
Update ocp-v channel
Fix some lint errors
Disable natural language linter in github workflow due to legacy issues in old common
Remove trailing whitespace
remove blank line
---
 .github/workflows/superlinter.yml   |   1 +
 Changes.md                          |   6 +-
 ansible/odf_fix_dataimportcrons.yml | 138 ++++++++++++++++++++++++++++
 values-hub.yaml                     |   5 +-
 4 files changed, 147 insertions(+), 3 deletions(-)
 create mode 100644 ansible/odf_fix_dataimportcrons.yml

diff --git a/.github/workflows/superlinter.yml b/.github/workflows/superlinter.yml
index cb802a53..e22ad069 100644
--- a/.github/workflows/superlinter.yml
+++ b/.github/workflows/superlinter.yml
@@ -33,6 +33,7 @@ jobs:
           VALIDATE_YAML: false
           VALIDATE_ANSIBLE: false
           VALIDATE_DOCKERFILE_HADOLINT: false
+          NATURAL_LANGUAGE: false
           # VALIDATE_MARKDOWN: false
           # VALIDATE_NATURAL_LANGUAGE: false
           #VALIDATE_TEKTON: false
diff --git a/Changes.md b/Changes.md
index 90087cde..d44d3774 100644
--- a/Changes.md
+++ b/Changes.md
@@ -14,7 +14,7 @@
 
 ## Changes for v1.2 (February 9, 2023)
 
-* Kiosk_mode improvements: kiosk_mode role now has a variable `kiosk_port` which influences the kiosk-mode script and controls which port firefox connects to. (Previously this was hard-coded to port 8088; the var defaults to 8088 so existing setups will continue to work. This will make it easier to tailor or customize the pattern to work with containers other than Ignition.
-
+* Kiosk_mode improvements: kiosk_mode role now has a variable `kiosk_port` which influences the kiosk-mode script and controls which port firefox connects to. (Previously this was hardcoded to port 8088; the var defaults to 8088 so existing setups will continue to work. 
+This will make it easier to tailor or customize the pattern to work with containers other than Ignition.
 
 * cloud-init changes: move the cloud-init configuration file, user, and password to secrets from edge-gitops-vms values. This was a regrettable oversight in v1.0 and v1.1.
@@ -26,7 +26,7 @@
 
 * No "visible" changes so not updating the branch pointer
 
-* Updated ansible code to follow best practices and silent many linter warnings
+* Updated ansible code to follow best practices and silent many linting warnings
 
 * Updated edge-gitops-vms chart to add SkipDryRunOnMissingResource annotations to prevent errors occuring due to race conditions with OpenShift Virtualization
 
@@ -52,3 +52,5 @@
 * Update deploy_kubevirt_worker.yml Ansible playbook to copy securityGroups and blockDevices config from first machineSet. Tag naming schemes changed from OCP 4.15 to 4.16; this method ensures forward and backward compatibility.
 * Remove ODF overrides from OCP 4.12/3 that force storageClass to gp2; all released versions should use gp3-csi now.
 * Include overrides for OCP 4.12 and OCP 4.13 to use the older `ocs-storagecluster-ceph-rbd` storageClass.
+* Backport odf_fix_dataimportcrons.yml from development Ansible Edge GitOps/Federated Edge Observability and
+  use stable channel for KubeVirt/OCP-V (3/25/2025)
diff --git a/ansible/odf_fix_dataimportcrons.yml b/ansible/odf_fix_dataimportcrons.yml
new file mode 100644
index 00000000..3764aa49
--- /dev/null
+++ b/ansible/odf_fix_dataimportcrons.yml
@@ -0,0 +1,138 @@
+#!/usr/bin/env ansible-playbook
+---
+# This workaround was implemented to fix a problem where openshift-cnv would
+# not recognize a default virt storage class change and change the format of
+# datasources. The issue was fixed in OpenShift Virtualization 4.16.4.
+- name: Determine if we have PVC clean-up to do
+  become: false
+  connection: local
+  hosts: localhost
+  gather_facts: false
+  vars:
+    kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}"
+    dataimportcron_cleanup: false
+    image_cleanup_namespace: "openshift-virtualization-os-images"
+    cluster_version: "{{ global['clusterVersion'] | default('UNSET') }}"
+  tasks:
+    - name: Check cluster version
+      ansible.builtin.debug:
+        var: cluster_version
+
+    - name: Exit if normal version check is not right
+      ansible.builtin.meta: end_play
+      when: cluster_version not in [ '4.16', '4.17', '4.18', 'UNSET' ]
+
+    - name: Find default storageclass
+      ansible.builtin.shell: |
+        set -o pipefail
+        oc get storageclass -o json | jq -r '.items[] | select(.metadata.annotations."storageclass.kubernetes.io/is-default-class")'
+      register: default_sc_output
+      changed_when: false
+
+    - name: Find virtualization default storageclass
+      ansible.builtin.shell: |
+        set -o pipefail
+        oc get storageclass -o json | jq -r '.items[] | select(.metadata.annotations."storageclass.kubevirt.io/is-default-virt-class")'
+      register: default_virt_sc_output
+      changed_when: false
+
+    - name: Compare default virtualization storageclass and default storageclass to determine whether to clean PVCs
+      block:
+        - name: Parse results
+          ansible.builtin.set_fact:
+            default_sc: '{{ default_sc_output.stdout | from_json }}'
+            default_virt_sc: '{{ default_virt_sc_output.stdout | from_json }}'
+
+        - name: Commit to dataimportcron cleanup
+          ansible.builtin.set_fact:
+            dataimportcron_cleanup: true
+          when:
+            - default_virt_sc.metadata.name == "ocs-storagecluster-ceph-rbd-virtualization"
+            - default_sc.metadata.name != default_virt_sc.metadata.name
+      rescue:
+        - name: Note that we exited
+          ansible.builtin.debug:
+            msg: "Caught an error before we could determine to clean up dataimportcrons, exiting"
+
+        - name: End play (successfully)
+          ansible.builtin.meta: end_play
+
+    - name: Cleanup incorrect datasourceimport images (PVCs)
+      when:
+        - dataimportcron_cleanup
+      block:
+        - name: Find dataimportcrons
+          kubernetes.core.k8s_info:
+            kind: dataimportcron
+            namespace: '{{ image_cleanup_namespace }}'
+            api_version: cdi.kubevirt.io/v1beta1
+          register: dic_list
+
+        - name: Extract dic names
+          ansible.builtin.set_fact:
+            dic_names: "{{ dic_names | default([]) + [ item.metadata.name ] }}"
+          loop: "{{ dic_list.resources }}"
+
+        - name: Show names
+          ansible.builtin.debug:
+            var: dic_names
+
+        - name: Find datasources to cleanup
+          kubernetes.core.k8s_info:
+            kind: datasource
+            namespace: '{{ image_cleanup_namespace }}'
+            api_version: cdi.kubevirt.io/v1beta1
+          register: ds_cleanup_list
+
+        - name: Keep track of objects to remove
+          ansible.builtin.set_fact:
+            cron_cleanups: []
+            ds_cleanups: []
+
+        - name: Record datasources that need cleanup
+          ansible.builtin.set_fact:
+            cron_cleanups: "{{ cron_cleanups + [ item.metadata.labels['cdi.kubevirt.io/dataImportCron'] ] }}"
+            ds_cleanups: "{{ ds_cleanups + [ item.metadata.name ] }}"
+          loop: "{{ ds_cleanup_list.resources }}"
+          when:
+            - item['metadata']['labels']['cdi.kubevirt.io/dataImportCron'] is defined
+            - item['metadata']['labels']['cdi.kubevirt.io/dataImportCron'] in dic_names
+            - item.status.conditions[0].message != "DataSource is ready to be consumed"
+
+        - name: Check on removables
+          ansible.builtin.debug:
+            msg:
+              - "cron_cleanups: {{ cron_cleanups }}"
+              - "ds_cleanups: {{ ds_cleanups }}"
+
+        - name: Delete datasources in cleanup list
+          kubernetes.core.k8s:
+            kind: datasource
+            namespace: '{{ image_cleanup_namespace }}'
+            api_version: cdi.kubevirt.io/v1beta1
+            name: "{{ item }}"
+            state: absent
+          loop: "{{ ds_cleanups }}"
+
+        - name: Delete datavolumes in cleanup list
+          kubernetes.core.k8s:
+            kind: datavolume
+            namespace: '{{ image_cleanup_namespace }}'
+            api_version: cdi.kubevirt.io/v1beta1
+            label_selectors:
+              - 'cdi.kubevirt.io/dataImportCron={{ item }}'
+            state: absent
+          loop: "{{ cron_cleanups }}"
+
+        - name: Delete dataimportcrons in cleanup list
+          kubernetes.core.k8s:
+            kind: dataimportcron
+            namespace: '{{ image_cleanup_namespace }}'
+            api_version: cdi.kubevirt.io/v1beta1
+            name: "{{ item }}"
+            state: absent
+          loop: "{{ cron_cleanups }}"
+      rescue:
+        - name: Note that we exited
+          ansible.builtin.debug:
+            msg: "Caught an error while cleaning up dataimportcrons, exiting"
diff --git a/values-hub.yaml b/values-hub.yaml
index 1129628d..d753046f 100644
--- a/values-hub.yaml
+++ b/values-hub.yaml
@@ -19,6 +19,7 @@ clusterGroup:
     openshift-virtualization:
       name: kubevirt-hyperconverged
       namespace: openshift-cnv
+      channel: stable
 
     openshift-data-foundation:
       name: odf-operator
@@ -33,7 +34,7 @@ clusterGroup:
       playbook: ansible/deploy_kubevirt_worker.yml
       verbosity: -vvv
     - name: clean-golden-images
-      playbook: ansible/odf_clean_pvcs.yml
+      playbook: ansible/odf_fix_dataimportcrons.yml
       image: quay.io/hybridcloudpatterns/utility-container:latest
       verbosity: -vvv
     - name: configure-aap-controller
@@ -48,6 +49,8 @@ clusterGroup:
         - machinesets
         - persistentvolumeclaims
         - datavolumes
+        - datasources
+        - dataimportcrons
         verbs:
         - "*"
       - apiGroups: