Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
42 changes: 40 additions & 2 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -279,6 +279,22 @@ operator-sdk:
chmod +x ${OPERATOR_SDK}; \
fi

# Legacy bundle target kept for reference; superseded by `bundle`, which is
# identical except that it also exports INCLUDE_NETWORK_POLICIES=true so the
# generated bundle ships the NetworkPolicy manifests.
.PHONY: bundle-old
bundle-old: operator-sdk manifests kustomize ## Generate bundle manifests and metadata, then validate generated files.
# Start from a clean slate so stale manifests never leak into the new bundle.
rm -fr ./bundle
# Regenerate the kustomize manifest bases (CSV skeleton, etc.) from the API types.
${OPERATOR_SDK} generate kustomize manifests --apis-dir api
# Pin the controller/worker and auxiliary images into the kustomize overlays.
cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) worker=$(WORKER_IMG)
cd config/manager-base && $(KUSTOMIZE) edit set image must-gather=$(GATHER_IMG) signer=$(SIGNER_IMG)
cd config/webhook-server && $(KUSTOMIZE) edit set image webhook-server=$(WEBHOOK_IMG)

# Delegate the actual bundle generation to hack/generate-bundle, configured
# entirely through environment variables. The backslash continuations keep
# this a single shell invocation, so the variables reach the script.
OPERATOR_SDK="${OPERATOR_SDK}" \
BUNDLE_GEN_FLAGS="${BUNDLE_GEN_FLAGS} --extra-service-accounts kmm-operator-module-loader,kmm-operator-device-plugin" \
PKG=kernel-module-management \
SOURCE_DIR=$(dir $(realpath $(lastword $(MAKEFILE_LIST)))) \
./hack/generate-bundle

# Sanity-check the generated bundle layout and CSV with operator-sdk.
${OPERATOR_SDK} bundle validate ./bundle

.PHONY: bundle
bundle: operator-sdk manifests kustomize ## Generate bundle manifests and metadata, then validate generated files.
rm -fr ./bundle
Expand All @@ -291,9 +307,31 @@ bundle: operator-sdk manifests kustomize ## Generate bundle manifests and metada
BUNDLE_GEN_FLAGS="${BUNDLE_GEN_FLAGS} --extra-service-accounts kmm-operator-module-loader,kmm-operator-device-plugin" \
PKG=kernel-module-management \
SOURCE_DIR=$(dir $(realpath $(lastword $(MAKEFILE_LIST)))) \
INCLUDE_NETWORK_POLICIES=true \
./hack/generate-bundle

${OPERATOR_SDK} bundle validate ./bundle
# Legacy hub bundle target kept for reference; superseded by `bundle-hub`,
# which additionally exports INCLUDE_NETWORK_POLICIES=true so the hub bundle
# ships the NetworkPolicy manifests.
.PHONY: bundle-hub-old
bundle-hub-old: operator-sdk manifests kustomize ## Generate bundle manifests and metadata, then validate generated files.
# Remove any previously generated hub bundle before regenerating.
rm -fr bundle-hub

# Regenerate the hub kustomize manifests from the hub API types; input and
# output both live under config/manifests-hub so edits are applied in place.
${OPERATOR_SDK} generate kustomize manifests \
--apis-dir api-hub \
--output-dir config/manifests-hub \
--package kernel-module-management-hub \
--input-dir config/manifests-hub
# Pin the hub controller and auxiliary images into the kustomize overlays.
cd config/manager-hub && $(KUSTOMIZE) edit set image controller=$(HUB_IMG)
cd config/manager-base && $(KUSTOMIZE) edit set image must-gather=$(GATHER_IMG) signer=$(SIGNER_IMG)
cd config/webhook-server && $(KUSTOMIZE) edit set image webhook-server=$(WEBHOOK_IMG)

# Delegate bundle generation to hack/generate-bundle; SUFFIX="-hub" makes the
# script emit into ./bundle-hub instead of ./bundle. Single shell invocation
# via backslash continuations so the env vars reach the script.
OPERATOR_SDK="${OPERATOR_SDK}" \
BUNDLE_GEN_FLAGS="${BUNDLE_GEN_FLAGS}" \
MANIFESTS_DIR=config/manifests-hub \
PKG=kernel-module-management-hub \
SOURCE_DIR=$(dir $(realpath $(lastword $(MAKEFILE_LIST)))) \
SUFFIX="-hub" \
./hack/generate-bundle

# Sanity-check the generated hub bundle with operator-sdk.
${OPERATOR_SDK} bundle validate ./bundle-hub

.PHONY: bundle-hub
bundle-hub: operator-sdk manifests kustomize ## Generate bundle manifests and metadata, then validate generated files.
Expand All @@ -314,9 +352,9 @@ bundle-hub: operator-sdk manifests kustomize ## Generate bundle manifests and me
PKG=kernel-module-management-hub \
SOURCE_DIR=$(dir $(realpath $(lastword $(MAKEFILE_LIST)))) \
SUFFIX="-hub" \
INCLUDE_NETWORK_POLICIES=true \
./hack/generate-bundle

${OPERATOR_SDK} bundle validate ./bundle-hub

.PHONY: bundle-build-hub
bundle-build-hub: ## Build the bundle-hub image.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -245,19 +245,13 @@ spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kmm-hub
app.kubernetes.io/name: kmm-hub
app.kubernetes.io/part-of: kmm
control-plane: controller
strategy: {}
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: manager
labels:
app.kubernetes.io/component: kmm-hub
app.kubernetes.io/name: kmm-hub
app.kubernetes.io/part-of: kmm
control-plane: controller
spec:
affinity:
Expand Down Expand Up @@ -342,19 +336,13 @@ spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kmm-hub
app.kubernetes.io/name: kmm-hub
app.kubernetes.io/part-of: kmm
control-plane: webhook-server
strategy: {}
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: webhook-server
labels:
app.kubernetes.io/component: kmm-hub
app.kubernetes.io/name: kmm-hub
app.kubernetes.io/part-of: kmm
control-plane: webhook-server
spec:
affinity:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,6 @@ spec:
protocol: TCP
targetPort: metrics
selector:
app.kubernetes.io/component: kmm-hub
app.kubernetes.io/name: kmm-hub
app.kubernetes.io/part-of: kmm
control-plane: controller
status:
loadBalancer: {}
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: controller
namespace: system
spec:
podSelector:
matchLabels:
control-plane: controller
policyTypes:
- Egress
- Ingress
ingress:
- ports:
- protocol: TCP # metrics port
port: 8443
- protocol: TCP
port: 8081 # Healthz
egress:
- to:
- namespaceSelector: # DNS
matchLabels:
kubernetes.io/metadata.name: openshift-dns
podSelector:
matchLabels:
dns.operator.openshift.io/daemonset-dns: default
ports:
- protocol: UDP # DNS
port: 53
- protocol: TCP # DNS
port: 53
- ports: # kube api server
- protocol: TCP
port: 6443
- protocol: TCP
Comment on lines +32 to +35
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

find . -path "*bundle-hub*" -name "*networkpolicy.yaml" -o -name "*network_policy*"

Repository: rh-ecosystem-edge/kernel-module-management

Length of output: 452


🏁 Script executed:

cat -n ./bundle-hub/manifests/kmm-operator-hub-controller_networking.k8s.io_v1_networkpolicy.yaml

Repository: rh-ecosystem-edge/kernel-module-management

Length of output: 1169


🏁 Script executed:

cat -n ./bundle-hub/manifests/kmm-operator-hub-default-deny_networking.k8s.io_v1_networkpolicy.yaml

Repository: rh-ecosystem-edge/kernel-module-management

Length of output: 315


🏁 Script executed:

# Verify the context of how these NetworkPolicies work together
# Check if there are any other resources that might be relevant
git log --oneline -n 5 ./bundle-hub/manifests/kmm-operator-hub-controller_networking.k8s.io_v1_networkpolicy.yaml

Repository: rh-ecosystem-edge/kernel-module-management

Length of output: 125


🏁 Script executed:

# Check the commit that added these network policies
git show b31da524 --stat | head -30

Repository: rh-ecosystem-edge/kernel-module-management

Length of output: 1548


Restrict egress to API servers with a destination selector.

The egress rule for Kubernetes API servers (lines 32-36) lacks a to selector, permitting egress to any destination on TCP ports 6443 and 443. In a default-deny context, this rule should restrict traffic to specific API server targets. Add a destination selector to limit traffic appropriately.

Consider applying this change:

    - ports: # kube api server
        - protocol: TCP
          port: 6443
        - protocol: TCP
          port: 443
+    to:
+      - namespaceSelector:
+          matchLabels:
+            kubernetes.io/metadata.name: default
+        podSelector:
+          matchLabels:
+            component: kube-apiserver

Committable suggestion skipped: line range outside the PR's diff.

🤖 Prompt for AI Agents
In
bundle-hub/manifests/kmm-operator-hub-controller_networking.k8s.io_v1_networkpolicy.yaml
around lines 32-35, the egress rule opens TCP 6443 and 443 to any destination;
update the egress to include a destination selector by adding a "to" clause that
limits targets to the API servers (for example use namespaceSelector:
matchLabels: {kubernetes.io/metadata.name: kube-system} plus a podSelector that
matches the kube-apiserver pods, or if your cluster exposes API servers via
known IPs/CIDR use an ipBlock with the API server CIDR), ensuring only those
selected endpoints are allowed egress to ports 6443 and 443.

port: 443
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,6 @@ spec:
protocol: TCP
targetPort: 9443
selector:
app.kubernetes.io/component: kmm-hub
app.kubernetes.io/name: kmm-hub
app.kubernetes.io/part-of: kmm
control-plane: webhook-server
status:
loadBalancer: {}
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: webhook
namespace: system
spec:
podSelector:
matchLabels:
control-plane: webhook-server
policyTypes:
- Egress
- Ingress
ingress:
- ports:
- protocol: TCP
port: 9443
Comment on lines +13 to +16
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Ingress currently allows from anywhere; restrict to API server

With an ingress rule that only lists ports and no from peers, traffic on 9443 is allowed from any source. For admission webhooks we should scope ingress to the API server namespaces to reduce blast radius.

Example tightening (covers both vanilla and OpenShift):

   ingress:
-  - ports:
-    - protocol: TCP
-      port: 9443
+  - from:
+    - namespaceSelector:
+        matchLabels:
+          kubernetes.io/metadata.name: kube-system
+    - namespaceSelector:
+        matchLabels:
+          kubernetes.io/metadata.name: openshift-kube-apiserver
+    ports:
+    - protocol: TCP
+      port: 9443

If you prefer stricter scoping, we can further constrain with podSelector labels on the apiserver pods in those namespaces.

📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
ingress:
- ports:
- protocol: TCP
port: 9443
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: kube-system
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: openshift-kube-apiserver
ports:
- protocol: TCP
port: 9443
🤖 Prompt for AI Agents
In
bundle-hub/manifests/kmm-operator-hub-webhook_networking.k8s.io_v1_networkpolicy.yaml
lines 13-16, the ingress rule lists only ports so port 9443 is open to any
source; add explicit from peers to restrict traffic to API server namespaces.
Modify the ingress stanza to include from entries for the API server namespaces
(e.g., a namespaceSelector with matchLabels: kubernetes.io/metadata.name:
kube-system and a second from entry for the OpenShift API namespace, e.g.
matchLabels: kubernetes.io/metadata.name: openshift-apiserver or
openshift-kube-apiserver, as appropriate), and optionally add a podSelector
within those from entries to limit to apiserver pods; ensure multiple from
blocks are used so only requests originating from those namespaces/pods can
reach port 9443.

egress:
- ports: # kube api server port
- protocol: TCP
port: 6443
- protocol: TCP
port: 443
Original file line number Diff line number Diff line change
Expand Up @@ -352,19 +352,13 @@ spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kmm
app.kubernetes.io/name: kmm
app.kubernetes.io/part-of: kmm
control-plane: controller
strategy: {}
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: manager
labels:
app.kubernetes.io/component: kmm
app.kubernetes.io/name: kmm
app.kubernetes.io/part-of: kmm
control-plane: controller
spec:
affinity:
Expand Down Expand Up @@ -451,19 +445,13 @@ spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kmm
app.kubernetes.io/name: kmm
app.kubernetes.io/part-of: kmm
control-plane: webhook-server
strategy: {}
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: webhook-server
labels:
app.kubernetes.io/component: kmm
app.kubernetes.io/name: kmm
app.kubernetes.io/part-of: kmm
control-plane: webhook-server
spec:
affinity:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,6 @@ spec:
protocol: TCP
targetPort: metrics
selector:
app.kubernetes.io/component: kmm
app.kubernetes.io/name: kmm
app.kubernetes.io/part-of: kmm
control-plane: controller
status:
loadBalancer: {}
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: controller
namespace: system
spec:
Comment on lines +3 to +6
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

💡 Verification agent

🧩 Analysis chain

Bundle manifest should not hardcode metadata.namespace: system.

OLM typically applies bundle objects into the operator’s install namespace. Shipping a hardcoded namespace can cause install-time failures or resources landing in the wrong namespace. Recommend dropping the field in the bundled file (or have the bundle generator strip it).

 metadata:
   name: controller
-  namespace: system

🏁 Script executed:

#!/usr/bin/env bash
# Locate the NetworkPolicy manifest and print its metadata section
manifest="bundle/manifests/kmm-operator-controller_networking.k8s.io_v1_networkpolicy.yaml"
if [ -f "$manifest" ]; then
  echo "Showing lines 1-20 of $manifest:"
  sed -n '1,20p' "$manifest"
else
  echo "$manifest not found."
fi

Length of output: 871


Remove hardcoded namespace from NetworkPolicy manifest

The bundle’s NetworkPolicy manifest explicitly sets metadata.namespace: system, which will force the resource into the system namespace regardless of the operator’s install namespace. This can lead to install-time failures or resources landing in the wrong namespace. OLM by default applies namespaced objects into the operator’s install namespace, so you should omit the namespace field.

Please update the file as follows:

• File: bundle/manifests/kmm-operator-controller_networking.k8s.io_v1_networkpolicy.yaml
• Remove or comment out the namespace: system line under metadata.

Suggested diff:

 metadata:
   name: controller
-  namespace: system
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
metadata:
name: controller
namespace: system
spec:
metadata:
name: controller
spec:
🤖 Prompt for AI Agents
In
bundle/manifests/kmm-operator-controller_networking.k8s.io_v1_networkpolicy.yaml
around lines 3 to 6, remove the hardcoded metadata.namespace: system line so the
NetworkPolicy does not force the resource into a specific namespace; delete or
comment out that namespace entry under metadata and leave metadata.name only so
OLM can apply the object into the operator’s install namespace.

podSelector:
matchLabels:
control-plane: controller
policyTypes:
- Egress
- Ingress
ingress:
- ports:
- protocol: TCP # metrics port
port: 8443
- protocol: TCP
port: 8081 # Healthz
egress:
- to:
- namespaceSelector: # DNS
matchLabels:
kubernetes.io/metadata.name: openshift-dns
podSelector:
matchLabels:
dns.operator.openshift.io/daemonset-dns: default
ports:
- protocol: UDP # DNS
port: 53
- protocol: TCP # DNS
port: 53
- ports: # kube api server
- protocol: TCP
port: 6443
- protocol: TCP
port: 443
3 changes: 0 additions & 3 deletions bundle/manifests/kmm-operator-webhook-service_v1_service.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,6 @@ spec:
protocol: TCP
targetPort: 9443
selector:
app.kubernetes.io/component: kmm
app.kubernetes.io/name: kmm
app.kubernetes.io/part-of: kmm
control-plane: webhook-server
status:
loadBalancer: {}
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: webhook
namespace: system
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Hard-coded namespace "system" in a bundle may not match install namespace.

OLM typically installs namespaced resources into the operator’s target namespace. A hard-coded "system" can cause install failures or drift if the operator is installed into openshift-kmm (as your default overlay suggests).

Two safe options:

  • Prefer omitting metadata.namespace in the source NP so the bundle inherits the install namespace.
  • Or ensure the bundle generation step rewrites namespace to the operator’s install namespace.

Proposed change (if you choose to drop explicit namespace in source before bundling):

-  namespace: system

If you’d rather rewrite at bundle time, adjust hack/generate-bundle to strip or replace the namespace on copied NetworkPolicy YAMLs.

📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
namespace: system
🤖 Prompt for AI Agents
In bundle/manifests/kmm-operator-webhook_networking.k8s.io_v1_networkpolicy.yaml
around line 5 the NetworkPolicy has a hard-coded namespace: system which can
break installs into a different operator namespace; fix this by either removing
the metadata.namespace entry from the source NetworkPolicy so it inherits the
install namespace, or if you must keep per-bundle YAMLs, update
hack/generate-bundle to strip or replace metadata.namespace for NetworkPolicy
files during bundle generation (identify files by kind: NetworkPolicy and remove
or set namespace to the operator install namespace variable).

spec:
podSelector:
matchLabels:
control-plane: webhook-server
policyTypes:
- Egress
- Ingress
ingress:
- ports:
- protocol: TCP
port: 9443
egress:
- ports: # kube api server port
- protocol: TCP
port: 6443
- protocol: TCP
port: 443
2 changes: 2 additions & 0 deletions ci/e2e/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,10 @@ generatorOptions:

configMapGenerator:
- name: kmm-kmod-dockerfile
namespace: openshift-kmm
files: [dockerfile=Dockerfile]

secretGenerator:
- name: build-secret
namespace: openshift-kmm
literals: [ci-build-secret=super-secret-value]
Comment on lines 11 to 19
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Add namespace resource or document pre-requisite namespace creation.

The configMapGenerator and secretGenerator both specify namespace: openshift-kmm, but there's no explicit Namespace resource defined in this kustomization file. Either add a Namespace resource to this file to ensure the namespace is created before the resources are applied, or update documentation to confirm the e2e test setup creates the namespace as a prerequisite step.

🤖 Prompt for AI Agents
In ci/e2e/kustomization.yaml around lines 11 to 19, the configMapGenerator and
secretGenerator specify namespace: openshift-kmm but no Namespace resource is
created; add a Namespace resource entry to this kustomization (or include a
resources: - namespace.yaml that defines metadata.name: openshift-kmm) so the
namespace is created prior to the generated configmap/secret, or alternatively
update the CI/e2e docs to state that openshift-kmm must be created as a
prerequisite before applying this kustomization.

1 change: 1 addition & 0 deletions ci/e2e/module.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ apiVersion: kmm.sigs.x-k8s.io/v1beta1
kind: Module
metadata:
name: kmm-ci
namespace: openshift-kmm
spec:
moduleLoader:
container:
Expand Down
16 changes: 8 additions & 8 deletions ci/prow/e2e-incluster-build
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,10 @@ POD_NAME=''

wait_for_pod_and_print_logs () {
# we can't exec a command nor get the logs on a pod that isn't `Running` yet.
oc wait "pod/${POD_NAME}" --for jsonpath='{.status.phase}'=Running --timeout=60s
oc wait -n openshift-kmm "pod/${POD_NAME}" --for jsonpath='{.status.phase}'=Running --timeout=60s
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue

Fix syntax error in the oc wait command.

There's a missing space between openshift-kmm and "pod/${POD_NAME}" which will cause a syntax error.

-  oc wait -n openshift-kmm"pod/${POD_NAME}" --for jsonpath='{.status.phase}'=Running --timeout=60s
+  oc wait -n openshift-kmm "pod/${POD_NAME}" --for jsonpath='{.status.phase}'=Running --timeout=60s
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
oc wait -n openshift-kmm"pod/${POD_NAME}" --for jsonpath='{.status.phase}'=Running --timeout=60s
oc wait -n openshift-kmm "pod/${POD_NAME}" --for jsonpath='{.status.phase}'=Running --timeout=60s
🤖 Prompt for AI Agents
ci/prow/e2e-incluster-build around line 9: the oc wait command lacks a space
between the namespace and the resource argument, causing a syntax error; fix by
inserting a space so the -n openshift-kmm flag and the pod/${POD_NAME} argument
are separate (or alternatively use the pod/NAME form), ensuring the command
syntax is valid and the --for/--timeout flags remain unchanged.


echo "Print pod ${POD_NAME} logs..."
oc logs "pod/${POD_NAME}" -f
oc logs -n openshift-kmm "pod/${POD_NAME}" -f
}

check_module_not_loaded () {
Expand Down Expand Up @@ -46,14 +46,14 @@ timeout 1m bash -c 'until oc apply -k ci/e2e; do sleep 3; done'
# Wait for the build pod to be created. `kubectl wait` doesn't support such option,
# see https://github.com/kubernetes/kubernetes/issues/83242.
echo "Waiting for the build pod to be created..."
timeout 1m bash -c 'until oc get pods -o json | jq -er ".items[].metadata.name | select(.? | match(\"build\"))"; do sleep 1; done'
POD_NAME=$(oc get pods -o json | jq -r '.items[].metadata.name | select(.? | match("build"))')
timeout 1m bash -c 'until oc get pods -n openshift-kmm -o json | jq -er ".items[].metadata.name | select(.? | match(\"build\"))"; do sleep 1; done'
POD_NAME=$(oc get pods -n openshift-kmm -o json | jq -r '.items[].metadata.name | select(.? | match("build"))')

wait_for_pod_and_print_logs

echo "Waiting for the signing pod to be created..."
timeout 1m bash -c 'until oc get pods -o json | jq -er ".items[].metadata.name | select(.? | match(\"sign\"))"; do sleep 1; done'
POD_NAME=$(oc get pods -o json | jq -r '.items[].metadata.name | select(.? | match("sign"))')
timeout 1m bash -c 'until oc get pods -n openshift-kmm -o json | jq -er ".items[].metadata.name | select(.? | match(\"sign\"))"; do sleep 1; done'
POD_NAME=$(oc get pods -n openshift-kmm -o json | jq -r '.items[].metadata.name | select(.? | match("sign"))')

wait_for_pod_and_print_logs

Expand All @@ -66,7 +66,7 @@ oc debug "node/${NODE}" -- chroot host/ lsmod | grep kmm_ci_b
check_module_not_loaded "dummy"

echo "Remove the Module..."
oc delete modules.kmm.sigs.x-k8s.io/kmm-ci --wait=false
oc delete modules.kmm.sigs.x-k8s.io/kmm-ci --wait=false -n openshift-kmm

echo "Check that the module gets unloaded from the node..."
timeout 1m bash -c 'until ! oc debug node/${NODE} -- chroot host/ lsmod | grep kmm_ci_a; do sleep 3; done'
Expand All @@ -75,4 +75,4 @@ echo "Check that the dependent module is also unloaded from the node..."
check_module_not_loaded "kmm_ci_b"

echo "Wait for the Module to be deleted..."
oc wait --for delete modules.kmm.sigs.x-k8s.io/kmm-ci
oc wait --for delete modules.kmm.sigs.x-k8s.io/kmm-ci -n openshift-kmm
2 changes: 2 additions & 0 deletions ci/sign-key-certs/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@ generatorOptions:

secretGenerator:
- name: kmm-kmod-signing-cert
namespace: openshift-kmm
files: [cert=signing-cert.pem]
- name: kmm-kmod-signing-key
namespace: openshift-kmm
files: [key=signing-key.pem]
2 changes: 1 addition & 1 deletion config/default-hub/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ components:
configurations:
- kustomizeconfig.yaml
labels:
- includeSelectors: true
- includeSelectors: false
pairs:
app.kubernetes.io/component: kmm-hub
app.kubernetes.io/name: kmm-hub
Expand Down
2 changes: 1 addition & 1 deletion config/default/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ namePrefix: kmm-operator-
configurations:
- kustomizeconfig.yaml
labels:
- includeSelectors: true
- includeSelectors: false
pairs:
app.kubernetes.io/component: kmm
app.kubernetes.io/name: kmm
Expand Down
Loading
Loading