From 7201f45bf383e232539afc29a55977f27059fb4e Mon Sep 17 00:00:00 2001 From: Jian Li Date: Wed, 22 Oct 2025 16:38:42 +0800 Subject: [PATCH 1/4] scripts: Implemented the first E2E test. Clientset instance can be used to connect to cluster, manage resources. With clientset instance we can evaluate the possibility of moving other tests to OTE. The reference to OTE framework: https://docs.google.com/document/d/1cFZj9QdzW8hbHc3H0Nce-2xrJMtpDJrwAse9H7hLiWk/edit?tab=t.0#heading=h.8cf3f4eii1q8 --- ...hift_payload_cluster-version-operator.json | 2 +- cmd/cluster-version-operator-tests/README.md | 20 ++++++++-- test/utilities/connection.go | 37 +++++++++++++++++++ 3 files changed, 54 insertions(+), 5 deletions(-) create mode 100644 test/utilities/connection.go diff --git a/.openshift-tests-extension/openshift_payload_cluster-version-operator.json b/.openshift-tests-extension/openshift_payload_cluster-version-operator.json index 64960e141..02d1a1344 100644 --- a/.openshift-tests-extension/openshift_payload_cluster-version-operator.json +++ b/.openshift-tests-extension/openshift_payload_cluster-version-operator.json @@ -1,6 +1,6 @@ [ { - "name": "[Jira:\"Cluster Version Operator\"] cluster-version-operator-tests should support passing tests", + "name": "[Jira:\"Cluster Version Operator\"] cluster-version-operator-tests should support passing tests the sanity test should pass", "labels": {}, "resources": { "isolation": {} diff --git a/cmd/cluster-version-operator-tests/README.md b/cmd/cluster-version-operator-tests/README.md index 9c2a188b4..e348102ef 100644 --- a/cmd/cluster-version-operator-tests/README.md +++ b/cmd/cluster-version-operator-tests/README.md @@ -4,16 +4,28 @@ It integrates [openshift-tests-extension](https://github.com/openshift-eng/opens cluster-version-operator which allows openshift components to contribute tests to openshift-tests' suites with extension binaries. 
+## Build the executable binary +In root folder, run below command to build executable binary: +```console +$ make build +``` ## Run the tests locally -## Using the framework +### Using the binary +- run a test-suite +```console +$ _output///cluster-version-operator-tests run-suite +``` +where test suites can be listed by `_output///cluster-version-operator-tests info`. + +- run a single test case ```console -$ hack/build-go.sh -$ _output///cluster-version-operator-tests run-suite cluster-version-operator +$ _output///cluster-version-operator-tests run-test ``` +where test names can be listed by `_output///cluster-version-operator-tests list`. -## Using ginko-cli +### Using ginko-cli After [installing-ginkgo](https://onsi.github.io/ginkgo/#installing-ginkgo): diff --git a/test/utilities/connection.go b/test/utilities/connection.go new file mode 100644 index 000000000..f83133835 --- /dev/null +++ b/test/utilities/connection.go @@ -0,0 +1,37 @@ +package utilities + +import ( + "errors" + "fmt" + "os" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" +) + +// getKubeClient creates a Kubernetes clientset. +func getKubeClient() (*kubernetes.Clientset, error) { + configPath, present := os.LookupEnv("KUBECONFIG") + if !present { + return nil, errors.New("the environment variable KUBECONFIG must be set") + } + config, err := clientcmd.BuildConfigFromFlags("", configPath) + if err != nil { + return nil, fmt.Errorf("unable to load build config: %w", err) + } + // Create the Clientset + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("unable to create a Kubernetes clientset: %w", err) + } + return clientset, nil +} + +// MustGetKubeClient creates a Kubernetes clientset, or panics on failures. 
+func MustGetKubeClient() *kubernetes.Clientset { + clientset, err := getKubeClient() + if err != nil { + panic("unable to create a Kubernetes clientset: " + err.Error()) + } + return clientset +} From e35f937556aee52a3d99f7c9bf90df8dab35eb1b Mon Sep 17 00:00:00 2001 From: Jian Li Date: Wed, 12 Nov 2025 17:14:17 +0800 Subject: [PATCH 2/4] Finished first E2E test case by openshift/client-go and Kubernetes/client-go openshift/client-go gives us more efficient functions to access cluster --- ...hift_payload_cluster-version-operator.json | 2 +- cmd/cluster-version-operator-tests/README.md | 5 ++ test/cvo/cvo.go | 82 +++++++++++++++++-- test/utilities/connection.go | 40 ++++++++- 4 files changed, 120 insertions(+), 9 deletions(-) diff --git a/.openshift-tests-extension/openshift_payload_cluster-version-operator.json b/.openshift-tests-extension/openshift_payload_cluster-version-operator.json index 02d1a1344..64960e141 100644 --- a/.openshift-tests-extension/openshift_payload_cluster-version-operator.json +++ b/.openshift-tests-extension/openshift_payload_cluster-version-operator.json @@ -1,6 +1,6 @@ [ { - "name": "[Jira:\"Cluster Version Operator\"] cluster-version-operator-tests should support passing tests the sanity test should pass", + "name": "[Jira:\"Cluster Version Operator\"] cluster-version-operator-tests should support passing tests", "labels": {}, "resources": { "isolation": {} diff --git a/cmd/cluster-version-operator-tests/README.md b/cmd/cluster-version-operator-tests/README.md index e348102ef..fbb2d934a 100644 --- a/cmd/cluster-version-operator-tests/README.md +++ b/cmd/cluster-version-operator-tests/README.md @@ -32,6 +32,11 @@ After [installing-ginkgo](https://onsi.github.io/ginkgo/#installing-ginkgo): ```console $ ginkgo ./test/... ``` +or run a specific test +```console +$ ginkgo --focus "" ./test/... +``` +`test case name` is the text in g.It() The output looks nicer this way. 
diff --git a/test/cvo/cvo.go b/test/cvo/cvo.go index 124f13e81..0f7991c09 100644 --- a/test/cvo/cvo.go +++ b/test/cvo/cvo.go @@ -1,12 +1,84 @@ package cvo import ( - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" + "context" + "fmt" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + + v1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + "github.com/openshift/cluster-version-operator/test/utilities" ) -var _ = Describe(`[Jira:"Cluster Version Operator"] cluster-version-operator-tests`, func() { - It("should support passing tests", func() { - Expect(true).To(BeTrue()) +var _ = g.Describe(`[Jira:"Cluster Version Operator"] cluster-version-operator-tests`, func() { + g.It("should support passing tests", func() { + o.Expect(true).To(o.BeTrue()) + }) +}) + +var _ = g.Describe("[Jira:Cluster Version Operator] The cluster version operator", g.Ordered, g.Label("cvo"), func() { + defer g.GinkgoRecover() + var client *v1.ConfigV1Client + var kubeclient *kubernetes.Clientset + + g.BeforeAll(func() { + client = utilities.MustGetV1Client() + kubeclient = utilities.MustGetKubeClient() + }) + + g.It(`should not install resources annotated with release.openshift.io/delete=true`, g.Label("Conformance", "High", "42543"), func() { + annotation := "release.openshift.io/delete" + + auths, err := client.Authentications().List(context.TODO(), metav1.ListOptions{}) + o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing authentications") + + g.By(fmt.Sprintf("checking if authentication with %s annotation exists", annotation)) + for _, auth := range auths.Items { + if _, ok := auth.Annotations[annotation]; ok { + o.Expect(ok).NotTo(o.BeTrue(), fmt.Sprintf("Unexpectedly installed authentication %s which has '%s' annotation", auth.Name, annotation)) + } + } + + namespaces, 
err := kubeclient.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) + o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing namespaces") + + g.By(fmt.Sprintf("checking if special resources with %s annotation exist in all namespaces", annotation)) + for _, ns := range namespaces.Items { + namespace := ns.Name + fmt.Printf("namespace: %s\n", namespace) + + fmt.Println(" - Test services...") + services, err := kubeclient.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{}) + o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing services") + for _, service := range services.Items { + if _, ok := service.Annotations[annotation]; ok { + o.Expect(ok).NotTo(o.BeTrue(), fmt.Sprintf("Unexpectedly installed service %s which has '%s' annotation", service.Name, annotation)) + } + } + + fmt.Println(" - Test RoleBinding...") + rolebindings, err := kubeclient.RbacV1().RoleBindings(namespace).List(context.TODO(), metav1.ListOptions{}) + o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing rolebindings") + for _, rb := range rolebindings.Items { + if _, ok := rb.Annotations[annotation]; ok { + o.Expect(ok).NotTo(o.BeTrue(), fmt.Sprintf("Unexpectedly installed RoleBinding %s which has '%s' annotation", rb.Name, annotation)) + } + } + + fmt.Println(" - Test CronJob...") + cronjobs, err := kubeclient.BatchV1().CronJobs(namespace).List(context.TODO(), metav1.ListOptions{}) + o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing cronjobs") + for _, cj := range cronjobs.Items { + if _, ok := cj.Annotations[annotation]; ok { + o.Expect(ok).NotTo(o.BeTrue(), fmt.Sprintf("Unexpectedly installed CronJob %s which has %s annotation", cj.Name, annotation)) + } + } + + fmt.Println("success") + } }) }) diff --git a/test/utilities/connection.go b/test/utilities/connection.go index 
f83133835..69d354365 100644 --- a/test/utilities/connection.go +++ b/test/utilities/connection.go @@ -6,16 +6,25 @@ import ( "os" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + + configclientv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" ) -// getKubeClient creates a Kubernetes clientset. -func getKubeClient() (*kubernetes.Clientset, error) { +// getKubeConfig get KUBECONFIG file from environment variable +func getKubeConfig() (*rest.Config, error) { configPath, present := os.LookupEnv("KUBECONFIG") if !present { return nil, errors.New("the environment variable KUBECONFIG must be set") } config, err := clientcmd.BuildConfigFromFlags("", configPath) + return config, err +} + +// getKubeClient creates a kubernetes.Clientset instance. +func getKubeClient() (*kubernetes.Clientset, error) { + config, err := getKubeConfig() if err != nil { return nil, fmt.Errorf("unable to load build config: %w", err) } @@ -24,10 +33,26 @@ func getKubeClient() (*kubernetes.Clientset, error) { if err != nil { return nil, fmt.Errorf("unable to create a Kubernetes clientset: %w", err) } + return clientset, nil } -// MustGetKubeClient creates a Kubernetes clientset, or panics on failures. +// getV1Client creates a configclientv1.ConfigV1Client instance. +func getV1Client() (*configclientv1.ConfigV1Client, error) { + config, err := getKubeConfig() + if err != nil { + return nil, fmt.Errorf("unable to load build config: %w", err) + } + // Create the Clientset + clientset, err := configclientv1.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("unable to create a configclientv1 clientset: %w", err) + } + + return clientset, nil +} + +// MustGetKubeClient creates a kubernetes.Clientset instance, or panics on failures. 
func MustGetKubeClient() *kubernetes.Clientset { clientset, err := getKubeClient() if err != nil { @@ -35,3 +60,12 @@ func MustGetKubeClient() *kubernetes.Clientset { } return clientset } + +// MustGetV1Client creates a configclientv1.ConfigV1Client instance, or panics on failures. +func MustGetV1Client() *configclientv1.ConfigV1Client { + clientset, err := getV1Client() + if err != nil { + panic("unable to create a configclientv1 clientset: " + err.Error()) + } + return clientset +} From 946876f9912d7d3cdee5cdd0d3c41575369a9e6b Mon Sep 17 00:00:00 2001 From: Jian Li Date: Mon, 17 Nov 2025 17:53:14 +0800 Subject: [PATCH 3/4] test: Manually adding test metadata After adding new OTE test cases, we need to update test metadata --- ...hift_payload_cluster-version-operator.json | 15 +++++++++++ cmd/cluster-version-operator-tests/README.md | 25 +++---------------- 2 files changed, 19 insertions(+), 21 deletions(-) diff --git a/.openshift-tests-extension/openshift_payload_cluster-version-operator.json b/.openshift-tests-extension/openshift_payload_cluster-version-operator.json index 64960e141..3a4637172 100644 --- a/.openshift-tests-extension/openshift_payload_cluster-version-operator.json +++ b/.openshift-tests-extension/openshift_payload_cluster-version-operator.json @@ -8,5 +8,20 @@ "source": "openshift:payload:cluster-version-operator", "lifecycle": "blocking", "environmentSelector": {} + }, + { + "name": "[Jira:Cluster Version Operator] The cluster version operator should not install resources annotated with release.openshift.io/delete=true", + "labels": { + "42543": {}, + "Conformance": {}, + "High": {}, + "cvo": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:cluster-version-operator", + "lifecycle": "blocking", + "environmentSelector": {} } ] \ No newline at end of file diff --git a/cmd/cluster-version-operator-tests/README.md b/cmd/cluster-version-operator-tests/README.md index fbb2d934a..9c2a188b4 100644 --- 
a/cmd/cluster-version-operator-tests/README.md +++ b/cmd/cluster-version-operator-tests/README.md @@ -4,39 +4,22 @@ It integrates [openshift-tests-extension](https://github.com/openshift-eng/opens cluster-version-operator which allows openshift components to contribute tests to openshift-tests' suites with extension binaries. -## Build the executable binary -In root folder, run below command to build executable binary: -```console -$ make build -``` ## Run the tests locally -### Using the binary -- run a test-suite +## Using the framework ```console -$ _output///cluster-version-operator-tests run-suite +$ hack/build-go.sh +$ _output///cluster-version-operator-tests run-suite cluster-version-operator ``` -where test suites can be listed by `_output///cluster-version-operator-tests info`. -- run a single test case -```console -$ _output///cluster-version-operator-tests run-test -``` -where test names can be listed by `_output///cluster-version-operator-tests list`. - -### Using ginko-cli +## Using ginko-cli After [installing-ginkgo](https://onsi.github.io/ginkgo/#installing-ginkgo): ```console $ ginkgo ./test/... ``` -or run a specific test -```console -$ ginkgo --focus "" ./test/... -``` -`test case name` is the text in g.It() The output looks nicer this way. From 338557b4101f18a907135aff03431375e393d703 Mon Sep 17 00:00:00 2001 From: Jian Li Date: Mon, 24 Nov 2025 18:55:38 +0800 Subject: [PATCH 4/4] client-go demo This is the demo to show how we use client-go to automate a test case. 
--- test/cvo/cvo.go | 170 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 170 insertions(+) diff --git a/test/cvo/cvo.go b/test/cvo/cvo.go index 0f7991c09..c45a1b528 100644 --- a/test/cvo/cvo.go +++ b/test/cvo/cvo.go @@ -2,18 +2,86 @@ package cvo import ( "context" + "errors" "fmt" + "log" + "time" g "github.com/onsi/ginkgo/v2" o "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + authenticationv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/wait" + clientmetav1 "k8s.io/client-go/applyconfigurations/meta/v1" + applyconfigurationspolicyv1 "k8s.io/client-go/applyconfigurations/policy/v1" "k8s.io/client-go/kubernetes" + "k8s.io/utils/ptr" v1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" "github.com/openshift/cluster-version-operator/test/utilities" ) +func CreateServiceAccount(client *kubernetes.Clientset, accountName string, clusterRole string, namespace string) (token string, err error) { + + _, err = client.CoreV1().ServiceAccounts(namespace).Get(context.TODO(), accountName, metav1.GetOptions{}) + + if err == nil { + token, err := client.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), accountName, &authenticationv1.TokenRequest{}, metav1.CreateOptions{}) + return token.String(), err + } + + account := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: accountName, + Namespace: namespace, + }, + } + _, err = client.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), account, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + rb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s:%s:%s", namespace, clusterRole, accountName), + Namespace: namespace, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + 
Kind: "ClusterRole", + Name: "cluster-admin", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: accountName, + Namespace: namespace, + }, + }, + } + _, err = client.RbacV1().ClusterRoleBindings().Create(context.TODO(), rb, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + newToken, err := client.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), accountName, &authenticationv1.TokenRequest{}, metav1.CreateOptions{}) + return newToken.String(), err +} + +func DeleteServiceAccount(client *kubernetes.Clientset, accountName string, clusterRole string, namespace string) { + name := fmt.Sprintf("%s:%s:%s", namespace, clusterRole, accountName) + err := client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), name, metav1.DeleteOptions{}) + if err != nil { + panic("failed to delete ClusterRoleBindings") + } + + err = client.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), accountName, metav1.DeleteOptions{}) + if err != nil { + panic("failed to delete ServiceAccount") + } +} + var _ = g.Describe(`[Jira:"Cluster Version Operator"] cluster-version-operator-tests`, func() { g.It("should support passing tests", func() { o.Expect(true).To(o.BeTrue()) @@ -81,4 +149,106 @@ var _ = g.Describe("[Jira:Cluster Version Operator] The cluster version operator fmt.Println("success") } }) + + g.It(`Precheck with oc adm upgrade recommend`, g.Label("Conformance", "Low", "70980"), func() { + + g.By("create a namespace") + ns := "ns-70980" + tmpNs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}} + kubeclient.CoreV1().Namespaces().Create(context.TODO(), tmpNs, metav1.CreateOptions{}) + + defer func() { + kubeclient.CoreV1().Namespaces().Delete(context.TODO(), ns, metav1.DeleteOptions{}) + }() + + g.By("create a deployment") + deploymentName := "hello-openshift" + containerName := "hello-openshift" + containerImage := "openshift/hello-openshift:invaid" + deployment := &appsv1.Deployment{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: deploymentName, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(2)), // Number of desired replicas + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": containerName, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": containerName, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: containerName, + Image: containerImage, + Ports: []corev1.ContainerPort{ + { + ContainerPort: 80, + }, + }, + }, + }, + }, + }, + }, + } + kubeclient.AppsV1().Deployments(ns).Create(context.TODO(), deployment, metav1.CreateOptions{}) + + defer func() { + kubeclient.AppsV1().Deployments(ns).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{}) + }() + + err := wait.Poll(1*time.Minute, 3*time.Minute, func() (bool, error) { + allPods, err := kubeclient.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + log.Fatalf("Error listing pods: %v", err) + } + for _, pod := range allPods.Items { + if pod.Status.Phase == corev1.PodRunning { + return true, errors.New("there are pods running: " + pod.Name) + } + } + return true, nil + }) + allPods, _ := kubeclient.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) + fmt.Printf("there are %v pods\n", len(allPods.Items)) + for _, pod := range allPods.Items { + fmt.Printf(" - Pod: %s - %s\n", pod.Name, pod.Status.Phase) + } + o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should not occur") + + g.By("create a PodDisruptionBudget") + pdbName := "my-pdb" + pdb := &applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration{ + ObjectMetaApplyConfiguration: &clientmetav1.ObjectMetaApplyConfiguration{ + Name: &pdbName, + Namespace: &ns, + }, + Spec: &applyconfigurationspolicyv1.PodDisruptionBudgetSpecApplyConfiguration{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: 1, + }, + }, + } + 
kubeclient.PolicyV1().PodDisruptionBudgets(ns).Apply(context.TODO(), pdb, metav1.ApplyOptions{}) + + defer func() { + kubeclient.PolicyV1().PodDisruptionBudgets(ns).Delete(context.TODO(), pdbName, metav1.DeleteOptions{}) + }() + + g.By("wait some minutes, there is a critical issue for PDB") + token, _ := CreateServiceAccount(kubeclient, "monitorer", "cluster-admin", ns) + defer func() { + DeleteServiceAccount(kubeclient, "monitorer", "cluster-admin", ns) + }() + // TODO: get alert + fmt.Println(token) + }) })