NO-ISSUE: OTA-1605 Create first e2e test OCP-42543 #1249

base: main
Changes from all commits
First changed file in the diff (package cvo, the new e2e test):

```diff
@@ -1,12 +1,254 @@
 package cvo
 
 import (
-	. "github.com/onsi/ginkgo/v2"
-	. "github.com/onsi/gomega"
+	"context"
+	"errors"
+	"fmt"
+	"log"
+	"time"
+
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+	appsv1 "k8s.io/api/apps/v1"
+	authenticationv1 "k8s.io/api/authentication/v1"
+	corev1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	kerrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/wait"
+	clientmetav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+	applyconfigurationspolicyv1 "k8s.io/client-go/applyconfigurations/policy/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/utils/ptr"
+
+	v1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
+	"github.com/openshift/cluster-version-operator/test/utilities"
 )
 
-var _ = Describe(`[Jira:"Cluster Version Operator"] cluster-version-operator-tests`, func() {
-	It("should support passing tests", func() {
-		Expect(true).To(BeTrue())
+func CreateServiceAccount(client *kubernetes.Clientset, accountName string, clusterRole string, namespace string) (token string, err error) {
+	_, err = client.CoreV1().ServiceAccounts(namespace).Get(context.TODO(), accountName, metav1.GetOptions{})
+
+	if err == nil {
+		token, err := client.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), accountName, &authenticationv1.TokenRequest{}, metav1.CreateOptions{})
+		return token.String(), err
+	}
+
+	account := &corev1.ServiceAccount{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      accountName,
+			Namespace: namespace,
+		},
+	}
+	_, err = client.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), account, metav1.CreateOptions{})
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	rb := &rbacv1.ClusterRoleBinding{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("%s:%s:%s", namespace, clusterRole, accountName),
+			Namespace: namespace,
+		},
+		RoleRef: rbacv1.RoleRef{
+			APIGroup: "rbac.authorization.k8s.io",
+			Kind:     "ClusterRole",
+			Name:     "cluster-admin",
+		},
+		Subjects: []rbacv1.Subject{
+			{
+				Kind:      "ServiceAccount",
+				Name:      accountName,
+				Namespace: namespace,
+			},
+		},
+	}
+	_, err = client.RbacV1().ClusterRoleBindings().Create(context.TODO(), rb, metav1.CreateOptions{})
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	newToken, err := client.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), accountName, &authenticationv1.TokenRequest{}, metav1.CreateOptions{})
+	return newToken.String(), err
+}
+
+func DeleteServiceAccount(client *kubernetes.Clientset, accountName string, clusterRole string, namespace string) {
+	name := fmt.Sprintf("%s:%s:%s", namespace, clusterRole, accountName)
+	err := client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), name, metav1.DeleteOptions{})
+	if err != nil {
+		panic("failed to delete ClusterRoleBindings")
+	}
+
+	err = client.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), accountName, metav1.DeleteOptions{})
+	if err != nil {
+		panic("failed to delete ServiceAccount")
+	}
+}
+
+var _ = g.Describe(`[Jira:"Cluster Version Operator"] cluster-version-operator-tests`, func() {
+	g.It("should support passing tests", func() {
+		o.Expect(true).To(o.BeTrue())
 	})
 })
+
+var _ = g.Describe("[Jira:Cluster Version Operator] The cluster version operator", g.Ordered, g.Label("cvo"), func() {
+	defer g.GinkgoRecover()
+	var client *v1.ConfigV1Client
+	var kubeclient *kubernetes.Clientset
+
+	g.BeforeAll(func() {
+		client = utilities.MustGetV1Client()
+		kubeclient = utilities.MustGetKubeClient()
+	})
+
+	g.It(`should not install resources annotated with release.openshift.io/delete=true`, g.Label("Conformance", "High", "42543"), func() {
+		annotation := "release.openshift.io/delete"
+
+		auths, err := client.Authentications().List(context.TODO(), metav1.ListOptions{})
+		o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing authentications")
+
+		g.By(fmt.Sprintf("checking if authentication with %s annotation exists", annotation))
+		for _, auth := range auths.Items {
+			if _, ok := auth.Annotations[annotation]; ok {
+				o.Expect(ok).NotTo(o.BeTrue(), fmt.Sprintf("Unexpectedly installed authentication %s which has '%s' annotation", auth.Name, annotation))
+			}
+		}
+
+		namespaces, err := kubeclient.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
+		o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing namespaces")
+
+		g.By(fmt.Sprintf("checking if special resources with %s annotation exist in all namespaces", annotation))
+		for _, ns := range namespaces.Items {
+			namespace := ns.Name
+			fmt.Printf("namespace: %s\n", namespace)
+
+			fmt.Println(" - Test services...")
+			services, err := kubeclient.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{})
+			o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing services")
+			for _, service := range services.Items {
+				if _, ok := service.Annotations[annotation]; ok {
+					o.Expect(ok).NotTo(o.BeTrue(), fmt.Sprintf("Unexpectedly installed service %s which has '%s' annotation", service.Name, annotation))
+				}
+			}
+
+			fmt.Println(" - Test RoleBinding...")
+			rolebindings, err := kubeclient.RbacV1().RoleBindings(namespace).List(context.TODO(), metav1.ListOptions{})
+			o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing rolebindings")
+			for _, rb := range rolebindings.Items {
+				if _, ok := rb.Annotations[annotation]; ok {
+					o.Expect(ok).NotTo(o.BeTrue(), fmt.Sprintf("Unexpectedly installed RoleBinding %s which has '%s' annotation", rb.Name, annotation))
+				}
+			}
+
+			fmt.Println(" - Test CronJob...")
+			cronjobs, err := kubeclient.BatchV1().CronJobs(namespace).List(context.TODO(), metav1.ListOptions{})
+			o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing cronjobs")
+			for _, cj := range cronjobs.Items {
+				if _, ok := cj.Annotations[annotation]; ok {
+					o.Expect(ok).NotTo(o.BeTrue(), fmt.Sprintf("Unexpectedly installed CronJob %s which has %s annotation", cj.Name, annotation))
+				}
+			}
+
+			fmt.Println("success")
+		}
```
Comment on lines +91 to +150

**Fix List error handling to fail on real API errors, not just NotFound**

All the List calls currently follow this pattern:

```go
auths, err := client.Authentications().List(...)
o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing ...")
```

This has two problems: `kerrors.IsNotFound(nil)` is `false`, so the assertion passes vacuously on success, and it is also `false` for every non-404 failure (forbidden, timeout, server error), so real API errors are silently ignored. You want the test to fail on any error from those List calls. A more robust pattern is to assert on the error directly:

```diff
-		auths, err := client.Authentications().List(context.TODO(), metav1.ListOptions{})
-		o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing authentications")
+		auths, err := client.Authentications().List(context.TODO(), metav1.ListOptions{})
+		o.Expect(err).NotTo(o.HaveOccurred(), "failed to list authentications")

-		namespaces, err := kubeclient.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
-		o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing namespaces")
+		namespaces, err := kubeclient.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
+		o.Expect(err).NotTo(o.HaveOccurred(), "failed to list namespaces")

-		services, err := kubeclient.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{})
-		o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing services")
+		services, err := kubeclient.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{})
+		o.Expect(err).NotTo(o.HaveOccurred(), "failed to list services")

-		rolebindings, err := kubeclient.RbacV1().RoleBindings(namespace).List(context.TODO(), metav1.ListOptions{})
-		o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing rolebindings")
+		rolebindings, err := kubeclient.RbacV1().RoleBindings(namespace).List(context.TODO(), metav1.ListOptions{})
+		o.Expect(err).NotTo(o.HaveOccurred(), "failed to list rolebindings")

-		cronjobs, err := kubeclient.BatchV1().CronJobs(namespace).List(context.TODO(), metav1.ListOptions{})
-		o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing cronjobs")
+		cronjobs, err := kubeclient.BatchV1().CronJobs(namespace).List(context.TODO(), metav1.ListOptions{})
+		o.Expect(err).NotTo(o.HaveOccurred(), "failed to list cronjobs")
```

This way, any failure to talk to the API (including NotFound) fails the test immediately. If you want to double-check the semantics, please confirm in the Kubernetes/client-go docs (or local experiments) that a List over an empty collection returns an empty list with a nil error.

🌐 Web query: How does a client-go List behave when no resources exist, and when is `IsNotFound` appropriate?

💡 Result: Short answer: List returns an empty list with `err == nil` (the returned list object's `Items` slice is just empty). `IsNotFound` is not used for a successful List; it detects the HTTP 404 `StatusError` returned when an operation asks for a specific named resource (e.g., Get or Delete by name). Use `IsNotFound` when you expect a single resource might not exist and you want to handle "resource missing" specially.

References: k8s error helpers (`IsNotFound`) [1]; client List/Get semantics (List returns a list object; error only on failure) [2]; REST practice: collection endpoints return 200 with an empty array when there are no items [3].

**Replace error assertions with proper nil checks for all List calls.** The web search confirms the analysis: Kubernetes List calls return an empty list with `err == nil` when no resources exist, and `IsNotFound` only matches 404s for named resources. This means the current code has a critical flaw:

```go
o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing ...")
```

will pass whenever `err` is nil or any non-404 error. The proposed fix is correct; replace all five List error checks in the test file:

```diff
-o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should occur when listing ...")
+o.Expect(err).NotTo(o.HaveOccurred(), "failed to list ...")
```

This ensures any API failure is caught, not silently suppressed.
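To make the failure mode concrete, here is a minimal standalone sketch (assuming only `k8s.io/apimachinery` on the module path; the errors are constructed locally rather than returned by a real API server) showing that `IsNotFound` is false both for `nil` and for non-404 failures:

```go
package main

import (
	"fmt"

	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	svc := schema.GroupResource{Resource: "services"}

	// A successful List returns err == nil, and IsNotFound(nil) is false,
	// so the BeFalse() assertion passes without checking anything.
	fmt.Println(kerrors.IsNotFound(nil)) // false

	// A 403 (or timeout, 500, ...) is not a 404 either, so BeFalse() still
	// passes and the real failure is silently swallowed.
	fmt.Println(kerrors.IsNotFound(kerrors.NewForbidden(svc, "demo", nil))) // false

	// Only a genuine 404 StatusError flips the assertion.
	fmt.Println(kerrors.IsNotFound(kerrors.NewNotFound(svc, "demo"))) // true
}
```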
The test file diff continues:

```diff
+	})
+
+	g.It(`Precheck with oc adm upgrade recommend`, g.Label("Conformance", "Low", "70980"), func() {
+
+		g.By("create a namespace")
+		ns := "ns-70980"
+		tmpNs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}
+		kubeclient.CoreV1().Namespaces().Create(context.TODO(), tmpNs, metav1.CreateOptions{})
+
+		defer func() {
+			kubeclient.CoreV1().Namespaces().Delete(context.TODO(), ns, metav1.DeleteOptions{})
+		}()
+
+		g.By("create a deployment")
+		deploymentName := "hello-openshift"
+		containerName := "hello-openshift"
+		containerImage := "openshift/hello-openshift:invaid"
+		deployment := &appsv1.Deployment{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: deploymentName,
+			},
+			Spec: appsv1.DeploymentSpec{
+				Replicas: ptr.To(int32(2)), // Number of desired replicas
+				Selector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"app": containerName,
+					},
+				},
+				Template: corev1.PodTemplateSpec{
+					ObjectMeta: metav1.ObjectMeta{
+						Labels: map[string]string{
+							"app": containerName,
+						},
+					},
+					Spec: corev1.PodSpec{
+						Containers: []corev1.Container{
+							{
+								Name:  containerName,
+								Image: containerImage,
+								Ports: []corev1.ContainerPort{
+									{
+										ContainerPort: 80,
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		}
+		kubeclient.AppsV1().Deployments(ns).Create(context.TODO(), deployment, metav1.CreateOptions{})
+
+		defer func() {
+			kubeclient.AppsV1().Deployments(ns).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
+		}()
+
+		err := wait.Poll(1*time.Minute, 3*time.Minute, func() (bool, error) {
+			allPods, err := kubeclient.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
+			if err != nil {
+				log.Fatalf("Error listing pods: %v", err)
+			}
+			for _, pod := range allPods.Items {
+				if pod.Status.Phase == corev1.PodRunning {
+					return true, errors.New("there are pods running: " + pod.Name)
+				}
+			}
+			return true, nil
+		})
+		allPods, _ := kubeclient.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
+		fmt.Printf("there are %v pods\n", len(allPods.Items))
+		for _, pod := range allPods.Items {
+			fmt.Printf(" - Pod: %s - %s\n", pod.Name, pod.Status.Phase)
+		}
+		o.Expect(kerrors.IsNotFound(err)).To(o.BeFalse(), "The NotFound error should not occur")
+
+		g.By("create a PodDisruptionBudget")
+		pdbName := "my-pdb"
+		pdb := &applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration{
+			ObjectMetaApplyConfiguration: &clientmetav1.ObjectMetaApplyConfiguration{
+				Name:      &pdbName,
+				Namespace: &ns,
+			},
+			Spec: &applyconfigurationspolicyv1.PodDisruptionBudgetSpecApplyConfiguration{
+				MaxUnavailable: &intstr.IntOrString{
+					Type:   intstr.Int,
+					IntVal: 1,
+				},
+			},
+		}
+		kubeclient.PolicyV1().PodDisruptionBudgets(ns).Apply(context.TODO(), pdb, metav1.ApplyOptions{})
+
+		defer func() {
+			kubeclient.PolicyV1().PodDisruptionBudgets(ns).Delete(context.TODO(), pdbName, metav1.DeleteOptions{})
+		}()
+
+		g.By("wait some minutes, there is a critical issue for PDB")
+		token, _ := CreateServiceAccount(kubeclient, "monitorer", "cluster-admin", ns)
+		defer func() {
+			DeleteServiceAccount(kubeclient, "monitorer", "cluster-admin", ns)
+		}()
+		// TODO: get alert
+		fmt.Println(token)
+	})
+})
```
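One more observation on the polling block above, not raised in the recorded review: `wait.Poll` is deprecated in recent `k8s.io/apimachinery` releases in favor of context-aware variants. A sketch of the same wait using `wait.PollUntilContextTimeout` (assuming an apimachinery version that ships it, roughly v0.27+), which also retries transient List errors instead of calling `log.Fatalf`:

```go
err := wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, 3*time.Minute, true,
	func(ctx context.Context) (bool, error) {
		allPods, err := kubeclient.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
		if err != nil {
			return false, nil // treat a failed List as transient and poll again
		}
		for _, pod := range allPods.Items {
			if pod.Status.Phase == corev1.PodRunning {
				// Returning a non-nil error stops polling immediately.
				return false, fmt.Errorf("pod %s is unexpectedly running", pod.Name)
			}
		}
		return true, nil // no pod is Running; condition satisfied
	})
o.Expect(err).NotTo(o.HaveOccurred())
```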
Second file, new in this PR (package utilities, imported above as `github.com/openshift/cluster-version-operator/test/utilities`), at `@@ -0,0 +1,71 @@`:

```go
package utilities

import (
	"errors"
	"fmt"
	"os"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"

	configclientv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
)

// getKubeConfig reads the KUBECONFIG path from the environment variable.
func getKubeConfig() (*rest.Config, error) {
	configPath, present := os.LookupEnv("KUBECONFIG")
	if !present {
		return nil, errors.New("the environment variable KUBECONFIG must be set")
	}
	config, err := clientcmd.BuildConfigFromFlags("", configPath)
	return config, err
}

// getKubeClient creates a kubernetes.Clientset instance.
func getKubeClient() (*kubernetes.Clientset, error) {
	config, err := getKubeConfig()
	if err != nil {
		return nil, fmt.Errorf("unable to load build config: %w", err)
	}
	// Create the Clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, fmt.Errorf("unable to create a Kubernetes clientset: %w", err)
	}

	return clientset, nil
}

// getV1Client creates a configclientv1.ConfigV1Client instance.
func getV1Client() (*configclientv1.ConfigV1Client, error) {
	config, err := getKubeConfig()
	if err != nil {
		return nil, fmt.Errorf("unable to load build config: %w", err)
	}
	// Create the config client
	clientset, err := configclientv1.NewForConfig(config)
	if err != nil {
		return nil, fmt.Errorf("unable to create a configclientv1 clientset: %w", err)
	}

	return clientset, nil
}

// MustGetKubeClient creates a kubernetes.Clientset instance, or panics on failure.
func MustGetKubeClient() *kubernetes.Clientset {
	clientset, err := getKubeClient()
	if err != nil {
		panic("unable to create a Kubernetes clientset: " + err.Error())
	}
	return clientset
}

// MustGetV1Client creates a configclientv1.ConfigV1Client instance, or panics on failure.
func MustGetV1Client() *configclientv1.ConfigV1Client {
	clientset, err := getV1Client()
	if err != nil {
		panic("unable to create a configclientv1 clientset: " + err.Error())
	}
	return clientset
}
```
**Replace panic calls with proper error handling or Gomega assertions.**

Using `panic` for error handling in test helpers makes debugging more difficult and doesn't integrate well with Ginkgo's failure reporting. Consider either returning errors for the caller to handle or using Gomega assertions (`o.Expect(err).NotTo(o.HaveOccurred())`) for clearer test failure messages.

Apply this diff:

```diff
 func DeleteServiceAccount(client *kubernetes.Clientset, accountName string, clusterRole string, namespace string) {
 	name := fmt.Sprintf("%s:%s:%s", namespace, clusterRole, accountName)
 	err := client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), name, metav1.DeleteOptions{})
-	if err != nil {
-		panic("failed to delete ClusterRoleBindings")
-	}
+	o.Expect(err).NotTo(o.HaveOccurred(), "failed to delete ClusterRoleBinding %s", name)
 
 	err = client.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), accountName, metav1.DeleteOptions{})
-	if err != nil {
-		panic("failed to delete ServiceAccount")
-	}
+	o.Expect(err).NotTo(o.HaveOccurred(), "failed to delete ServiceAccount %s/%s", namespace, accountName)
 }
```
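If the helper should also stay usable outside a Ginkgo node, a third option is to return the errors instead. A sketch, assuming Go 1.20+ for `errors.Join` and the same imports as the test file above:

```go
// DeleteServiceAccount removes the ClusterRoleBinding and the ServiceAccount,
// attempting both deletes and reporting any failures, so the caller (e.g. a
// deferred cleanup func) decides whether to assert, log, or ignore.
func DeleteServiceAccount(client *kubernetes.Clientset, accountName, clusterRole, namespace string) error {
	name := fmt.Sprintf("%s:%s:%s", namespace, clusterRole, accountName)
	var errs []error
	if err := client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil {
		errs = append(errs, fmt.Errorf("delete ClusterRoleBinding %s: %w", name, err))
	}
	if err := client.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), accountName, metav1.DeleteOptions{}); err != nil {
		errs = append(errs, fmt.Errorf("delete ServiceAccount %s/%s: %w", namespace, accountName, err))
	}
	return errors.Join(errs...) // nil when both deletes succeed
}
```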