Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
184 changes: 184 additions & 0 deletions test/integration/api/appliedmanifestwork_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,184 @@
// Copyright Contributors to the Open Cluster Management project
package api

import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/errors"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
workv1 "open-cluster-management.io/api/work/v1"
)

// AppliedManifestWork v1 API integration tests: create the resource with a
// minimal valid spec, then exercise status updates through the UpdateStatus
// subresource.
var _ = ginkgo.Describe("AppliedManifestWork v1 API test", func() {
	// Regenerated before each spec so parallel/repeated runs never collide.
	var appliedManifestWorkName string

	ginkgo.BeforeEach(func() {
		suffix := rand.String(5)
		appliedManifestWorkName = fmt.Sprintf("appliedmanifestwork-%s", suffix)
	})

	ginkgo.AfterEach(func() {
		// Best-effort cleanup: the resource may never have been created in a
		// failing spec, so NotFound is not a cleanup failure.
		err := hubWorkClient.WorkV1().AppliedManifestWorks().Delete(context.TODO(), appliedManifestWorkName, metav1.DeleteOptions{})
		if err != nil && !errors.IsNotFound(err) {
			gomega.Expect(err).ToNot(gomega.HaveOccurred())
		}
	})

	// newAppliedManifestWork builds a minimal AppliedManifestWork with all
	// required spec fields populated, named after the per-spec name.
	newAppliedManifestWork := func() *workv1.AppliedManifestWork {
		return &workv1.AppliedManifestWork{
			ObjectMeta: metav1.ObjectMeta{
				Name: appliedManifestWorkName,
			},
			Spec: workv1.AppliedManifestWorkSpec{
				HubHash:          "test-hub-hash",
				AgentID:          "test-agent",
				ManifestWorkName: "test-manifestwork",
			},
		}
	}

	ginkgo.Context("AppliedManifestWork creation and validation", func() {
		ginkgo.It("should create AppliedManifestWork with basic spec", func() {
			_, err := hubWorkClient.WorkV1().AppliedManifestWorks().Create(context.TODO(), newAppliedManifestWork(), metav1.CreateOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())
		})

		ginkgo.It("should handle AppliedManifestWork with applied resources", func() {
			appliedManifestWork, err := hubWorkClient.WorkV1().AppliedManifestWorks().Create(context.TODO(), newAppliedManifestWork(), metav1.CreateOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())

			// This case was previously a copy of the basic-spec test and
			// never recorded any applied resource; record one via the status
			// subresource so the spec exercises what its name claims.
			appliedManifestWork.Status = workv1.AppliedManifestWorkStatus{
				AppliedResources: []workv1.AppliedManifestResourceMeta{
					{
						ResourceIdentifier: workv1.ResourceIdentifier{
							Group:     "",
							Resource:  "configmaps",
							Name:      "test-configmap",
							Namespace: "default",
						},
						Version: "v1",
						UID:     "test-uid-123",
					},
				},
			}

			updatedWork, err := hubWorkClient.WorkV1().AppliedManifestWorks().UpdateStatus(context.TODO(), appliedManifestWork, metav1.UpdateOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())
			gomega.Expect(len(updatedWork.Status.AppliedResources)).Should(gomega.Equal(1))
		})
	})

	ginkgo.Context("AppliedManifestWork status validation", func() {
		ginkgo.It("should allow status updates with applied resource status", func() {
			appliedManifestWork, err := hubWorkClient.WorkV1().AppliedManifestWorks().Create(context.TODO(), newAppliedManifestWork(), metav1.CreateOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())

			// A single applied resource in status must round-trip through
			// the status subresource.
			appliedManifestWork.Status = workv1.AppliedManifestWorkStatus{
				AppliedResources: []workv1.AppliedManifestResourceMeta{
					{
						ResourceIdentifier: workv1.ResourceIdentifier{
							Group:     "",
							Resource:  "configmaps",
							Name:      "test-configmap",
							Namespace: "default",
						},
						Version: "v1",
						UID:     "test-uid-123",
					},
				},
			}

			_, err = hubWorkClient.WorkV1().AppliedManifestWorks().UpdateStatus(context.TODO(), appliedManifestWork, metav1.UpdateOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())
		})

		ginkgo.It("should handle complex status with multiple resources", func() {
			appliedManifestWork, err := hubWorkClient.WorkV1().AppliedManifestWorks().Create(context.TODO(), newAppliedManifestWork(), metav1.CreateOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())

			// Multiple applied resources spanning core and group APIs.
			appliedManifestWork.Status = workv1.AppliedManifestWorkStatus{
				AppliedResources: []workv1.AppliedManifestResourceMeta{
					{
						ResourceIdentifier: workv1.ResourceIdentifier{
							Group:     "",
							Resource:  "configmaps",
							Name:      "test-configmap",
							Namespace: "default",
						},
						Version: "v1",
						UID:     "configmap-uid-123",
					},
					{
						ResourceIdentifier: workv1.ResourceIdentifier{
							Group:     "apps",
							Resource:  "deployments",
							Name:      "test-deployment",
							Namespace: "default",
						},
						Version: "v1",
						UID:     "deployment-uid-456",
					},
				},
			}

			updatedWork, err := hubWorkClient.WorkV1().AppliedManifestWorks().UpdateStatus(context.TODO(), appliedManifestWork, metav1.UpdateOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())
			gomega.Expect(len(updatedWork.Status.AppliedResources)).Should(gomega.Equal(2))
		})
	})

	ginkgo.Context("AppliedManifestWork validation edge cases", func() {
		ginkgo.It("should create with required fields", func() {
			createdAppliedWork, err := hubWorkClient.WorkV1().AppliedManifestWorks().Create(context.TODO(), newAppliedManifestWork(), metav1.CreateOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())
			gomega.Expect(createdAppliedWork.Spec.HubHash).Should(gomega.Equal("test-hub-hash"))
			gomega.Expect(createdAppliedWork.Spec.AgentID).Should(gomega.Equal("test-agent"))
		})

		ginkgo.It("should handle empty applied resources list", func() {
			appliedManifestWork, err := hubWorkClient.WorkV1().AppliedManifestWorks().Create(context.TODO(), newAppliedManifestWork(), metav1.CreateOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())
			gomega.Expect(appliedManifestWork.Spec.HubHash).Should(gomega.Equal("test-hub-hash"))

			// This case previously never touched applied resources; update
			// status with an explicitly empty list so the spec exercises
			// what its name claims.
			appliedManifestWork.Status = workv1.AppliedManifestWorkStatus{
				AppliedResources: []workv1.AppliedManifestResourceMeta{},
			}
			updatedWork, err := hubWorkClient.WorkV1().AppliedManifestWorks().UpdateStatus(context.TODO(), appliedManifestWork, metav1.UpdateOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())
			gomega.Expect(updatedWork.Status.AppliedResources).Should(gomega.BeEmpty())
		})
	})
})
179 changes: 179 additions & 0 deletions test/integration/api/clustermanager_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,11 @@ package api
import (
"context"
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
operatorv1 "open-cluster-management.io/api/operator/v1"
Expand Down Expand Up @@ -410,3 +412,180 @@ var _ = Describe("ClusterManager API test with WorkConfiguration", func() {
Expect(clusterManager.Spec.WorkConfiguration.FeatureGates[1].Mode).Should(Equal(operatorv1.FeatureGateModeTypeEnable))
})
})

var _ = Describe("ClusterManager v1 Enhanced API test", func() {
var clusterManagerName string

BeforeEach(func() {
suffix := rand.String(5)
clusterManagerName = fmt.Sprintf("cm-enhanced-%s", suffix)
})

AfterEach(func() {
err := operatorClient.OperatorV1().ClusterManagers().Delete(context.TODO(), clusterManagerName, metav1.DeleteOptions{})
if !apierrors.IsForbidden(err) {
Expect(err).ToNot(HaveOccurred())
}
})
Comment on lines +424 to +429
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

AfterEach should ignore NotFound as well

Avoid failing cleanup when the resource wasn’t created.

 	AfterEach(func() {
 		err := operatorClient.OperatorV1().ClusterManagers().Delete(context.TODO(), clusterManagerName, metav1.DeleteOptions{})
-		if !apierrors.IsForbidden(err) {
-			Expect(err).ToNot(HaveOccurred())
-		}
+		if err != nil && !apierrors.IsForbidden(err) && !apierrors.IsNotFound(err) {
+			Expect(err).ToNot(HaveOccurred())
+		}
 	})
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
AfterEach(func() {
err := operatorClient.OperatorV1().ClusterManagers().Delete(context.TODO(), clusterManagerName, metav1.DeleteOptions{})
if !apierrors.IsForbidden(err) {
Expect(err).ToNot(HaveOccurred())
}
})
AfterEach(func() {
err := operatorClient.OperatorV1().ClusterManagers().Delete(context.TODO(), clusterManagerName, metav1.DeleteOptions{})
- if !apierrors.IsForbidden(err) {
- Expect(err).ToNot(HaveOccurred())
if err != nil && !apierrors.IsForbidden(err) && !apierrors.IsNotFound(err) {
Expect(err).ToNot(HaveOccurred())
}
})
🤖 Prompt for AI Agents
In test/integration/api/clustermanager_test.go around lines 424 to 429, the
AfterEach cleanup currently ignores Forbidden errors but will fail if the
ClusterManager was never created; update the error handling to also ignore
NotFound errors by checking apierrors.IsNotFound(err) alongside
apierrors.IsForbidden(err) (i.e., treat nil, IsForbidden, or IsNotFound as
non-fatal) so the test cleanup does not fail when the resource does not exist.


Context("ClusterManager comprehensive configuration validation", func() {
It("should handle complete configuration with all optional fields", func() {
clusterManager := &operatorv1.ClusterManager{
ObjectMeta: metav1.ObjectMeta{
Name: clusterManagerName,
},
Spec: operatorv1.ClusterManagerSpec{
RegistrationImagePullSpec: "quay.io/test/registration:latest",
WorkImagePullSpec: "quay.io/test/work:latest",
PlacementImagePullSpec: "quay.io/test/placement:latest",
AddOnManagerImagePullSpec: "quay.io/test/addon-manager:latest",
NodePlacement: operatorv1.NodePlacement{
NodeSelector: map[string]string{
"node-role.kubernetes.io/infra": "",
},
Tolerations: []v1.Toleration{
{
Key: "node-role.kubernetes.io/infra",
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
},
},
},
DeployOption: operatorv1.ClusterManagerDeployOption{
Mode: operatorv1.InstallModeDefault,
},
RegistrationConfiguration: &operatorv1.RegistrationHubConfiguration{
AutoApproveUsers: []string{"system:admin"},
FeatureGates: []operatorv1.FeatureGate{
{
Feature: "DefaultClusterSet",
Mode: operatorv1.FeatureGateModeTypeEnable,
},
},
},
WorkConfiguration: &operatorv1.WorkConfiguration{
WorkDriver: operatorv1.WorkDriverTypeKube,
FeatureGates: []operatorv1.FeatureGate{
{
Feature: "ManifestWorkReplicaSet",
Mode: operatorv1.FeatureGateModeTypeEnable,
},
},
},
},
}

createdClusterManager, err := operatorClient.OperatorV1().ClusterManagers().Create(context.TODO(), clusterManager, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(createdClusterManager.Spec.NodePlacement.NodeSelector["node-role.kubernetes.io/infra"]).Should(Equal(""))
Expect(len(createdClusterManager.Spec.NodePlacement.Tolerations)).Should(Equal(1))
Expect(createdClusterManager.Spec.RegistrationConfiguration.FeatureGates[0].Mode).Should(Equal(operatorv1.FeatureGateModeTypeEnable))
Expect(createdClusterManager.Spec.WorkConfiguration.FeatureGates[0].Mode).Should(Equal(operatorv1.FeatureGateModeTypeEnable))
})

It("should validate addon manager configuration", func() {
clusterManager := &operatorv1.ClusterManager{
ObjectMeta: metav1.ObjectMeta{
Name: clusterManagerName,
},
Spec: operatorv1.ClusterManagerSpec{
AddOnManagerConfiguration: &operatorv1.AddOnManagerConfiguration{
FeatureGates: []operatorv1.FeatureGate{
{
Feature: "AddonManagement",
Mode: operatorv1.FeatureGateModeTypeEnable,
},
},
},
},
}

createdClusterManager, err := operatorClient.OperatorV1().ClusterManagers().Create(context.TODO(), clusterManager, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(createdClusterManager.Spec.AddOnManagerConfiguration.FeatureGates[0].Feature).Should(Equal("AddonManagement"))
})

It("should validate server configuration", func() {
clusterManager := &operatorv1.ClusterManager{
ObjectMeta: metav1.ObjectMeta{
Name: clusterManagerName,
},
Spec: operatorv1.ClusterManagerSpec{
ServerConfiguration: &operatorv1.ServerConfiguration{},
},
}

createdClusterManager, err := operatorClient.OperatorV1().ClusterManagers().Create(context.TODO(), clusterManager, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(createdClusterManager.Spec.ServerConfiguration).ShouldNot(BeNil())
})
})

Context("ClusterManager resource requirements", func() {
It("should handle resource requirements configuration", func() {
clusterManager := &operatorv1.ClusterManager{
ObjectMeta: metav1.ObjectMeta{
Name: clusterManagerName,
},
Spec: operatorv1.ClusterManagerSpec{
ResourceRequirement: &operatorv1.ResourceRequirement{
Type: operatorv1.ResourceQosClassResourceRequirement,
},
},
}

createdClusterManager, err := operatorClient.OperatorV1().ClusterManagers().Create(context.TODO(), clusterManager, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(createdClusterManager.Spec.ResourceRequirement.Type).Should(Equal(operatorv1.ResourceQosClassResourceRequirement))
})
})

Context("ClusterManager status updates", func() {
It("should allow status updates", func() {
clusterManager := &operatorv1.ClusterManager{
ObjectMeta: metav1.ObjectMeta{
Name: clusterManagerName,
},
Spec: operatorv1.ClusterManagerSpec{},
}

createdClusterManager, err := operatorClient.OperatorV1().ClusterManagers().Create(context.TODO(), clusterManager, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())

// Update status
createdClusterManager.Status = operatorv1.ClusterManagerStatus{
ObservedGeneration: 1,
Conditions: []metav1.Condition{
{
Type: "Applied",
Status: metav1.ConditionTrue,
Reason: "ClusterManagerDeployed",
LastTransitionTime: metav1.Now(),
},
},
Generations: []operatorv1.GenerationStatus{
{
Group: "apps",
Version: "v1",
Resource: "deployments",
Namespace: "open-cluster-management-hub",
Name: "cluster-manager-registration-controller",
LastGeneration: 1,
},
},
RelatedResources: []operatorv1.RelatedResourceMeta{
{
Group: "apps",
Version: "v1",
Resource: "deployments",
Namespace: "open-cluster-management-hub",
Name: "cluster-manager-registration-controller",
},
},
}

_, err = operatorClient.OperatorV1().ClusterManagers().UpdateStatus(context.TODO(), createdClusterManager, metav1.UpdateOptions{})
Expect(err).ToNot(HaveOccurred())
})
})
})
Loading