
Commit e22f48b

feat: create kubectl-fleet plugin
Signed-off-by: Wantong Jiang <[email protected]>
1 parent 4aedc78

20 files changed: +1012 additions, -477 deletions

CLAUDE.md

Lines changed: 5 additions & 1 deletion
@@ -154,4 +154,8 @@ All controllers follow standard Kubernetes controller patterns:
 - Controllers should be thoroughly tested with integration tests
 - New scheduler plugins should implement both Filter and Score interfaces
 - Use existing patterns from similar controllers when adding new functionality
-- Property providers should implement the `PropertyProvider` interface
+- Property providers should implement the `PropertyProvider` interface
+
+## Coding Style
+- When writing comments, capitalize the first character (unless it starts with the function name) and end with a period
+- When writing tests, use the keyword `want` instead of `expected`
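The new test rule is the standard Go table-driven convention. A minimal sketch of what both style rules look like in practice (`Add` and the test table are made-up examples, not code from this repository):

```go
package example

import "testing"

// Add returns the sum of a and b; a stand-in function for this example.
func Add(a, b int) int { return a + b }

// TestAdd demonstrates the two rules above: comments are capitalized (or start
// with the function name) and end with a period, and the test table names its
// expectation field `want` rather than `expected`.
func TestAdd(t *testing.T) {
    tests := []struct {
        name string
        a, b int
        want int
    }{
        {name: "both positive", a: 1, b: 2, want: 3},
        {name: "with negative", a: -1, b: 2, want: 1},
    }
    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            if got := Add(tc.a, tc.b); got != tc.want {
                t.Errorf("Add(%d, %d) = %d, want %d", tc.a, tc.b, got, tc.want)
            }
        })
    }
}
```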

hack/Azure/property-based-scheduling.md

Lines changed: 5 additions & 5 deletions
@@ -103,7 +103,7 @@ To set up Fleet using the clusters you just created, you will need to install Fl
 First, clone the Fleet source code repository, which contains the Helm charts used for Fleet agent installation:
 
 ```sh
-git clone https://github.com/Azure/fleet.git
+git clone https://github.com/kubefleet-dev/kubefleet.git
 cd fleet
 git checkout demo
 ```

@@ -407,7 +407,7 @@ spec:
 EOF
 ```
 
-The CRP API enables great flexibility; you can set up different requirements/preferences in combination, such as finding all clusters with at least 5 nodes and 10 available CPU cores, or 4 of all the clusters with the cheapest memory cost and the most amount of available memory. [Read Fleet's API definition to learn more](https://github.com/Azure/fleet/blob/main/apis/placement/v1beta1/clusterresourceplacement_types.go).
+The CRP API enables great flexibility; you can set up different requirements/preferences in combination, such as finding all clusters with at least 5 nodes and 10 available CPU cores, or 4 of all the clusters with the cheapest memory cost and the most amount of available memory. [Read Fleet's API definition to learn more](https://github.com/kubefleet-dev/kubefleet/blob/main/apis/placement/v1beta1/clusterresourceplacement_types.go).
 
 ## Clean things up
 
@@ -421,9 +421,9 @@ All the AKS clusters in the resource group will be removed.
 
 ## What's next
 
-Congrats! We hope that property-based scheduling (preview) has improved your overall Fleet experience. If you have any questions, feedback, or concerns, please raise [a GitHub issue](https://github.com/Azure/fleet/issues).
+Congrats! We hope that property-based scheduling (preview) has improved your overall Fleet experience. If you have any questions, feedback, or concerns, please raise [a GitHub issue](https://github.com/kubefleet-dev/kubefleet/issues).
 
 Aside from property-based scheduling, Fleet offers many other scheduling features that are useful in a
-multi-cluster environment; check out the [How-to Guide: Using the Fleet `ClusterResourcePlacement` API](https://github.com/Azure/fleet/tree/main/docs/howtos/crp.md) for more information.
+multi-cluster environment; check out the [How-to Guide: Using the Fleet `ClusterResourcePlacement` API](https://kubefleet.dev/docs/how-tos/crp/) for more information.
 
-You can also review Fleet's [source code](https://github.com/Azure/fleet) or review its [documentation](https://github.com/Azure/fleet/tree/main/docs) on GitHub.
+You can also review Fleet's [source code](https://github.com/kubefleet-dev/kubefleet) or review its [documentation](https://kubefleet.dev/docs/) on GitHub.
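To ground the flexibility claim in the hunk above: hard requirements map to property selectors under `requiredDuringSchedulingIgnoredDuringExecution`, and preferences map to weighted property sorters in the CRP placement policy. A minimal Go sketch, assuming the property-selector fields and well-known property names of the v1beta1 placement API linked above (the field names, operator/sort constants, and property names here are assumptions to verify against `clusterresourceplacement_types.go`):

```go
package main

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1"
)

// buildCRP sketches the "at least 5 nodes, prefer the most available memory"
// combination from the guide. Property names and operator/sort constants are
// assumptions based on the linked API definition.
func buildCRP() *placementv1beta1.ClusterResourcePlacement {
    numClusters := int32(4)
    return &placementv1beta1.ClusterResourcePlacement{
        ObjectMeta: metav1.ObjectMeta{Name: "crp-property-demo"},
        Spec: placementv1beta1.PlacementSpec{
            Policy: &placementv1beta1.PlacementPolicy{
                PlacementType:    placementv1beta1.PickNPlacementType,
                NumberOfClusters: &numClusters,
                Affinity: &placementv1beta1.Affinity{
                    ClusterAffinity: &placementv1beta1.ClusterAffinity{
                        // Hard requirement: only clusters with at least 5 nodes.
                        RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{
                            ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{{
                                PropertySelector: &placementv1beta1.PropertySelector{
                                    MatchExpressions: []placementv1beta1.PropertySelectorRequirement{{
                                        Name:     "kubernetes-fleet.io/node-count",
                                        Operator: placementv1beta1.PropertySelectorGreaterThanOrEqualTo,
                                        Values:   []string{"5"},
                                    }},
                                },
                            }},
                        },
                        // Soft preference: rank candidates by available memory, descending.
                        PreferredDuringSchedulingIgnoredDuringExecution: []placementv1beta1.PreferredClusterSelector{{
                            Weight: 20,
                            Preference: placementv1beta1.ClusterSelectorTerm{
                                PropertySorter: &placementv1beta1.PropertySorter{
                                    Name:      "resources.kubernetes-fleet.io/available-memory",
                                    SortOrder: placementv1beta1.Descending,
                                },
                            },
                        }},
                    },
                },
            },
        },
    }
}
```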

test/e2e/drain_tool_test.go

Lines changed: 2 additions & 2 deletions
@@ -359,7 +359,7 @@ var _ = Describe("Drain is allowed on one cluster, blocked on others - ClusterRe
 
 func runDrainClusterBinary(hubClusterName, memberClusterName string) {
     By(fmt.Sprintf("draining cluster %s", memberClusterName))
-    cmd := exec.Command(drainBinaryPath,
+    cmd := exec.Command(fleetBinaryPath, "draincluster",
         "--hubClusterContext", hubClusterName,
         "--clusterName", memberClusterName)
     _, err := cmd.CombinedOutput()

@@ -368,7 +368,7 @@ func runDrainClusterBinary(hubClusterName, memberClusterName string) {
 
 func runUncordonClusterBinary(hubClusterName, memberClusterName string) {
     By(fmt.Sprintf("uncordoning cluster %s", memberClusterName))
-    cmd := exec.Command(uncordonBinaryPath,
+    cmd := exec.Command(fleetBinaryPath, "uncordoncluster",
         "--hubClusterContext", hubClusterName,
         "--clusterName", memberClusterName)
     _, err := cmd.CombinedOutput()
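After this change the two helpers differ only in the subcommand they pass to the shared binary, so a follow-up could collapse them into one function. A sketch reusing the file's existing imports (`runFleetSubcommand` is a hypothetical name, not part of this commit):

```go
// runFleetSubcommand runs the kubectl-fleet binary with the given subcommand
// against the hub cluster, failing the spec if the command errors.
func runFleetSubcommand(subcommand, hubClusterName, memberClusterName string) {
    By(fmt.Sprintf("running kubectl-fleet %s on cluster %s", subcommand, memberClusterName))
    cmd := exec.Command(fleetBinaryPath, subcommand,
        "--hubClusterContext", hubClusterName,
        "--clusterName", memberClusterName)
    output, err := cmd.CombinedOutput()
    Expect(err).ToNot(HaveOccurred(), "kubectl-fleet %s failed: %s", subcommand, string(output))
}
```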

test/e2e/setup.sh

Lines changed: 3 additions & 7 deletions
@@ -215,10 +215,6 @@ done
 # Create tools directory if it doesn't exist
 mkdir -p ../../hack/tools/bin
 
-# Build drain binary
-echo "Building drain binary..."
-go build -o ../../hack/tools/bin/kubectl-draincluster ../../tools/draincluster
-
-# Build uncordon binary
-echo "Building uncordon binary..."
-go build -o ../../hack/tools/bin/kubectl-uncordoncluster ../../tools/uncordoncluster
+# Build fleet plugin binary
+echo "Building fleet kubectl-plugin binary..."
+go build -o ../../hack/tools/bin/kubectl-fleet ../../tools/fleet
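The binary name matters here: kubectl discovers any executable named `kubectl-fleet` on `PATH` and exposes it as `kubectl fleet <subcommand>`. The actual `tools/fleet` sources are not shown in this excerpt; a minimal sketch of how such an entry point could wire the `draincluster` subcommand with cobra (the flag names mirror what the e2e tests pass; everything else is an assumption):

```go
package main

import (
    "fmt"
    "os"

    "github.com/spf13/cobra"
)

func main() {
    // kubectl finds kubectl-fleet on PATH and runs it as `kubectl fleet ...`.
    root := &cobra.Command{
        Use:   "kubectl-fleet",
        Short: "Fleet hub cluster operations",
    }

    var hubContext, clusterName string
    drain := &cobra.Command{
        Use:   "draincluster",
        Short: "Drain a member cluster",
        RunE: func(cmd *cobra.Command, args []string) error {
            // The real subcommand would evict placed resources via the hub
            // cluster; this stub only echoes the flags the e2e tests pass.
            fmt.Printf("draining %s via hub context %s\n", clusterName, hubContext)
            return nil
        },
    }
    drain.Flags().StringVar(&hubContext, "hubClusterContext", "", "kubeconfig context of the hub cluster")
    drain.Flags().StringVar(&clusterName, "clusterName", "", "name of the member cluster to drain")
    root.AddCommand(drain)

    if err := root.Execute(); err != nil {
        os.Exit(1)
    }
}
```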

test/e2e/setup_test.go

Lines changed: 4 additions & 7 deletions
@@ -173,8 +173,7 @@ var (
 )
 
 var (
-    drainBinaryPath    = filepath.Join("../../", "hack", "tools", "bin", "kubectl-draincluster")
-    uncordonBinaryPath = filepath.Join("../../", "hack", "tools", "bin", "kubectl-uncordoncluster")
+    fleetBinaryPath = filepath.Join("../../", "hack", "tools", "bin", "kubectl-fleet")
 )
 
 var (

@@ -376,11 +375,9 @@ func beforeSuiteForAllProcesses() {
         allMemberClusterNames = append(allMemberClusterNames, allMemberClusters[i].ClusterName)
     }
 
-    // Check if drain cluster and uncordon cluster binaries exist.
-    _, err := os.Stat(drainBinaryPath)
-    Expect(os.IsNotExist(err)).To(BeFalse(), fmt.Sprintf("drain binary not found at %s", drainBinaryPath))
-    _, err = os.Stat(uncordonBinaryPath)
-    Expect(os.IsNotExist(err)).To(BeFalse(), fmt.Sprintf("uncordon binary not found at %s", uncordonBinaryPath))
+    // Check if kubectl-fleet binary exists.
+    _, err := os.Stat(fleetBinaryPath)
+    Expect(os.IsNotExist(err)).To(BeFalse(), fmt.Sprintf("kubectl-fleet binary not found at %s", fleetBinaryPath))
     })
 }
 
test/e2e/updaterun_test.go

Lines changed: 122 additions & 0 deletions
@@ -18,6 +18,7 @@ package e2e
 
 import (
     "fmt"
+    "os/exec"
     "time"
 
     . "github.com/onsi/ginkgo/v2"

@@ -1033,6 +1034,127 @@ var _ = Describe("test CRP rollout with staged update run", func() {
         })
     })
 
+    Context("Test kubectl-fleet approve plugin with cluster approval requests", Ordered, func() {
+        var strategy *placementv1beta1.ClusterStagedUpdateStrategy
+        updateRunName := fmt.Sprintf(updateRunNameWithSubIndexTemplate, GinkgoParallelProcess(), 0)
+
+        BeforeAll(func() {
+            // Create a test namespace and a configMap inside it on the hub cluster.
+            createWorkResources()
+
+            // Create the CRP with external rollout strategy.
+            crp := &placementv1beta1.ClusterResourcePlacement{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name: crpName,
+                    // Add a custom finalizer; this would allow us to better observe
+                    // the behavior of the controllers.
+                    Finalizers: []string{customDeletionBlockerFinalizer},
+                },
+                Spec: placementv1beta1.PlacementSpec{
+                    ResourceSelectors: workResourceSelector(),
+                    Strategy: placementv1beta1.RolloutStrategy{
+                        Type: placementv1beta1.ExternalRolloutStrategyType,
+                    },
+                },
+            }
+            Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP")
+
+            // Create the clusterStagedUpdateStrategy.
+            strategy = createStagedUpdateStrategySucceed(strategyName)
+        })
+
+        AfterAll(func() {
+            // Remove the custom deletion blocker finalizer from the CRP.
+            ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters)
+
+            // Delete the clusterStagedUpdateRun.
+            ensureUpdateRunDeletion(updateRunName)
+
+            // Delete the clusterStagedUpdateStrategy.
+            ensureUpdateRunStrategyDeletion(strategyName)
+        })
+
+        It("Should create a staged update run and verify cluster approval request is created", func() {
+            validateLatestResourceSnapshot(crpName, resourceSnapshotIndex1st)
+            validateLatestPolicySnapshot(crpName, policySnapshotIndex1st, 3)
+            createStagedUpdateRunSucceed(updateRunName, crpName, resourceSnapshotIndex1st, strategyName)
+
+            // Verify that cluster approval request is created for canary stage.
+            Eventually(func() error {
+                appReqList := &placementv1beta1.ClusterApprovalRequestList{}
+                if err := hubClient.List(ctx, appReqList, client.MatchingLabels{
+                    placementv1beta1.TargetUpdatingStageNameLabel: envCanary,
+                    placementv1beta1.TargetUpdateRunLabel:         updateRunName,
+                }); err != nil {
+                    return fmt.Errorf("failed to list approval requests: %w", err)
+                }
+
+                if len(appReqList.Items) != 1 {
+                    return fmt.Errorf("want 1 approval request, got %d", len(appReqList.Items))
+                }
+                return nil
+            }, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to find cluster approval request")
+        })
+
+        It("Should approve cluster approval request using kubectl-fleet approve plugin", func() {
+            var approvalRequestName string
+
+            // Get the cluster approval request name.
+            Eventually(func() error {
+                appReqList := &placementv1beta1.ClusterApprovalRequestList{}
+                if err := hubClient.List(ctx, appReqList, client.MatchingLabels{
+                    placementv1beta1.TargetUpdatingStageNameLabel: envCanary,
+                    placementv1beta1.TargetUpdateRunLabel:         updateRunName,
+                }); err != nil {
+                    return fmt.Errorf("failed to list approval requests: %w", err)
+                }
+
+                if len(appReqList.Items) != 1 {
+                    return fmt.Errorf("want 1 approval request, got %d", len(appReqList.Items))
+                }
+
+                approvalRequestName = appReqList.Items[0].Name
+                return nil
+            }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to get approval request name")
+
+            // Use kubectl-fleet approve plugin to approve the request
+            cmd := exec.Command(fleetBinaryPath, "approve", "clusterapprovalrequest",
+                "--hubClusterContext", "kind-hub",
+                "--name", approvalRequestName)
+            output, err := cmd.CombinedOutput()
+            Expect(err).ToNot(HaveOccurred(), "kubectl-fleet approve failed: %s", string(output))
+
+            // Verify the approval request is approved
+            Eventually(func() error {
+                var appReq placementv1beta1.ClusterApprovalRequest
+                if err := hubClient.Get(ctx, client.ObjectKey{Name: approvalRequestName}, &appReq); err != nil {
+                    return fmt.Errorf("failed to get approval request: %w", err)
+                }
+
+                approvedCondition := meta.FindStatusCondition(appReq.Status.Conditions, string(placementv1beta1.ApprovalRequestConditionApproved))
+                if approvedCondition == nil {
+                    return fmt.Errorf("approved condition not found")
+                }
+                if approvedCondition.Status != metav1.ConditionTrue {
+                    return fmt.Errorf("approved condition status is %s, want True", approvedCondition.Status)
+                }
+                return nil
+            }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to verify approval request is approved")
+        })
+
+        It("Should complete the staged update run after approval", func() {
+            updateRunSucceededActual := updateRunStatusSucceededActual(updateRunName, policySnapshotIndex1st, len(allMemberClusters), defaultApplyStrategy, &strategy.Spec, [][]string{{allMemberClusterNames[1]}, {allMemberClusterNames[0], allMemberClusterNames[2]}}, nil, nil, nil)
+            Eventually(updateRunSucceededActual, updateRunEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to validate updateRun %s succeeded", updateRunName)
+            checkIfPlacedWorkResourcesOnMemberClustersInUpdateRun(allMemberClusters)
+        })
+
+        It("Should update crp status as completed", func() {
+            crpStatusUpdatedActual := crpStatusWithExternalStrategyActual(workResourceIdentifiers(), resourceSnapshotIndex1st, true, allMemberClusterNames,
+                []string{resourceSnapshotIndex1st, resourceSnapshotIndex1st, resourceSnapshotIndex1st}, []bool{true, true, true}, nil, nil)
+            Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName)
+        })
+    })
+
     Context("Test CRP rollout strategy transition from external to rollingUpdate", Ordered, func() {
         var strategy *placementv1beta1.ClusterStagedUpdateStrategy
         updateRunName := fmt.Sprintf(updateRunNameWithSubIndexTemplate, GinkgoParallelProcess(), 0)
tools/draincluster/README.md

Lines changed: 0 additions & 77 deletions
This file was deleted.
