
Commit 0527d86

bump up version (#105)

modify golang ci

Signed-off-by: nasusoba <[email protected]>

1 parent 7d52020 commit 0527d86

13 files changed (+272 -951 lines)

.golangci.yml

Lines changed: 3 additions & 3 deletions
@@ -147,14 +147,14 @@ linters-settings:
       - name: unused-parameter
         disabled: true
   staticcheck:
-    go: "1.20"
+    go: "1.21"
   stylecheck:
-    go: "1.20"
+    go: "1.21"
   tagliatelle:
     case:
       rules:
         # Any struct tag type can be used.
         # Support string case: `camel`, `pascal`, `kebab`, `snake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower`, `header`
         json: goCamel
   unused:
-    go: "1.20"
+    go: "1.21"

Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@
 # limitations under the License.

 # Build the manager binary
-FROM --platform=${BUILDPLATFORM} docker.io/library/golang:1.20.7 as build
+FROM --platform=${BUILDPLATFORM} docker.io/library/golang:1.21.9 as build
 ARG TARGETOS TARGETARCH
 ARG package

Makefile

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@ SHELL:=/usr/bin/env bash

 .DEFAULT_GOAL:=help

-GO_VERSION ?= 1.20.7
+GO_VERSION ?= 1.21.9
 GO_CONTAINER_IMAGE ?= docker.io/library/golang:$(GO_VERSION)

 ARCH ?= $(shell go env GOARCH)

controlplane/controllers/kthreescontrolplane_controller.go

Lines changed: 11 additions & 11 deletions
@@ -34,6 +34,7 @@ import (
 	"sigs.k8s.io/cluster-api/controllers/external"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
+	"sigs.k8s.io/cluster-api/util/collections"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
@@ -48,7 +49,6 @@ import (
 	controlplanev1 "github.com/k3s-io/cluster-api-k3s/controlplane/api/v1beta2"
 	k3s "github.com/k3s-io/cluster-api-k3s/pkg/k3s"
 	"github.com/k3s-io/cluster-api-k3s/pkg/kubeconfig"
-	"github.com/k3s-io/cluster-api-k3s/pkg/machinefilters"
 	"github.com/k3s-io/cluster-api-k3s/pkg/secret"
 	"github.com/k3s-io/cluster-api-k3s/pkg/token"
 )
@@ -185,7 +185,7 @@ func (r *KThreesControlPlaneReconciler) reconcileDelete(ctx context.Context, clu
 	if err != nil {
 		return reconcile.Result{}, err
 	}
-	ownedMachines := allMachines.Filter(machinefilters.OwnedMachines(kcp))
+	ownedMachines := allMachines.Filter(collections.OwnedMachines(kcp))

 	// If no control plane machines remain, remove the finalizer
 	if len(ownedMachines) == 0 {
@@ -219,7 +219,7 @@ func (r *KThreesControlPlaneReconciler) reconcileDelete(ctx context.Context, clu
 	}

 	// Delete control plane machines in parallel
-	machinesToDelete := ownedMachines.Filter(machinefilters.Not(machinefilters.HasDeletionTimestamp))
+	machinesToDelete := ownedMachines.Filter(collections.Not(collections.HasDeletionTimestamp))
 	var errs []error
 	for i := range machinesToDelete {
 		m := machinesToDelete[i]
@@ -334,12 +334,12 @@ func (r *KThreesControlPlaneReconciler) ClusterToKThreesControlPlane(ctx context
 // updateStatus is called after every reconcilitation loop in a defer statement to always make sure we have the
 // resource status subresourcs up-to-date.
 func (r *KThreesControlPlaneReconciler) updateStatus(ctx context.Context, kcp *controlplanev1.KThreesControlPlane, cluster *clusterv1.Cluster) error {
-	selector := machinefilters.ControlPlaneSelectorForCluster(cluster.Name)
+	selector := collections.ControlPlaneSelectorForCluster(cluster.Name)
 	// Copy label selector to its status counterpart in string format.
 	// This is necessary for CRDs including scale subresources.
 	kcp.Status.Selector = selector.String()

-	ownedMachines, err := r.managementCluster.GetMachinesForCluster(ctx, util.ObjectKey(cluster), machinefilters.OwnedMachines(kcp))
+	ownedMachines, err := r.managementCluster.GetMachinesForCluster(ctx, util.ObjectKey(cluster), collections.OwnedMachines(kcp))
 	if err != nil {
 		return fmt.Errorf("failed to get list of owned machines: %w", err)
 	}
@@ -377,7 +377,7 @@ func (r *KThreesControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c
 	// make sure last resize operation is marked as completed.
 	// NOTE: we are checking the number of machines ready so we report resize completed only when the machines
 	// are actually provisioned (vs reporting completed immediately after the last machine object is created).
-	readyMachines := ownedMachines.Filter(machinefilters.IsReady())
+	readyMachines := ownedMachines.Filter(collections.IsReady())
 	if int32(len(readyMachines)) == replicas {
 		conditions.MarkTrue(kcp, controlplanev1.ResizedCondition)
 	}
@@ -443,20 +443,20 @@ func (r *KThreesControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 		return result, err
 	}

-	controlPlaneMachines, err := r.managementClusterUncached.GetMachinesForCluster(ctx, util.ObjectKey(cluster), machinefilters.ControlPlaneMachines(cluster.Name))
+	controlPlaneMachines, err := r.managementClusterUncached.GetMachinesForCluster(ctx, util.ObjectKey(cluster), collections.ControlPlaneMachines(cluster.Name))
 	if err != nil {
 		logger.Error(err, "failed to retrieve control plane machines for cluster")
 		return reconcile.Result{}, err
 	}

-	adoptableMachines := controlPlaneMachines.Filter(machinefilters.AdoptableControlPlaneMachines(cluster.Name))
+	adoptableMachines := controlPlaneMachines.Filter(collections.AdoptableControlPlaneMachines(cluster.Name))
 	if len(adoptableMachines) > 0 {
 		// We adopt the Machines and then wait for the update event for the ownership reference to re-queue them so the cache is up-to-date
 		// err = r.adoptMachines(ctx, kcp, adoptableMachines, cluster)
 		return reconcile.Result{}, err
 	}

-	ownedMachines := controlPlaneMachines.Filter(machinefilters.OwnedMachines(kcp))
+	ownedMachines := controlPlaneMachines.Filter(collections.OwnedMachines(kcp))
 	if len(ownedMachines) != len(controlPlaneMachines) {
 		logger.Info("Not all control plane machines are owned by this KThreesControlPlane, refusing to operate in mixed management mode")
 		return reconcile.Result{}, nil
@@ -526,7 +526,7 @@ func (r *KThreesControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	case numMachines > desiredReplicas:
 		logger.Info("Scaling down control plane", "Desired", desiredReplicas, "Existing", numMachines)
 		// The last parameter (i.e. machines needing to be rolled out) should always be empty here.
-		return r.scaleDownControlPlane(ctx, cluster, kcp, controlPlane, k3s.FilterableMachineCollection{})
+		return r.scaleDownControlPlane(ctx, cluster, kcp, controlPlane, collections.Machines{})
 	}

 	// Get the workload cluster client.
@@ -711,7 +711,7 @@ func (r *KThreesControlPlaneReconciler) upgradeControlPlane(
 	cluster *clusterv1.Cluster,
 	kcp *controlplanev1.KThreesControlPlane,
 	controlPlane *k3s.ControlPlane,
-	machinesRequireUpgrade k3s.FilterableMachineCollection,
+	machinesRequireUpgrade collections.Machines,
 ) (ctrl.Result, error) {
 	// TODO: handle reconciliation of etcd members and kubeadm config in case they get out of sync with cluster
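
Note: alongside the Go bump, this commit swaps the repo-local pkg/machinefilters helpers in the control plane controllers for the upstream sigs.k8s.io/cluster-api/util/collections package. Below is a minimal sketch of how those filter helpers compose, assuming the upstream collections API as imported in the diff (collections.FromMachines is used here purely for illustration):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/collections"
)

func main() {
	// Two control plane machines, one of which is being deleted.
	healthy := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "cp-0"}}
	now := metav1.Now()
	deleting := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "cp-1", DeletionTimestamp: &now}}

	machines := collections.FromMachines(healthy, deleting)

	// Keep only machines without a deletion timestamp, mirroring the
	// reconcileDelete hunk above.
	remaining := machines.Filter(collections.Not(collections.HasDeletionTimestamp))
	fmt.Println(remaining.Names()) // [cp-0]
}
```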

controlplane/controllers/scale.go

Lines changed: 12 additions & 12 deletions
@@ -32,24 +32,24 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/controllers/external"
 	"sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/collections"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 	ctrl "sigs.k8s.io/controller-runtime"

 	bootstrapv1 "github.com/k3s-io/cluster-api-k3s/bootstrap/api/v1beta2"
 	controlplanev1 "github.com/k3s-io/cluster-api-k3s/controlplane/api/v1beta2"
 	k3s "github.com/k3s-io/cluster-api-k3s/pkg/k3s"
-	"github.com/k3s-io/cluster-api-k3s/pkg/machinefilters"
 )

 var ErrPreConditionFailed = errors.New("precondition check failed")

 func (r *KThreesControlPlaneReconciler) initializeControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KThreesControlPlane, controlPlane *k3s.ControlPlane) (ctrl.Result, error) {
-	logger := controlPlane.Logger()
+	logger := ctrl.LoggerFrom(ctx)

 	// Perform an uncached read of all the owned machines. This check is in place to make sure
 	// that the controller cache is not misbehaving and we end up initializing the cluster more than once.
-	ownedMachines, err := r.managementClusterUncached.GetMachinesForCluster(ctx, util.ObjectKey(cluster), machinefilters.OwnedMachines(kcp))
+	ownedMachines, err := r.managementClusterUncached.GetMachinesForCluster(ctx, util.ObjectKey(cluster), collections.OwnedMachines(kcp))
 	if err != nil {
 		logger.Error(err, "failed to perform an uncached read of control plane machines for cluster")
 		return ctrl.Result{}, err
@@ -62,7 +62,7 @@ func (r *KThreesControlPlaneReconciler) initializeControlPlane(ctx context.Conte
 	}

 	bootstrapSpec := controlPlane.InitialControlPlaneConfig()
-	fd := controlPlane.NextFailureDomainForScaleUp()
+	fd := controlPlane.NextFailureDomainForScaleUp(ctx)
 	if err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, fd); err != nil {
 		logger.Error(err, "Failed to create initial control plane Machine")
 		r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedInitialization", "Failed to create initial control plane Machine for cluster %s/%s control plane: %v", cluster.Namespace, cluster.Name, err)
@@ -74,7 +74,7 @@ func (r *KThreesControlPlaneReconciler) initializeControlPlane(ctx context.Conte
 }

 func (r *KThreesControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KThreesControlPlane, controlPlane *k3s.ControlPlane) (ctrl.Result, error) {
-	logger := controlPlane.Logger()
+	logger := ctrl.LoggerFrom(ctx)

 	// Run preflight checks to ensure that the control plane is stable before proceeding with a scale up/scale down operation; if not, wait.
 	if result, err := r.preflightChecks(ctx, controlPlane); err != nil || !result.IsZero() {
@@ -83,7 +83,7 @@ func (r *KThreesControlPlaneReconciler) scaleUpControlPlane(ctx context.Context,

 	// Create the bootstrap configuration
 	bootstrapSpec := controlPlane.JoinControlPlaneConfig()
-	fd := controlPlane.NextFailureDomainForScaleUp()
+	fd := controlPlane.NextFailureDomainForScaleUp(ctx)
 	if err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, fd); err != nil {
 		logger.Error(err, "Failed to create additional control plane Machine")
 		r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedScaleUp", "Failed to create additional control plane Machine for cluster %s/%s control plane: %v", cluster.Namespace, cluster.Name, err)
@@ -99,12 +99,12 @@ func (r *KThreesControlPlaneReconciler) scaleDownControlPlane(
 	cluster *clusterv1.Cluster,
 	kcp *controlplanev1.KThreesControlPlane,
 	controlPlane *k3s.ControlPlane,
-	outdatedMachines k3s.FilterableMachineCollection,
+	outdatedMachines collections.Machines,
 ) (ctrl.Result, error) {
-	logger := controlPlane.Logger()
+	logger := ctrl.LoggerFrom(ctx)

 	// Pick the Machine that we should scale down.
-	machineToDelete, err := selectMachineForScaleDown(controlPlane, outdatedMachines)
+	machineToDelete, err := selectMachineForScaleDown(ctx, controlPlane, outdatedMachines)
 	if err != nil {
 		return ctrl.Result{}, fmt.Errorf("failed to select machine for scale down: %w", err)
 	}
@@ -179,7 +179,7 @@ func (r *KThreesControlPlaneReconciler) preflightChecks(_ context.Context, contr

 	// If there are deleting machines, wait for the operation to complete.
 	if controlPlane.HasDeletingMachine() {
-		logger.Info("Waiting for machines to be deleted", "Machines", strings.Join(controlPlane.Machines.Filter(machinefilters.HasDeletionTimestamp).Names(), ", "))
+		logger.Info("Waiting for machines to be deleted", "Machines", strings.Join(controlPlane.Machines.Filter(collections.HasDeletionTimestamp).Names(), ", "))
 		return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil
 	}

@@ -237,7 +237,7 @@ func preflightCheckCondition(kind string, obj conditions.Getter, condition clust
 	return nil
 }

-func selectMachineForScaleDown(controlPlane *k3s.ControlPlane, outdatedMachines k3s.FilterableMachineCollection) (*clusterv1.Machine, error) {
+func selectMachineForScaleDown(ctx context.Context, controlPlane *k3s.ControlPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) {
 	machines := controlPlane.Machines
 	switch {
 	case controlPlane.MachineWithDeleteAnnotation(outdatedMachines).Len() > 0:
@@ -247,7 +247,7 @@ func selectMachineForScaleDown(controlPlane *k3s.ControlPlane, outdatedMachines
 	case outdatedMachines.Len() > 0:
 		machines = outdatedMachines
 	}
-	return controlPlane.MachineInFailureDomainWithMostMachines(machines)
+	return controlPlane.MachineInFailureDomainWithMostMachines(ctx, machines)
 }

 func (r *KThreesControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KThreesControlPlane, bootstrapSpec *bootstrapv1.KThreesConfigSpec, failureDomain *string) error {
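
scale.go also drops controlPlane.Logger() in favor of controller-runtime's context-based logger. A minimal sketch of that pattern, assuming only the standard controller-runtime API (the scaleUp function and the zap setup here are illustrative, not code from this repo):

```go
package main

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func scaleUp(ctx context.Context) {
	// Same retrieval call the diff switches to in scale.go.
	logger := ctrl.LoggerFrom(ctx)
	logger.Info("Scaling up control plane")
}

func main() {
	// Seed the context with a logger, as controller-runtime does for each Reconcile call.
	ctx := ctrl.LoggerInto(context.Background(), zap.New())
	scaleUp(ctx)
}
```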
