diff --git a/.golangci.yml b/.golangci.yml index d0571a1c..ecdaa2cb 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -17,8 +17,9 @@ linters-settings: - "$all" - "!$test" deny: - - pkg: "reflect" - desc: "Reflection is never clear." + # TODO: Remove reflect from loadbalancers.go and reinstate this requirement + #- pkg: "reflect" + # desc: "Reflection is never clear." - pkg: "gob" desc: "Please convert types manually" @@ -112,7 +113,7 @@ linters: - copyloopvar #- cyclop - decorder - #- depguard + - depguard - dogsled - dupl - dupword @@ -123,16 +124,16 @@ linters: - exhaustive #- forbidigo #- forcetypeassert - #- gci + - gci - gocheckcompilerdirectives - gofmt - goimports #- gocognit - #- goconst + - goconst #- gocritic - gofumpt - goprintffuncname - #- gosec + - gosec - importas - loggercheck - maintidx @@ -147,7 +148,7 @@ linters: - nolintlint - nosprintfhostport #- paralleltest - #- prealloc + - prealloc - predeclared - reassign #- tenv @@ -155,7 +156,7 @@ linters: - unconvert - unparam - usestdlibvars - #- varnamelen + - varnamelen - wastedassign - whitespace diff --git a/cloud/linode/cilium_loadbalancers_test.go b/cloud/linode/cilium_loadbalancers_test.go index ea5102ae..1d55eb2c 100644 --- a/cloud/linode/cilium_loadbalancers_test.go +++ b/cloud/linode/cilium_loadbalancers_test.go @@ -18,6 +18,12 @@ import ( "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" ) +const ( + clusterName string = "linodelb" + nodeSelector string = "cilium-bgp-peering=true" + dummyIP string = "45.76.101.26" +) + var ( zone = "us-ord" nodes = []*v1.Node{ @@ -201,7 +207,7 @@ func createNewIpHolderInstance() linodego.Instance { func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) { Options.BGPNodeSelector = "" - Options.IpHolderSuffix = "linodelb" + Options.IpHolderSuffix = clusterName t.Setenv("BGP_PEER_PREFIX", "2600:3cef") svc := createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -218,7 +224,7 @@ func testNoBGPNodeLabel(t 
*testing.T, mc *mocks.MockClient) { filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} rawFilter, _ = json.Marshal(filter) mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -239,7 +245,7 @@ func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) { LinodeID: 33333, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -249,7 +255,7 @@ func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) { } func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.BGPNodeSelector = nodeSelector svc := createTestService() kubeClient, _ := k8sClient.NewFakeClientset() @@ -257,7 +263,7 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) { addService(t, kubeClient, svc) lb := &loadbalancers{mc, "us-foobar", kubeClient, ciliumClient, ciliumLBType} - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), clusterName, svc, nodes) if err == nil { t.Fatal("expected not nil error") } @@ -268,7 +274,7 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) { // Use BGP custom id map t.Setenv("BGP_CUSTOM_ID_MAP", "{'us-foobar': 2}") lb = &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} - lbStatus, err = lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, 
nodes) + lbStatus, err = lb.EnsureLoadBalancer(context.TODO(), clusterName, svc, nodes) if err == nil { t.Fatal("expected not nil error") } @@ -278,7 +284,7 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) { } func testCreateWithExistingIPHolderWithOldIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.BGPNodeSelector = nodeSelector svc := createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -291,7 +297,7 @@ func testCreateWithExistingIPHolderWithOldIpHolderNamingConvention(t *testing.T, filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, _ := json.Marshal(filter) mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().AddInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -307,7 +313,7 @@ func testCreateWithExistingIPHolderWithOldIpHolderNamingConvention(t *testing.T, LinodeID: 22222, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -317,8 +323,8 @@ func testCreateWithExistingIPHolderWithOldIpHolderNamingConvention(t *testing.T, } func testCreateWithExistingIPHolderWithNewIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" - Options.IpHolderSuffix = "linodelb" + Options.BGPNodeSelector = nodeSelector + Options.IpHolderSuffix = clusterName svc := 
createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -331,7 +337,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConvention(t *testing.T, filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, _ := json.Marshal(filter) mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().AddInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -347,7 +353,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConvention(t *testing.T, LinodeID: 22222, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -357,7 +363,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConvention(t *testing.T, } func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffix(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.BGPNodeSelector = nodeSelector Options.IpHolderSuffix = "OaTJrRuufacHVougjwkpBpmstiqvswvBNEMWXsRYfMBTCkKIUTXpbGIcIbDWSQp" svc := createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -371,7 +377,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffi filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, _ := json.Marshal(filter) mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, 
string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().AddInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -387,7 +393,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffi LinodeID: 22222, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -397,7 +403,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffi } func testCreateWithNoExistingIPHolderUsingNoSuffix(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.BGPNodeSelector = nodeSelector Options.IpHolderSuffix = "" svc := createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -414,7 +420,7 @@ func testCreateWithNoExistingIPHolderUsingNoSuffix(t *testing.T, mc *mocks.MockC filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} rawFilter, _ = json.Marshal(filter) mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -431,7 +437,7 @@ func testCreateWithNoExistingIPHolderUsingNoSuffix(t *testing.T, mc *mocks.MockC 
LinodeID: 22222, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -441,8 +447,8 @@ func testCreateWithNoExistingIPHolderUsingNoSuffix(t *testing.T, mc *mocks.MockC } func testCreateWithNoExistingIPHolderUsingShortSuffix(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" - Options.IpHolderSuffix = "linodelb" + Options.BGPNodeSelector = nodeSelector + Options.IpHolderSuffix = clusterName svc := createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -458,7 +464,7 @@ func testCreateWithNoExistingIPHolderUsingShortSuffix(t *testing.T, mc *mocks.Mo filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} rawFilter, _ = json.Marshal(filter) mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -475,7 +481,7 @@ func testCreateWithNoExistingIPHolderUsingShortSuffix(t *testing.T, mc *mocks.Mo LinodeID: 22222, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -485,7 +491,7 @@ func testCreateWithNoExistingIPHolderUsingShortSuffix(t *testing.T, mc *mocks.Mo } func testCreateWithNoExistingIPHolderUsingLongSuffix(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = 
"cilium-bgp-peering=true" + Options.BGPNodeSelector = nodeSelector Options.IpHolderSuffix = "OaTJrRuufacHVougjwkpBpmstiqvswvBNEMWXsRYfMBTCkKIUTXpbGIcIbDWSQp" svc := createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -502,7 +508,7 @@ func testCreateWithNoExistingIPHolderUsingLongSuffix(t *testing.T, mc *mocks.Moc filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} rawFilter, _ = json.Marshal(filter) mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -519,7 +525,7 @@ func testCreateWithNoExistingIPHolderUsingLongSuffix(t *testing.T, mc *mocks.Moc LinodeID: 22222, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -529,7 +535,7 @@ func testCreateWithNoExistingIPHolderUsingLongSuffix(t *testing.T, mc *mocks.Moc } func testEnsureCiliumLoadBalancerDeletedWithOldIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.BGPNodeSelector = nodeSelector svc := createTestService() kubeClient, _ := k8sClient.NewFakeClientset() @@ -538,7 +544,7 @@ func testEnsureCiliumLoadBalancerDeletedWithOldIpHolderNamingConvention(t *testi addNodes(t, kubeClient, nodes) lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP svc.Status.LoadBalancer = v1.LoadBalancerStatus{Ingress: 
[]v1.LoadBalancerIngress{{IP: dummySharedIP}}} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} @@ -548,15 +554,15 @@ func testEnsureCiliumLoadBalancerDeletedWithOldIpHolderNamingConvention(t *testi mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), 22222, dummySharedIP).Times(1).Return(nil) mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, dummySharedIP).Times(1).Return(nil) - err := lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + err := lb.EnsureLoadBalancerDeleted(context.TODO(), clusterName, svc) if err != nil { t.Fatalf("expected a nil error, got %v", err) } } func testEnsureCiliumLoadBalancerDeletedWithNewIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" - Options.IpHolderSuffix = "linodelb" + Options.BGPNodeSelector = nodeSelector + Options.IpHolderSuffix = clusterName svc := createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -566,7 +572,7 @@ func testEnsureCiliumLoadBalancerDeletedWithNewIpHolderNamingConvention(t *testi addNodes(t, kubeClient, nodes) lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP svc.Status.LoadBalancer = v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: dummySharedIP}}} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} @@ -579,14 +585,14 @@ func testEnsureCiliumLoadBalancerDeletedWithNewIpHolderNamingConvention(t *testi mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), 22222, dummySharedIP).Times(1).Return(nil) mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, dummySharedIP).Times(1).Return(nil) - err := lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + err := lb.EnsureLoadBalancerDeleted(context.TODO(), clusterName, svc) if err != nil { t.Fatalf("expected a nil error, got %v", err) } } func 
testCiliumUpdateLoadBalancerAddNodeWithOldIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.BGPNodeSelector = nodeSelector svc := createTestService() kubeClient, _ := k8sClient.NewFakeClientset() @@ -598,7 +604,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithOldIpHolderNamingConvention(t *testi filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, _ := json.Marshal(filter) mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().AddInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -614,7 +620,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithOldIpHolderNamingConvention(t *testi LinodeID: 22222, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -635,15 +641,15 @@ func testCiliumUpdateLoadBalancerAddNodeWithOldIpHolderNamingConvention(t *testi }).Times(1) addNodes(t, kubeClient, additionalNodes) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, additionalNodes) + err = lb.UpdateLoadBalancer(context.TODO(), clusterName, svc, additionalNodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } } func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" - Options.IpHolderSuffix = "linodelb" + Options.BGPNodeSelector = nodeSelector 
+ Options.IpHolderSuffix = clusterName svc := createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -659,7 +665,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testi filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} rawFilter, _ = json.Marshal(filter) mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{newIpHolderInstance}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().AddInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -675,7 +681,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testi LinodeID: 22222, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -702,7 +708,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testi }).Times(1) addNodes(t, kubeClient, additionalNodes) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, additionalNodes) + err = lb.UpdateLoadBalancer(context.TODO(), clusterName, svc, additionalNodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } diff --git a/cloud/linode/instances.go b/cloud/linode/instances.go index e6608f94..384b9a13 100644 --- a/cloud/linode/instances.go +++ b/cloud/linode/instances.go @@ -99,13 +99,13 @@ func (nc *nodeCache) refreshInstances(ctx context.Context, client client.Client) } newNodes := make(map[int]linodeInstance, len(instances)) - for i, instance := range instances { + for index, instance 
:= range instances { // if running within VPC, only store instances in cache which are part of VPC if Options.VPCNames != "" && len(vpcNodes[instance.ID]) == 0 { continue } node := linodeInstance{ - instance: &instances[i], + instance: &instances[index], ips: nc.getInstanceAddresses(instance, vpcNodes[instance.ID]), } newNodes[instance.ID] = node diff --git a/cloud/linode/instances_test.go b/cloud/linode/instances_test.go index 97edf43a..a1cd328e 100644 --- a/cloud/linode/instances_test.go +++ b/cloud/linode/instances_test.go @@ -17,6 +17,12 @@ import ( cloudprovider "k8s.io/cloud-provider" "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" +) + +const ( + instanceName string = "mock-instance" + usEast string = "us-east" + typeG6 string = "g6-standard-1" ) func nodeWithProviderID(providerID string) *v1.Node { @@ -53,7 +60,7 @@ func TestInstanceExists(t *testing.T) { { ID: 123, Label: "mock", - Region: "us-east", + Region: usEast, Type: "g6-standard-2", }, }, nil) @@ -114,14 +121,13 @@ func TestMetadataRetrieval(t *testing.T) { t.Run("should return data when linode is found (by name)", func(t *testing.T) { instances := newInstances(client) id := 123 - name := "mock-instance" - node := nodeWithName(name) + node := nodeWithName(instanceName) publicIPv4 := net.ParseIP("45.76.101.25") privateIPv4 := net.ParseIP("192.168.133.65") - linodeType := "g6-standard-1" - region := "us-east" + linodeType := typeG6 + region := usEast client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{ - {ID: id, Label: name, Type: linodeType, Region: region, IPv4: []*net.IP{&publicIPv4, &privateIPv4}}, + {ID: id, Label: instanceName, Type: linodeType, Region: region, IPv4: []*net.IP{&publicIPv4, &privateIPv4}}, }, nil) meta, err := instances.InstanceMetadata(ctx, node) @@ -132,7 +138,7 @@ func TestMetadataRetrieval(t *testing.T) { assert.Equal(t, []v1.NodeAddress{ { Type: v1.NodeHostName, - Address: name, + Address: instanceName, }, {
Type: v1.NodeExternalIP, @@ -148,13 +154,11 @@ func TestMetadataRetrieval(t *testing.T) { t.Run("should return data when linode is found (by name) and addresses must be in order", func(t *testing.T) { instances := newInstances(client) id := 123 - name := "mock-instance" - node := nodeWithName(name) + node := nodeWithName(instanceName) publicIPv4 := net.ParseIP("45.76.101.25") privateIPv4 := net.ParseIP("192.168.133.65") ipv6Addr := "2001::8a2e:370:7348" - linodeType := "g6-standard-1" - region := "us-east" + linodeType := typeG6 Options.VPCNames = "test" vpcIDs["test"] = 1 @@ -162,9 +166,9 @@ func TestMetadataRetrieval(t *testing.T) { instance := linodego.Instance{ ID: id, - Label: name, + Label: instanceName, Type: linodeType, - Region: region, + Region: usEast, IPv4: []*net.IP{&publicIPv4, &privateIPv4}, IPv6: ipv6Addr, } @@ -201,12 +205,12 @@ func TestMetadataRetrieval(t *testing.T) { meta, err := instances.InstanceMetadata(ctx, node) assert.NoError(t, err) assert.Equal(t, providerIDPrefix+strconv.Itoa(id), meta.ProviderID) - assert.Equal(t, region, meta.Region) + assert.Equal(t, usEast, meta.Region) assert.Equal(t, linodeType, meta.InstanceType) assert.Equal(t, []v1.NodeAddress{ { Type: v1.NodeHostName, - Address: name, + Address: instanceName, }, { Type: v1.NodeInternalIP, @@ -345,8 +349,8 @@ func TestMetadataRetrieval(t *testing.T) { ips = append(ips, &parsed) } - linodeType := "g6-standard-1" - region := "us-east" + linodeType := typeG6 + region := usEast client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{ {ID: id, Label: name, Type: linodeType, Region: region, IPv4: ips, IPv6: test.inputIPv6}, }, nil) diff --git a/cloud/linode/loadbalancers.go b/cloud/linode/loadbalancers.go index 093ab45b..557e1a13 100644 --- a/cloud/linode/loadbalancers.go +++ b/cloud/linode/loadbalancers.go @@ -861,16 +861,16 @@ func (l *loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterNam return l.createNodeBalancer(ctx, clusterName, 
service, configs) } -func coerceString(s string, minLen, maxLen int, padding string) string { +func coerceString(str string, minLen, maxLen int, padding string) string { if len(padding) == 0 { padding = "x" } - if len(s) > maxLen { - return s[:maxLen] - } else if len(s) < minLen { - return coerceString(fmt.Sprintf("%s%s", padding, s), minLen, maxLen, padding) + if len(str) > maxLen { + return str[:maxLen] + } else if len(str) < minLen { + return coerceString(fmt.Sprintf("%s%s", padding, str), minLen, maxLen, padding) } - return s + return str } func (l *loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node, nodePort int32, subnetID int) linodego.NodeBalancerConfigRebuildNodeOptions { diff --git a/cloud/linode/loadbalancers_test.go b/cloud/linode/loadbalancers_test.go index 7ae0ac57..3ef2cd08 100644 --- a/cloud/linode/loadbalancers_test.go +++ b/cloud/linode/loadbalancers_test.go @@ -115,6 +115,8 @@ o/aoxqmE0mN1lyCPOa9UP//LlsREkWVKI3+Wld/xERtzf66hjcH+ilsXDxxpMEXo bSiPJQsGIKtQvyCaZY2szyOoeUGgOId+He7ITlezxKrjdj+1pLMESvAxKeo= -----END RSA PRIVATE KEY-----` +const drop string = "DROP" + func TestCCMLoadBalancers(t *testing.T) { testCases := []struct { name string @@ -1529,8 +1531,8 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie t.Fatalf("Firewalls attached when none specified") } - var ipv4s []string - var ipv6s []string + ipv4s := make([]string, 0, 400) + ipv6s := make([]string, 0, 300) i := 0 for i < 400 { ipv4s = append(ipv4s, fmt.Sprintf("%d.%d.%d.%d", 192, rand.Int31n(255), rand.Int31n(255), rand.Int31n(255))) @@ -1587,7 +1589,7 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie t.Fatalf("No firewalls found") } - if firewallsNew[0].Rules.InboundPolicy != "DROP" { + if firewallsNew[0].Rules.InboundPolicy != drop { t.Errorf("expected DROP inbound policy, got %s", firewallsNew[0].Rules.InboundPolicy) } @@ -1663,7 +1665,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveACL(t 
*testing.T, client *linodeg t.Fatalf("No firewalls attached") } - if firewalls[0].Rules.InboundPolicy != "DROP" { + if firewalls[0].Rules.InboundPolicy != drop { t.Errorf("expected DROP inbound policy, got %s", firewalls[0].Rules.InboundPolicy) } @@ -1756,7 +1758,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li t.Fatalf("No firewalls attached") } - if firewalls[0].Rules.InboundPolicy != "DROP" { + if firewalls[0].Rules.InboundPolicy != drop { t.Errorf("expected DROP inbound policy, got %s", firewalls[0].Rules.InboundPolicy) } @@ -1942,7 +1944,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li t.Fatalf("No attached firewalls found") } - if firewallsNew[0].Rules.InboundPolicy != "DROP" { + if firewallsNew[0].Rules.InboundPolicy != drop { t.Errorf("expected DROP inbound policy, got %s", firewallsNew[0].Rules.InboundPolicy) } @@ -2022,7 +2024,7 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie t.Fatalf("No firewalls attached") } - if firewalls[0].Rules.InboundPolicy != "DROP" { + if firewalls[0].Rules.InboundPolicy != drop { t.Errorf("expected DROP inbound policy, got %s", firewalls[0].Rules.InboundPolicy) } diff --git a/cloud/linode/node_controller.go b/cloud/linode/node_controller.go index 365e4da0..62f74edb 100644 --- a/cloud/linode/node_controller.go +++ b/cloud/linode/node_controller.go @@ -185,26 +185,26 @@ func (s *nodeController) handleNode(ctx context.Context, node *v1.Node) error { if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { // Get a fresh copy of the node so the resource version is up-to-date - n, err := s.kubeclient.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) + nodeResult, err := s.kubeclient.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) if err != nil { return err } // Try to update the node UUID if it has not been set - if n.Labels[annotations.AnnLinodeHostUUID] != linode.HostUUID { - 
n.Labels[annotations.AnnLinodeHostUUID] = linode.HostUUID + if nodeResult.Labels[annotations.AnnLinodeHostUUID] != linode.HostUUID { + nodeResult.Labels[annotations.AnnLinodeHostUUID] = linode.HostUUID } // Try to update the node ProviderID if it has not been set - if n.Spec.ProviderID == "" { - n.Spec.ProviderID = providerIDPrefix + strconv.Itoa(linode.ID) + if nodeResult.Spec.ProviderID == "" { + nodeResult.Spec.ProviderID = providerIDPrefix + strconv.Itoa(linode.ID) } // Try to update the expectedPrivateIP if its not set or doesn't match - if n.Annotations[annotations.AnnLinodeNodePrivateIP] != expectedPrivateIP && expectedPrivateIP != "" { - n.Annotations[annotations.AnnLinodeNodePrivateIP] = expectedPrivateIP + if nodeResult.Annotations[annotations.AnnLinodeNodePrivateIP] != expectedPrivateIP && expectedPrivateIP != "" { + nodeResult.Annotations[annotations.AnnLinodeNodePrivateIP] = expectedPrivateIP } - _, err = s.kubeclient.CoreV1().Nodes().Update(ctx, n, metav1.UpdateOptions{}) + _, err = s.kubeclient.CoreV1().Nodes().Update(ctx, nodeResult, metav1.UpdateOptions{}) return err }); err != nil { klog.V(1).ErrorS(err, "Node update error")