
Commit 0719224

Author: Rahul Sharma
Commit message: fix linting errors
1 parent 0d9a469 commit 0719224

File tree: 2 files changed, +38 -28 lines

cloud/linode/nodeipamcontroller.go

Lines changed: 15 additions & 11 deletions
@@ -33,6 +33,10 @@ import (
 	netutils "k8s.io/utils/net"
 )
 
+const (
+	maxAllowedNodeCIDRs = 2
+)
+
 var (
 	// defaultNodeMaskCIDRIPv4 is default mask size for IPv4 node cidr
 	defaultNodeMaskCIDRIPv4 = 24
@@ -52,7 +56,7 @@ func startNodeIpamController(stopCh <-chan struct{}, cloud cloudprovider.Interfa
 	// failure: bad cidrs in config
 	clusterCIDRs, dualStack, err := processCIDRs(Options.ClusterCIDRIPv4)
 	if err != nil {
-		return fmt.Errorf("processCIDRs failed: %v", err)
+		return fmt.Errorf("processCIDRs failed: %w", err)
 	}
 
 	// failure: more than one cidr but they are not configured as dual stack
@@ -61,8 +65,8 @@ func startNodeIpamController(stopCh <-chan struct{}, cloud cloudprovider.Interfa
 	}
 
 	// failure: more than cidrs is not allowed even with dual stack
-	if len(clusterCIDRs) > 2 {
-		return fmt.Errorf("len of clusters is:%v > more than max allowed of 2", len(clusterCIDRs))
+	if len(clusterCIDRs) > maxAllowedNodeCIDRs {
+		return fmt.Errorf("len of clusters is:%v > more than max allowed of %d", len(clusterCIDRs), maxAllowedNodeCIDRs)
 	}
 
 	/* TODO: uncomment and fix if we want to support service cidr overlap with nodecidr
@@ -94,10 +98,7 @@ func startNodeIpamController(stopCh <-chan struct{}, cloud cloudprovider.Interfa
 	}
 	*/
 
-	nodeCIDRMaskSizes, err := setNodeCIDRMaskSizes(clusterCIDRs)
-	if err != nil {
-		return fmt.Errorf("setNodeCIDRMaskSizes failed: %v", err)
-	}
+	nodeCIDRMaskSizes := setNodeCIDRMaskSizes(clusterCIDRs)
 
 	ctx := wait.ContextForChannel(stopCh)
 
@@ -110,7 +111,7 @@ func startNodeIpamController(stopCh <-chan struct{}, cloud cloudprovider.Interfa
 		serviceCIDR,
 		secondaryServiceCIDR,
 		nodeCIDRMaskSizes,
-		ipam.CIDRAllocatorType(ipam.RangeAllocatorType),
+		ipam.RangeAllocatorType,
 	)
 	if err != nil {
 		return err
@@ -134,12 +135,15 @@ func processCIDRs(cidrsList string) ([]*net.IPNet, bool, error) {
 
 	// if cidrs has an error then the previous call will fail
 	// safe to ignore error checking on next call
-	dualstack, _ := netutils.IsDualStackCIDRs(cidrs)
+	dualstack, err := netutils.IsDualStackCIDRs(cidrs)
+	if err != nil {
+		return nil, false, fmt.Errorf("failed to perform dualstack check on cidrs: %w", err)
+	}
 
 	return cidrs, dualstack, nil
 }
 
-func setNodeCIDRMaskSizes(clusterCIDRs []*net.IPNet) ([]int, error) {
+func setNodeCIDRMaskSizes(clusterCIDRs []*net.IPNet) []int {
 	sortedSizes := func(maskSizeIPv4, maskSizeIPv6 int) []int {
 		nodeMaskCIDRs := make([]int, len(clusterCIDRs))
 
@@ -159,5 +163,5 @@ func setNodeCIDRMaskSizes(clusterCIDRs []*net.IPNet) ([]int, error) {
 	if Options.NodeCIDRMaskSizeIPv6 != 0 {
 		defaultNodeMaskCIDRIPv6 = Options.NodeCIDRMaskSizeIPv6
 	}
-	return sortedSizes(defaultNodeMaskCIDRIPv4, defaultNodeMaskCIDRIPv6), nil
+	return sortedSizes(defaultNodeMaskCIDRIPv4, defaultNodeMaskCIDRIPv6)
 }
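Two of the fixes above change behavior as well as satisfying the linter: errors are now wrapped with %w instead of %v, and the result of netutils.IsDualStackCIDRs is checked instead of discarded. The %w verb matters because it keeps the wrapped error visible to errors.Is and errors.As. A minimal standalone sketch of the difference (errBadCIDR is a hypothetical sentinel, not from this repo):

package main

import (
	"errors"
	"fmt"
)

// errBadCIDR stands in for whatever concrete error a parser might return.
var errBadCIDR = errors.New("bad CIDR")

func main() {
	// %w records errBadCIDR in the error chain, so callers can still match it.
	wrapped := fmt.Errorf("processCIDRs failed: %w", errBadCIDR)
	fmt.Println(errors.Is(wrapped, errBadCIDR)) // true

	// %v flattens errBadCIDR into the message string; the chain is lost.
	flattened := fmt.Errorf("processCIDRs failed: %v", errBadCIDR)
	fmt.Println(errors.Is(flattened, errBadCIDR)) // false
}

The remaining cleanups are purely mechanical: setNodeCIDRMaskSizes never returned a non-nil error, so its error result and the call-site handling are dropped, and ipam.RangeAllocatorType already has type ipam.CIDRAllocatorType, so the conversion was redundant.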

cloud/linode/nodeipamcontroller_test.go

Lines changed: 23 additions & 17 deletions
@@ -24,8 +24,10 @@ import (
 	"reflect"
 	"testing"
 
+	"k8s.io/client-go/informers"
 	v1 "k8s.io/client-go/informers/core/v1"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/fake"
 	cloudprovider "k8s.io/cloud-provider"
 )
 
@@ -38,18 +40,16 @@ func Test_setNodeCIDRMaskSizes(t *testing.T) {
 	_, ipv4Net, _ := net.ParseCIDR("10.192.0.0/10")
 	_, ipv6Net, _ := net.ParseCIDR("fd00::/56")
 	tests := []struct {
-		name    string
-		args    args
-		want    []int
-		wantErr bool
+		name string
+		args args
+		want []int
 	}{
 		{
 			name: "empty cluster cidrs",
 			args: args{
 				clusterCIDRs: []*net.IPNet{},
 			},
-			want:    []int{},
-			wantErr: false,
+			want: []int{},
 		},
 		{
 			name: "single cidr",
@@ -61,8 +61,7 @@ func Test_setNodeCIDRMaskSizes(t *testing.T) {
 					},
 				},
 			},
-			want:    []int{defaultNodeMaskCIDRIPv4},
-			wantErr: false,
+			want: []int{defaultNodeMaskCIDRIPv4},
 		},
 		{
 			name: "two cidrs",
@@ -78,8 +77,7 @@ func Test_setNodeCIDRMaskSizes(t *testing.T) {
 					},
 				},
 			},
-			want:    []int{defaultNodeMaskCIDRIPv4, defaultNodeMaskCIDRIPv6},
-			wantErr: false,
+			want: []int{defaultNodeMaskCIDRIPv4, defaultNodeMaskCIDRIPv6},
 		},
 		{
 			name: "two cidrs with custom mask sizes",
@@ -97,8 +95,7 @@ func Test_setNodeCIDRMaskSizes(t *testing.T) {
 				ipv4NetMask: 25,
 				ipv6NetMask: 80,
 			},
-			want:    []int{25, 80},
-			wantErr: false,
+			want: []int{25, 80},
 		},
 	}
 	for _, tt := range tests {
@@ -115,11 +112,7 @@ func Test_setNodeCIDRMaskSizes(t *testing.T) {
 			if tt.args.ipv6NetMask != 0 {
 				Options.NodeCIDRMaskSizeIPv6 = tt.args.ipv6NetMask
 			}
-			got, err := setNodeCIDRMaskSizes(tt.args.clusterCIDRs)
-			if (err != nil) != tt.wantErr {
-				t.Errorf("setNodeCIDRMaskSizes() error = %v, wantErr %v", err, tt.wantErr)
-				return
-			}
+			got := setNodeCIDRMaskSizes(tt.args.clusterCIDRs)
 			if !reflect.DeepEqual(got, tt.want) {
 				t.Errorf("setNodeCIDRMaskSizes() = %v, want %v", got, tt.want)
 			}
@@ -208,6 +201,7 @@ func Test_startNodeIpamController(t *testing.T) {
 		allocateNodeCIDRs bool
 		clusterCIDR       string
 	}
+	kubeClient := fake.NewSimpleClientset()
 	tests := []struct {
 		name    string
 		args    args
@@ -261,6 +255,18 @@ func Test_startNodeIpamController(t *testing.T) {
 			},
 			wantErr: true,
 		},
+		{
+			name: "correct cidrs specified",
+			args: args{
+				stopCh:            make(<-chan struct{}),
+				cloud:             nil,
+				nodeInformer:      informers.NewSharedInformerFactory(kubeClient, 0).Core().V1().Nodes(),
+				kubeclient:        kubeClient,
+				allocateNodeCIDRs: true,
+				clusterCIDR:       "10.192.0.0/10",
+			},
+			wantErr: false,
+		},
 	}
 	for _, tt := range tests {
 		currAllocateNodeCIDRs := Options.AllocateNodeCIDRs
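The new "correct cidrs specified" case exercises the success path, which needs a node informer backed by a client; the fake clientset supplies one without a real cluster. A minimal sketch of that pattern, assuming only k8s.io/client-go (the args struct and harness around it are this repo's own):

package main

import (
	"fmt"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// An in-memory clientset: API calls are served by a local object
	// tracker, so tests need no running cluster.
	kubeClient := fake.NewSimpleClientset()

	// A shared informer factory over the fake client; resync period 0
	// disables periodic resyncs, which is the usual choice in tests.
	factory := informers.NewSharedInformerFactory(kubeClient, 0)

	// The typed Node informer that startNodeIpamController consumes.
	nodeInformer := factory.Core().V1().Nodes()
	fmt.Printf("node informer: %T\n", nodeInformer)
}

Note also that make(<-chan struct{}) in the new case builds a receive-only channel that is never closed, so the controller's stop channel simply stays open for the duration of the test.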
