Skip to content

Commit 9facb9c

Browse files
committed
revert service ns suffix fallback, update docs
Signed-off-by: Ross Kirkpatrick <[email protected]>
1 parent 8d0ea9f commit 9facb9c

File tree

4 files changed

+85
-32
lines changed

4 files changed

+85
-32
lines changed

README.md

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,11 @@ These configuration options can be specified via the `port-*` annotation, encode
8787
#### Shared IP Load-Balancing
8888
**NOTE:** This feature requires contacting [Customer Support](https://www.linode.com/support/contact/) to enable provisioning of additional IPs.
8989

90-
Services of `type: LoadBalancer` can receive an external IP not backed by a NodeBalancer if `--bgp-node-selector` is set on the Linode CCM and `--load-balancer-type` is set to `cilium-bgp`. Additionally, if you plan to run multiple clusters within a single API Region, setting `--ip-holder-suffix` on the Linode CCM to a unique value per cluster will create an ip-holder nanode for every cluster created within that API Region.
90+
Services of `type: LoadBalancer` can receive an external IP not backed by a NodeBalancer if `--bgp-node-selector` is set on the Linode CCM and `--load-balancer-type` is set to `cilium-bgp`.
91+
92+
If you plan to run multiple clusters within a single API Region, set `--ip-holder-suffix` on the Linode CCM to a unique value per cluster; this creates a uniquely named ip-holder nanode for each cluster in that API Region (ex. `linode-ccm-ip-holder-<region>-<ip-holder-suffix>`).
93+
94+
If you do not set `--ip-holder-suffix` on the Linode CCM, the ip-holder nanode falls back to the default naming convention `linode-ccm-ip-holder-<region>`.
9195

9296
This feature requires the Kubernetes cluster to be using [Cilium](https://cilium.io/) as the CNI with the `bgp-control-plane` feature enabled.
9397

cloud/linode/cilium_loadbalancers.go

Lines changed: 9 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -282,11 +282,6 @@ func (l *loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service)
282282
if Options.IpHolderSuffix != "" {
283283
ipHolderSuffix = Options.IpHolderSuffix
284284
klog.V(3).Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
285-
} else {
286-
if service.Namespace != "" {
287-
ipHolderSuffix = service.Namespace
288-
klog.V(3).Infof("using service-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
289-
}
290285
}
291286

292287
ipHolder, err := l.getIPHolder(ctx, ipHolderSuffix)
@@ -391,18 +386,21 @@ func (l *loadbalancers) getIPHolder(ctx context.Context, suffix string) (*linode
391386

392387
// generateClusterScopedIPHolderLinodeName attempts to generate a unique name for the IP Holder
393388
// instance used alongside Cilium LoadBalancers and Shared IPs for Kubernetes Services.
394-
// The `suffix` is set to either value of the `--ip-holder-suffix` parameter, if it is set.
395-
// The backup method involves keying off the service namespace.
396-
// While it _is_ possible to have an empty suffix passed in, it would mean that kubernetes
397-
// allowed the creation of a service without a namespace, which is highly improbable.
389+
// If the `--ip-holder-suffix` arg is passed when running Linode CCM, `suffix` is set to that value.
398390
// N.B. This function uses SafeConcatName to ensure that the label is <= 63 characters.
399391
// This may result in a hash of the suffix value being used instead of the actual suffix value
400392
// that was passed to this function if the length of the suffix exceeds a certain character limit.
401-
func generateClusterScopedIPHolderLinodeName(zone, suffix string) string {
393+
func generateClusterScopedIPHolderLinodeName(zone, suffix string) (label string) {
402394
// since Linode CCM consumers are varied, we require a method of providing a
403395
// suffix that does not rely on the use of a specific product (ex. LKE) to
404396
// have a specific piece of metadata (ex. annotation(s), label(s) ) present to key off of.
405-
label := SafeConcatName(DNS1123LabelMaxLength, ipHolderLabelPrefix, zone, suffix)
397+
398+
if suffix == "" {
399+
// this avoids a double hyphen if suffix is empty (ex. linode-ccm-ip-holder--us-ord)
400+
label = SafeConcatName(DNS1123LabelMaxLength, ipHolderLabelPrefix, zone)
401+
} else {
402+
label = SafeConcatName(DNS1123LabelMaxLength, ipHolderLabelPrefix, zone, suffix)
403+
}
406404
klog.V(5).Infof("generated IP Holder Linode label: %s", label)
407405
return label
408406
}

cloud/linode/cilium_loadbalancers_test.go

Lines changed: 70 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -77,13 +77,7 @@ var (
7777
Region: "us-west",
7878
IPv4: []*net.IP{&publicIPv4},
7979
}
80-
newIpHolderInstance = linodego.Instance{
81-
ID: 123456,
82-
Label: generateClusterScopedIPHolderLinodeName(zone, "linodelb"),
83-
Type: "g6-standard-1",
84-
Region: "us-west",
85-
IPv4: []*net.IP{&publicIPv4},
86-
}
80+
newIpHolderInstance = linodego.Instance{}
8781
)
8882

8983
func TestCiliumCCMLoadBalancers(t *testing.T) {
@@ -112,8 +106,12 @@ func TestCiliumCCMLoadBalancers(t *testing.T) {
112106
f: testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffix,
113107
},
114108
{
115-
name: "Create Cilium Load Balancer With no existing IP holder nanode",
116-
f: testCreateWithNoExistingIPHolder,
109+
name: "Create Cilium Load Balancer With no existing IP holder nanode and short suffix",
110+
f: testCreateWithNoExistingIPHolderUsingShortSuffix,
111+
},
112+
{
113+
name: "Create Cilium Load Balancer With no existing IP holder nanode and no suffix",
114+
f: testCreateWithNoExistingIPHolderUsingNoSuffix,
117115
},
118116
{
119117
name: "Create Cilium Load Balancer With no existing IP holder nanode and 63 char long suffix",
@@ -190,10 +188,21 @@ func addNodes(t *testing.T, kubeClient kubernetes.Interface, nodes []*v1.Node) {
190188
}
191189
}
192190

191+
func createNewIpHolderInstance() linodego.Instance {
192+
return linodego.Instance{
193+
ID: 123456,
194+
Label: generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix),
195+
Type: "g6-standard-1",
196+
Region: "us-west",
197+
IPv4: []*net.IP{&publicIPv4},
198+
}
199+
}
200+
193201
func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) {
194202
Options.BGPNodeSelector = ""
195203
Options.IpHolderSuffix = "linodelb"
196204
svc := createTestService()
205+
newIpHolderInstance = createNewIpHolderInstance()
197206

198207
kubeClient, _ := k8sClient.NewFakeClientset()
199208
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
@@ -258,6 +267,7 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) {
258267
func testCreateWithExistingIPHolderWithOldIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) {
259268
Options.BGPNodeSelector = "cilium-bgp-peering=true"
260269
svc := createTestService()
270+
newIpHolderInstance = createNewIpHolderInstance()
261271

262272
kubeClient, _ := k8sClient.NewFakeClientset()
263273
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
@@ -297,6 +307,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConvention(t *testing.T,
297307
Options.BGPNodeSelector = "cilium-bgp-peering=true"
298308
Options.IpHolderSuffix = "linodelb"
299309
svc := createTestService()
310+
newIpHolderInstance = createNewIpHolderInstance()
300311

301312
kubeClient, _ := k8sClient.NewFakeClientset()
302313
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
@@ -336,6 +347,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffi
336347
Options.BGPNodeSelector = "cilium-bgp-peering=true"
337348
Options.IpHolderSuffix = "OaTJrRuufacHVougjwkpBpmstiqvswvBNEMWXsRYfMBTCkKIUTXpbGIcIbDWSQp"
338349
svc := createTestService()
350+
newIpHolderInstance = createNewIpHolderInstance()
339351

340352
kubeClient, _ := k8sClient.NewFakeClientset()
341353
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
@@ -371,10 +383,55 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffi
371383
}
372384
}
373385

374-
func testCreateWithNoExistingIPHolder(t *testing.T, mc *mocks.MockClient) {
386+
func testCreateWithNoExistingIPHolderUsingNoSuffix(t *testing.T, mc *mocks.MockClient) {
387+
Options.BGPNodeSelector = "cilium-bgp-peering=true"
388+
Options.IpHolderSuffix = ""
389+
svc := createTestService()
390+
newIpHolderInstance = createNewIpHolderInstance()
391+
392+
kubeClient, _ := k8sClient.NewFakeClientset()
393+
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
394+
addService(t, kubeClient, svc)
395+
addNodes(t, kubeClient, nodes)
396+
lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}
397+
398+
filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)}
399+
rawFilter, _ := json.Marshal(filter)
400+
mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil)
401+
filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)}
402+
rawFilter, _ = json.Marshal(filter)
403+
mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil)
404+
dummySharedIP := "45.76.101.26"
405+
mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil)
406+
mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{
407+
IPv4: &linodego.InstanceIPv4Response{
408+
Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}},
409+
},
410+
}, nil)
411+
mc.EXPECT().AddInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil)
412+
mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{
413+
IPs: []string{dummySharedIP},
414+
LinodeID: 11111,
415+
}).Times(1)
416+
mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{
417+
IPs: []string{dummySharedIP},
418+
LinodeID: 22222,
419+
}).Times(1)
420+
421+
lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes)
422+
if err != nil {
423+
t.Fatalf("expected a nil error, got %v", err)
424+
}
425+
if lbStatus == nil {
426+
t.Fatal("expected non-nil lbStatus")
427+
}
428+
}
429+
430+
func testCreateWithNoExistingIPHolderUsingShortSuffix(t *testing.T, mc *mocks.MockClient) {
375431
Options.BGPNodeSelector = "cilium-bgp-peering=true"
376432
Options.IpHolderSuffix = "linodelb"
377433
svc := createTestService()
434+
newIpHolderInstance = createNewIpHolderInstance()
378435

379436
kubeClient, _ := k8sClient.NewFakeClientset()
380437
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
@@ -418,6 +475,7 @@ func testCreateWithNoExistingIPHolderUsingLongSuffix(t *testing.T, mc *mocks.Moc
418475
Options.BGPNodeSelector = "cilium-bgp-peering=true"
419476
Options.IpHolderSuffix = "OaTJrRuufacHVougjwkpBpmstiqvswvBNEMWXsRYfMBTCkKIUTXpbGIcIbDWSQp"
420477
svc := createTestService()
478+
newIpHolderInstance = createNewIpHolderInstance()
421479

422480
kubeClient, _ := k8sClient.NewFakeClientset()
423481
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
@@ -487,6 +545,7 @@ func testEnsureCiliumLoadBalancerDeletedWithNewIpHolderNamingConvention(t *testi
487545
Options.BGPNodeSelector = "cilium-bgp-peering=true"
488546
Options.IpHolderSuffix = "linodelb"
489547
svc := createTestService()
548+
newIpHolderInstance = createNewIpHolderInstance()
490549

491550
kubeClient, _ := k8sClient.NewFakeClientset()
492551
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
@@ -573,6 +632,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testi
573632
Options.BGPNodeSelector = "cilium-bgp-peering=true"
574633
Options.IpHolderSuffix = "linodelb"
575634
svc := createTestService()
635+
newIpHolderInstance = createNewIpHolderInstance()
576636

577637
kubeClient, _ := k8sClient.NewFakeClientset()
578638
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}

cloud/linode/loadbalancers.go

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -226,11 +226,6 @@ func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri
226226
if Options.IpHolderSuffix != "" {
227227
ipHolderSuffix = Options.IpHolderSuffix
228228
klog.Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
229-
} else {
230-
if service.Namespace != "" {
231-
ipHolderSuffix = service.Namespace
232-
klog.Infof("using service-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
233-
}
234229
}
235230

236231
// CiliumLoadBalancerIPPool does not yet exist for the service
@@ -444,12 +439,8 @@ func (l *loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri
444439
if Options.IpHolderSuffix != "" {
445440
ipHolderSuffix = Options.IpHolderSuffix
446441
klog.V(3).Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
447-
} else {
448-
if service.Namespace != "" {
449-
ipHolderSuffix = service.Namespace
450-
klog.V(3).Infof("using service-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
451-
}
452442
}
443+
453444
// make sure that IPs are shared properly on the Node if using load-balancers not backed by NodeBalancers
454445
for _, node := range nodes {
455446
if err := l.handleIPSharing(ctx, node, ipHolderSuffix); err != nil {

0 commit comments

Comments
 (0)