Commit 47fcb98

Protect from AllocateLoadBalancerNodePorts=false (kubernetes#1988)

When the aforementioned option is set to `false` on the Service `.spec`, NodePorts are no longer allocated for that Service, effectively defaulting all `.spec.ports[*].nodePort` to 0. The same applies to `healthCheckNodePort`. When the cloud provider tries to create a member or an SG rule with such a port, Octavia fails. This commit makes sure we do not add members when the `nodePort` value is 0, and also skips the SG manipulations for such ports.
1 parent b4a7143 commit 47fcb98
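
For illustration, here is a minimal, hypothetical sketch (using the standard `k8s.io/api/core/v1` types; it is not code from this repository) of a Service that takes the code path guarded by this commit: with `AllocateLoadBalancerNodePorts` set to `false`, every port keeps `NodePort == 0`, which is exactly the value the new checks below look for.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// NodePort allocation disabled on a LoadBalancer Service.
	allocateNodePorts := false
	svc := corev1.Service{
		Spec: corev1.ServiceSpec{
			Type:                          corev1.ServiceTypeLoadBalancer,
			AllocateLoadBalancerNodePorts: &allocateNodePorts,
			Ports: []corev1.ServicePort{
				{Name: "http", Port: 80, Protocol: corev1.ProtocolTCP},
			},
		},
	}

	// The API server never assigns a node port, so the field keeps its zero value;
	// the guards added in this commit skip member and SG handling for such ports.
	fmt.Println(svc.Spec.Ports[0].NodePort) // prints 0
}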

File tree

1 file changed: +85 -69 lines changed


pkg/openstack/loadbalancer.go

Lines changed: 85 additions & 69 deletions
@@ -1365,29 +1365,31 @@ func (lbaas *LbaasV2) buildBatchUpdateMemberOpts(port corev1.ServicePort, nodes
 			}
 		}
 
-		member := v2pools.BatchUpdateMemberOpts{
-			Address:      addr,
-			ProtocolPort: int(port.NodePort),
-			Name:         &node.Name,
-			SubnetID:     &svcConf.lbMemberSubnetID,
-		}
-		if svcConf.healthCheckNodePort > 0 {
-			useHealthCheckNodePort := true
-			if lbaas.opts.LBProvider == "ovn" {
-				// ovn-octavia-provider doesn't support HTTP monitors at all, if we have it we got to rely on NodePort
-				// and UDP-CONNECT health monitor.
-				useHealthCheckNodePort = false
-			} else if port.Protocol == "UDP" {
-				// Older Octavia versions doesn't support HTTP monitors on UDP pools. If we have one like that, we got
-				// to rely on checking the NodePort instead.
-				useHealthCheckNodePort = openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureHTTPMonitorsOnUDP, lbaas.opts.LBProvider)
-			}
-			if useHealthCheckNodePort {
-				member.MonitorPort = &svcConf.healthCheckNodePort
-			}
-		}
-		members = append(members, member)
-		newMembers.Insert(fmt.Sprintf("%s-%s-%d-%d", node.Name, addr, member.ProtocolPort, svcConf.healthCheckNodePort))
+		if port.NodePort != 0 { // It's 0 when AllocateLoadBalancerNodePorts=False
+			member := v2pools.BatchUpdateMemberOpts{
+				Address:      addr,
+				ProtocolPort: int(port.NodePort),
+				Name:         &node.Name,
+				SubnetID:     &svcConf.lbMemberSubnetID,
+			}
+			if svcConf.healthCheckNodePort > 0 {
+				useHealthCheckNodePort := true
+				if lbaas.opts.LBProvider == "ovn" {
+					// ovn-octavia-provider doesn't support HTTP monitors at all, if we have it we got to rely on NodePort
+					// and UDP-CONNECT health monitor.
+					useHealthCheckNodePort = false
+				} else if port.Protocol == "UDP" {
+					// Older Octavia versions doesn't support HTTP monitors on UDP pools. If we have one like that, we got
+					// to rely on checking the NodePort instead.
+					useHealthCheckNodePort = openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureHTTPMonitorsOnUDP, lbaas.opts.LBProvider)
+				}
+				if useHealthCheckNodePort {
+					member.MonitorPort = &svcConf.healthCheckNodePort
+				}
+			}
+			members = append(members, member)
+			newMembers.Insert(fmt.Sprintf("%s-%s-%d-%d", node.Name, addr, member.ProtocolPort, svcConf.healthCheckNodePort))
+		}
 	}
 	return members, newMembers, nil
 }
@@ -2396,40 +2398,43 @@ func (lbaas *LbaasV2) ensureLoadBalancer(ctx context.Context, clusterName string
 		if err != nil && !cpoerrors.IsNotFound(err) {
 			return nil, fmt.Errorf("error getting pool members %s: %v", pool.ID, err)
 		}
-		for _, node := range nodes {
-			addr, err := nodeAddressForLB(node, "")
-			if err != nil {
-				if err == cpoerrors.ErrNotFound {
-					// Node failure, do not create member
-					klog.Warningf("Failed to create LB pool member for node %s: %v", node.Name, err)
-					continue
-				} else {
-					return nil, fmt.Errorf("error getting address for node %s: %v", node.Name, err)
-				}
-			}
 
-			if !memberExists(members, addr, int(port.NodePort)) {
-				klog.V(4).Infof("Creating member for pool %s", pool.ID)
-				mc := metrics.NewMetricContext("loadbalancer_member", "create")
-				_, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{
-					Name:         cutString(fmt.Sprintf("member_%d_%s_%s", portIndex, node.Name, name)),
-					ProtocolPort: int(port.NodePort),
-					Address:      addr,
-					SubnetID:     lbaas.opts.SubnetID,
-				}).Extract()
-				if mc.ObserveRequest(err) != nil {
-					return nil, fmt.Errorf("error creating LB pool member for node: %s, %v", node.Name, err)
+		if port.NodePort != 0 { // It's 0 when AllocateLoadBalancerNodePorts=False
+			for _, node := range nodes {
+				addr, err := nodeAddressForLB(node, "")
+				if err != nil {
+					if err == cpoerrors.ErrNotFound {
+						// Node failure, do not create member
+						klog.Warningf("Failed to create LB pool member for node %s: %v", node.Name, err)
+						continue
+					} else {
+						return nil, fmt.Errorf("error getting address for node %s: %v", node.Name, err)
+					}
 				}
 
-				if err := openstackutil.WaitLoadbalancerActive(lbaas.lb, loadbalancer.ID); err != nil {
-					return nil, err
+				if !memberExists(members, addr, int(port.NodePort)) {
+					klog.V(4).Infof("Creating member for pool %s", pool.ID)
+					mc := metrics.NewMetricContext("loadbalancer_member", "create")
+					_, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{
+						Name:         cutString(fmt.Sprintf("member_%d_%s_%s", portIndex, node.Name, name)),
+						ProtocolPort: int(port.NodePort),
+						Address:      addr,
+						SubnetID:     lbaas.opts.SubnetID,
+					}).Extract()
+					if mc.ObserveRequest(err) != nil {
+						return nil, fmt.Errorf("error creating LB pool member for node: %s, %v", node.Name, err)
+					}
+
+					if err := openstackutil.WaitLoadbalancerActive(lbaas.lb, loadbalancer.ID); err != nil {
+						return nil, err
+					}
+				} else {
+					// After all members have been processed, remaining members are deleted as obsolete.
+					members = popMember(members, addr, int(port.NodePort))
 				}
-			} else {
-				// After all members have been processed, remaining members are deleted as obsolete.
-				members = popMember(members, addr, int(port.NodePort))
-			}
 
-			klog.V(4).Infof("Ensured pool %s has member for %s at %s", pool.ID, node.Name, addr)
+				klog.V(4).Infof("Ensured pool %s has member for %s at %s", pool.ID, node.Name, addr)
+			}
 		}
 
 		// Delete obsolete members for this pool
@@ -2875,6 +2880,9 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *corev1
 
 	// ensure rules for node security group
 	for _, port := range ports {
+		if port.NodePort == 0 { // It's 0 when AllocateLoadBalancerNodePorts=False
+			continue
+		}
 		// If Octavia is used, the VIP port security group is already taken good care of, we only need to allow ingress
 		// traffic from Octavia amphorae to the node port on the worker nodes.
 		if lbaas.opts.UseOctavia {
@@ -3142,24 +3150,26 @@ func (lbaas *LbaasV2) updateLoadBalancer(ctx context.Context, clusterName string
 		}
 
 		// Add any new members for this port
-		for addr, node := range addrs {
-			if _, ok := members[addr]; ok && members[addr].ProtocolPort == int(port.NodePort) {
-				// Already exists, do not create member
-				continue
-			}
-			mc := metrics.NewMetricContext("loadbalancer_member", "create")
-			_, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{
-				Name:         cutString(fmt.Sprintf("member_%d_%s_%s", portIndex, node.Name, loadbalancer.Name)),
-				Address:      addr,
-				ProtocolPort: int(port.NodePort),
-				SubnetID:     lbaas.opts.SubnetID,
-			}).Extract()
-			if mc.ObserveRequest(err) != nil {
-				return err
-			}
+		if port.NodePort != 0 { // It's 0 when AllocateLoadBalancerNodePorts=False
+			for addr, node := range addrs {
+				if _, ok := members[addr]; ok && members[addr].ProtocolPort == int(port.NodePort) {
+					// Already exists, do not create member
+					continue
+				}
+				mc := metrics.NewMetricContext("loadbalancer_member", "create")
+				_, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{
+					Name:         cutString(fmt.Sprintf("member_%d_%s_%s", portIndex, node.Name, loadbalancer.Name)),
+					Address:      addr,
+					ProtocolPort: int(port.NodePort),
+					SubnetID:     lbaas.opts.SubnetID,
+				}).Extract()
+				if mc.ObserveRequest(err) != nil {
+					return err
+				}
 
-			if err := openstackutil.WaitLoadbalancerActive(lbaas.lb, loadbalancer.ID); err != nil {
-				return err
+				if err := openstackutil.WaitLoadbalancerActive(lbaas.lb, loadbalancer.ID); err != nil {
+					return err
+				}
 			}
 		}
 
@@ -3258,6 +3268,9 @@ func (lbaas *LbaasV2) ensureAndUpdateOctaviaSecurityGroup(clusterName string, ap
 
 	// ensure rules for node security group
 	for _, port := range ports {
+		if port.NodePort == 0 { // It's 0 when AllocateLoadBalancerNodePorts=False
+			continue
+		}
 		err = lbaas.ensureSecurityRule(
 			rules.DirIngress,
 			rules.RuleProtocol(port.Protocol),
@@ -3315,6 +3328,9 @@ func (lbaas *LbaasV2) updateSecurityGroup(clusterName string, apiService *corev1
 	}
 
 	for _, port := range ports {
+		if port.NodePort == 0 { // It's 0 when AllocateLoadBalancerNodePorts=False
+			continue
+		}
 		for removal := range removals {
 			// Delete the rules in the Node Security Group
 			opts := rules.ListOpts{
