Commit 30bc1b5

Add unit tests to validate "bad IP/CIDR" handling in kube-proxy
Also, fix the handling of bad EndpointSlice IPs!
1 parent f762e5c

3 files changed: +156 lines, -1 line

pkg/proxy/endpointslicecache.go

Lines changed: 2 additions & 1 deletion
@@ -235,7 +235,8 @@ func (cache *EndpointSliceCache) addEndpoints(svcPortName *ServicePortName, port
 		}
 	}
 
-	endpointInfo := newBaseEndpointInfo(endpoint.Addresses[0], portNum, isLocal,
+	endpointIP := utilnet.ParseIPSloppy(endpoint.Addresses[0]).String()
+	endpointInfo := newBaseEndpointInfo(endpointIP, portNum, isLocal,
 		ready, serving, terminating, zoneHints)
 
 	// This logic ensures we're deduplicating potential overlapping endpoints
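For context on the one-line fix above: utilnet.ParseIPSloppy (from k8s.io/utils/net) is a fork of the pre-Go-1.17 net.ParseIP that still accepts IPv4 octets with leading zeros (read as decimal, not octal), and round-tripping through String() yields the canonical dotted-quad form. A minimal standalone sketch of that normalization, using the sample addresses from the tests below:

	package main

	import (
		"fmt"

		utilnet "k8s.io/utils/net" // same package/alias the diff above uses
	)

	func main() {
		// ParseIPSloppy tolerates leading zeros in IPv4 octets and returns a
		// net.IP (nil if the string is unparseable at all); String() then
		// renders the canonical form.
		for _, bad := range []string{"10.180.00.001", "172.30.0.041", "192.168.099.022"} {
			fmt.Printf("%s -> %s\n", bad, utilnet.ParseIPSloppy(bad).String())
		}
		// Output:
		// 10.180.00.001 -> 10.180.0.1
		// 172.30.0.041 -> 172.30.0.41
		// 192.168.099.022 -> 192.168.99.22
	}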

pkg/proxy/iptables/proxier_test.go

Lines changed: 87 additions & 0 deletions
@@ -6775,3 +6775,90 @@ func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) {
 		})
 	}
 }
+
+// TestBadIPs tests that "bad" IPs and CIDRs in Services/Endpoints are rewritten to
+// be "good" in the input provided to iptables-restore
+func TestBadIPs(t *testing.T) {
+	ipt := iptablestest.NewFake()
+	fp := NewFakeProxier(ipt)
+	metrics.RegisterMetrics(kubeproxyconfig.ProxyModeIPTables)
+
+	makeServiceMap(fp,
+		makeTestService("ns1", "svc1", func(svc *v1.Service) {
+			svc.Spec.Type = "LoadBalancer"
+			svc.Spec.ClusterIP = "172.30.0.041"
+			svc.Spec.Ports = []v1.ServicePort{{
+				Name:     "p80",
+				Port:     80,
+				Protocol: v1.ProtocolTCP,
+				NodePort: 3001,
+			}}
+			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
+				IP: "1.2.3.004",
+			}}
+			svc.Spec.ExternalIPs = []string{"192.168.099.022"}
+			svc.Spec.LoadBalancerSourceRanges = []string{"203.0.113.000/025"}
+		}),
+	)
+	populateEndpointSlices(fp,
+		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
+			eps.AddressType = discovery.AddressTypeIPv4
+			eps.Endpoints = []discovery.Endpoint{{
+				Addresses: []string{"10.180.00.001"},
+			}}
+			eps.Ports = []discovery.EndpointPort{{
+				Name:     ptr.To("p80"),
+				Port:     ptr.To[int32](80),
+				Protocol: ptr.To(v1.ProtocolTCP),
+			}}
+		}),
+	)
+
+	fp.syncProxyRules()
+
+	expected := dedent.Dedent(`
+		*filter
+		:KUBE-NODEPORTS - [0:0]
+		:KUBE-SERVICES - [0:0]
+		:KUBE-EXTERNAL-SERVICES - [0:0]
+		:KUBE-FIREWALL - [0:0]
+		:KUBE-FORWARD - [0:0]
+		:KUBE-PROXY-FIREWALL - [0:0]
+		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
+		-A KUBE-FORWARD -m conntrack --ctstate INVALID -m nfacct --nfacct-name ct_state_invalid_dropped_pkts -j DROP
+		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
+		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
+		-A KUBE-PROXY-FIREWALL -m comment --comment "ns1/svc1:p80 traffic not accepted by KUBE-FW-XPGD46QRK7WJZT7O" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP
+		COMMIT
+		*nat
+		:KUBE-NODEPORTS - [0:0]
+		:KUBE-SERVICES - [0:0]
+		:KUBE-EXT-XPGD46QRK7WJZT7O - [0:0]
+		:KUBE-FW-XPGD46QRK7WJZT7O - [0:0]
+		:KUBE-MARK-MASQ - [0:0]
+		:KUBE-POSTROUTING - [0:0]
+		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
+		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
+		-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp -d 127.0.0.0/8 --dport 3001 -m nfacct --nfacct-name localhost_nps_accepted_pkts -j KUBE-EXT-XPGD46QRK7WJZT7O
+		-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-XPGD46QRK7WJZT7O
+		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
+		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-EXT-XPGD46QRK7WJZT7O
+		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-XPGD46QRK7WJZT7O
+		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
+		-A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade traffic for ns1/svc1:p80 external destinations" -j KUBE-MARK-MASQ
+		-A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVC-XPGD46QRK7WJZT7O
+		-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-XPGD46QRK7WJZT7O
+		-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "other traffic to ns1/svc1:p80 will be dropped by KUBE-PROXY-FIREWALL"
+		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
+		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
+		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
+		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
+		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
+		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
+		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ
+		COMMIT
+		`)
+
+	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())
+}
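
Note the -s 203.0.113.0/25 rule in the expected output above: the Service spec supplied "203.0.113.000/025", so LoadBalancerSourceRanges get the same normalization as plain IPs. k8s.io/utils/net also carries ParseCIDRSloppy, the pre-Go-1.17 fork of net.ParseCIDR; a minimal sketch of that round-trip (ParseCIDRSloppy is a real API in that package, but this diff does not show where kube-proxy calls it, so treat the path as an assumption):

	package main

	import (
		"fmt"

		utilnet "k8s.io/utils/net"
	)

	func main() {
		// ParseCIDRSloppy accepts leading zeros in both the address octets
		// and the prefix length (legacy net.ParseCIDR behavior); the
		// returned *net.IPNet renders canonically via its String method.
		_, ipnet, err := utilnet.ParseCIDRSloppy("203.0.113.000/025")
		if err != nil {
			panic(err)
		}
		fmt.Println(ipnet) // 203.0.113.0/25
	}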

pkg/proxy/nftables/proxier_test.go

Lines changed: 67 additions & 0 deletions
@@ -4835,3 +4835,70 @@ func TestProxier_OnServiceCIDRsChanged(t *testing.T) {
 	proxier.OnServiceCIDRsChanged([]string{"172.30.0.0/16", "172.50.0.0/16", "fd00:10:96::/112", "fd00:172:30::/112"})
 	assert.Equal(t, proxier.serviceCIDRs, "fd00:10:96::/112,fd00:172:30::/112")
 }
+
+// TestBadIPs tests that "bad" IPs and CIDRs in Services/Endpoints are rewritten to
+// be "good" in the input provided to nft
+func TestBadIPs(t *testing.T) {
+	nft, fp := NewFakeProxier(v1.IPv4Protocol)
+	metrics.RegisterMetrics(kubeproxyconfig.ProxyModeNFTables)
+
+	makeServiceMap(fp,
+		makeTestService("ns1", "svc1", func(svc *v1.Service) {
+			svc.Spec.Type = "LoadBalancer"
+			svc.Spec.ClusterIP = "172.30.0.041"
+			svc.Spec.Ports = []v1.ServicePort{{
+				Name:     "p80",
+				Port:     80,
+				Protocol: v1.ProtocolTCP,
+				NodePort: 3001,
+			}}
+			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
+				IP: "1.2.3.004",
+			}}
+			svc.Spec.ExternalIPs = []string{"192.168.099.022"}
+			svc.Spec.LoadBalancerSourceRanges = []string{"203.0.113.000/025"}
+		}),
+	)
+	populateEndpointSlices(fp,
+		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
+			eps.AddressType = discovery.AddressTypeIPv4
+			eps.Endpoints = []discovery.Endpoint{{
+				Addresses: []string{"10.180.00.001"},
+			}}
+			eps.Ports = []discovery.EndpointPort{{
+				Name:     ptr.To("p80"),
+				Port:     ptr.To[int32](80),
+				Protocol: ptr.To(v1.ProtocolTCP),
+			}}
+		}),
+	)
+
+	fp.syncProxyRules()
+
+	expected := baseRules + dedent.Dedent(`
+		# svc1
+		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
+		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
+		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 }
+
+		add chain ip kube-proxy external-ULMVA6XW-ns1/svc1/tcp/p80
+		add rule ip kube-proxy external-ULMVA6XW-ns1/svc1/tcp/p80 jump mark-for-masquerade
+		add rule ip kube-proxy external-ULMVA6XW-ns1/svc1/tcp/p80 goto service-ULMVA6XW-ns1/svc1/tcp/p80
+
+		add chain ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80
+		add rule ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 ip saddr 10.180.0.1 jump mark-for-masquerade
+		add rule ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 meta l4proto tcp dnat to 10.180.0.1:80
+
+		add chain ip kube-proxy firewall-ULMVA6XW-ns1/svc1/tcp/p80
+		add rule ip kube-proxy firewall-ULMVA6XW-ns1/svc1/tcp/p80 ip saddr != { 203.0.113.0/25 } drop
+
+		add element ip kube-proxy cluster-ips { 172.30.0.41 }
+		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
+		add element ip kube-proxy service-ips { 192.168.99.22 . tcp . 80 : goto external-ULMVA6XW-ns1/svc1/tcp/p80 }
+		add element ip kube-proxy service-ips { 1.2.3.4 . tcp . 80 : goto external-ULMVA6XW-ns1/svc1/tcp/p80 }
+		add element ip kube-proxy service-nodeports { tcp . 3001 : goto external-ULMVA6XW-ns1/svc1/tcp/p80 }
+		add element ip kube-proxy firewall-ips { 1.2.3.4 . tcp . 80 : goto firewall-ULMVA6XW-ns1/svc1/tcp/p80 }
+		`)
+
+	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
+}
