Skip to content

Commit

Permalink
UPSTREAM: <carry>: Prefer local endpoint for cluster DNS service
Browse files Browse the repository at this point in the history
This commit fixes bug 1919737.

https://bugzilla.redhat.com/show_bug.cgi?id=1919737

* pkg/proxy/iptables/proxier.go (syncProxyRules): Prefer a local endpoint
for the cluster DNS service.
  • Loading branch information
Miciah authored and JacobTanenbaum committed Feb 1, 2023
1 parent 1b2eacb commit 9afeab0
Show file tree
Hide file tree
Showing 2 changed files with 197 additions and 0 deletions.
15 changes: 15 additions & 0 deletions pkg/proxy/iptables/proxier.go
Expand Up @@ -984,6 +984,21 @@ func (proxier *Proxier) syncProxyRules() {
allEndpoints := proxier.endpointsMap[svcName]
clusterEndpoints, localEndpoints, allLocallyReachableEndpoints, hasEndpoints := proxy.CategorizeEndpoints(allEndpoints, svcInfo, proxier.nodeLabels)

// Prefer local endpoint for the DNS service.
// Fixes <https://bugzilla.redhat.com/show_bug.cgi?id=1919737>.
// TODO: Delete this once node-level topology is
// implemented and the DNS operator is updated to use it.
if svcPortNameString == "openshift-dns/dns-default:dns" {
for _, ep := range clusterEndpoints {
if ep.GetIsLocal() {
klog.V(4).Infof("Found a local endpoint %q for service %q; preferring the local endpoint and ignoring %d other endpoints", ep.String(), svcPortNameString, len(clusterEndpoints) - 1)
clusterEndpoints = []proxy.Endpoint{ep}
allLocallyReachableEndpoints = clusterEndpoints
break
}
}
}

// Note the endpoint chains that will be used
for _, ep := range allLocallyReachableEndpoints {
if epInfo, ok := ep.(*endpointsInfo); ok {
Expand Down
182 changes: 182 additions & 0 deletions pkg/proxy/iptables/proxier_test.go
Expand Up @@ -2209,6 +2209,188 @@ func TestClusterIPEndpointsJump(t *testing.T) {
})
}

// TestOpenShiftDNSHack verifies the openshift-dns special case: when the
// cluster DNS service ("openshift-dns/dns-default:dns") has an endpoint on
// this node, syncProxyRules programs rules for that local endpoint only and
// ignores every remote endpoint.
func TestOpenShiftDNSHack(t *testing.T) {
	fakeIPT := iptablestest.NewFake()
	proxier := NewFakeProxier(fakeIPT)

	const (
		serviceIP    = "172.30.0.10"
		servicePort  = 53
		endpointPort = 5353
	)
	portName := proxy.ServicePortName{
		NamespacedName: makeNSN("openshift-dns", "dns-default"),
		Port:           "dns",
		Protocol:       v1.ProtocolUDP,
	}

	makeServiceMap(proxier,
		makeTestService(portName.Namespace, portName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = serviceIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     portName.Port,
				Port:     int32(servicePort),
				Protocol: portName.Protocol,
			}}
		}),
	)

	populateEndpointSlices(proxier,
		makeTestEndpointSlice(portName.Namespace, portName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				// This endpoint is ignored because it's remote
				Addresses: []string{"10.180.0.2"},
				NodeName:  utilpointer.StringPtr("node2"),
			}, {
				Addresses: []string{"10.180.0.1"},
				NodeName:  utilpointer.StringPtr(testHostname),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(portName.Port),
				Port:     utilpointer.Int32(int32(endpointPort)),
				Protocol: &portName.Protocol,
			}}
		}),
	)

	// Stub out exec so the UDP conntrack cleanup in the sync path has a
	// scripted command to call.
	execer := proxier.exec.(*fakeexec.FakeExec)
	execer.LookPathFunc = func(cmd string) (string, error) { return cmd, nil }
	fakeCmd := fakeexec.FakeCmd{
		CombinedOutputScript: []fakeexec.FakeAction{
			func() ([]byte, []byte, error) { return []byte("1 flow entries have been deleted"), nil, nil },
		},
	}
	execer.CommandScript = []fakeexec.FakeCommandAction{
		func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...) },
	}

	proxier.syncProxyRules()

	// Only the local endpoint 10.180.0.1 appears in the generated rules.
	want := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		:KUBE-PROXY-FIREWALL - [0:0]
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		:KUBE-SEP-DYOI7QYSVZXR6VUA - [0:0]
		:KUBE-SVC-BGNS3J6UB7MMLVDO - [0:0]
		-A KUBE-SERVICES -m comment --comment "openshift-dns/dns-default:dns cluster IP" -m udp -p udp -d 172.30.0.10 --dport 53 -j KUBE-SVC-BGNS3J6UB7MMLVDO
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		-A KUBE-SEP-DYOI7QYSVZXR6VUA -m comment --comment openshift-dns/dns-default:dns -s 10.180.0.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-DYOI7QYSVZXR6VUA -m comment --comment openshift-dns/dns-default:dns -m udp -p udp -j DNAT --to-destination 10.180.0.1:5353
		-A KUBE-SVC-BGNS3J6UB7MMLVDO -m comment --comment "openshift-dns/dns-default:dns cluster IP" -m udp -p udp -d 172.30.0.10 --dport 53 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-BGNS3J6UB7MMLVDO -m comment --comment "openshift-dns/dns-default:dns -> 10.180.0.1:5353" -j KUBE-SEP-DYOI7QYSVZXR6VUA
		COMMIT
		`)
	assertIPTablesRulesEqual(t, getLine(), want, proxier.iptablesData.String())
}

// TestOpenShiftDNSHackFallback verifies the other half of the openshift-dns
// special case: when the cluster DNS service has no endpoint on this node,
// the proxier falls back to normal behavior and load-balances across all
// remote endpoints.
func TestOpenShiftDNSHackFallback(t *testing.T) {
	fakeIptables := iptablestest.NewFake()
	p := NewFakeProxier(fakeIptables)

	const (
		clusterIP  = "172.30.0.10"
		svcPortNum = 53
		targetPort = 5353
	)
	dnsPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("openshift-dns", "dns-default"),
		Port:           "dns",
		Protocol:       v1.ProtocolUDP,
	}

	makeServiceMap(p,
		makeTestService(dnsPortName.Namespace, dnsPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = clusterIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     dnsPortName.Port,
				Port:     int32(svcPortNum),
				Protocol: dnsPortName.Protocol,
			}}
		}),
	)

	populateEndpointSlices(p,
		makeTestEndpointSlice(dnsPortName.Namespace, dnsPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			// Both endpoints are used because neither is local
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.180.1.2"},
				NodeName:  utilpointer.StringPtr("node2"),
			}, {
				Addresses: []string{"10.180.2.3"},
				NodeName:  utilpointer.StringPtr("node3"),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(dnsPortName.Port),
				Port:     utilpointer.Int32(int32(targetPort)),
				Protocol: &dnsPortName.Protocol,
			}}
		}),
	)

	// Script the exec calls made for UDP conntrack cleanup during sync.
	fexec := p.exec.(*fakeexec.FakeExec)
	fexec.LookPathFunc = func(cmd string) (string, error) { return cmd, nil }
	scripted := fakeexec.FakeCmd{
		CombinedOutputScript: []fakeexec.FakeAction{
			func() ([]byte, []byte, error) { return []byte("1 flow entries have been deleted"), nil, nil },
		},
	}
	fexec.CommandScript = []fakeexec.FakeCommandAction{
		func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&scripted, cmd, args...) },
	}

	p.syncProxyRules()

	// Both remote endpoints get SEP chains with random load-balancing.
	expectedRules := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		:KUBE-PROXY-FIREWALL - [0:0]
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		:KUBE-SEP-HBTKKYYQD5LT7Q5V - [0:0]
		:KUBE-SEP-UJYYVAPCRGSFEWJK - [0:0]
		:KUBE-SVC-BGNS3J6UB7MMLVDO - [0:0]
		-A KUBE-SERVICES -m comment --comment "openshift-dns/dns-default:dns cluster IP" -m udp -p udp -d 172.30.0.10 --dport 53 -j KUBE-SVC-BGNS3J6UB7MMLVDO
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		-A KUBE-SEP-HBTKKYYQD5LT7Q5V -m comment --comment openshift-dns/dns-default:dns -s 10.180.1.2 -j KUBE-MARK-MASQ
		-A KUBE-SEP-HBTKKYYQD5LT7Q5V -m comment --comment openshift-dns/dns-default:dns -m udp -p udp -j DNAT --to-destination 10.180.1.2:5353
		-A KUBE-SEP-UJYYVAPCRGSFEWJK -m comment --comment openshift-dns/dns-default:dns -s 10.180.2.3 -j KUBE-MARK-MASQ
		-A KUBE-SEP-UJYYVAPCRGSFEWJK -m comment --comment openshift-dns/dns-default:dns -m udp -p udp -j DNAT --to-destination 10.180.2.3:5353
		-A KUBE-SVC-BGNS3J6UB7MMLVDO -m comment --comment "openshift-dns/dns-default:dns cluster IP" -m udp -p udp -d 172.30.0.10 --dport 53 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-BGNS3J6UB7MMLVDO -m comment --comment "openshift-dns/dns-default:dns -> 10.180.1.2:5353" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-HBTKKYYQD5LT7Q5V
		-A KUBE-SVC-BGNS3J6UB7MMLVDO -m comment --comment "openshift-dns/dns-default:dns -> 10.180.2.3:5353" -j KUBE-SEP-UJYYVAPCRGSFEWJK
		COMMIT
		`)
	assertIPTablesRulesEqual(t, getLine(), expectedRules, p.iptablesData.String())
}

func TestLoadBalancer(t *testing.T) {
ipt := iptablestest.NewFake()
fp := NewFakeProxier(ipt)
Expand Down

0 comments on commit 9afeab0

Please sign in to comment.