Explicitly add iptables rule to allow healthcheck nodeport #97824

Merged
merged 1 commit on Feb 4, 2021
18 changes: 16 additions & 2 deletions pkg/proxy/iptables/proxier.go
@@ -395,6 +395,7 @@ type iptablesJumpChain struct {
var iptablesJumpChains = []iptablesJumpChain{
{utiliptables.TableFilter, kubeExternalServicesChain, utiliptables.ChainInput, "kubernetes externally-visible service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}},
{utiliptables.TableFilter, kubeExternalServicesChain, utiliptables.ChainForward, "kubernetes externally-visible service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}},
{utiliptables.TableFilter, kubeNodePortsChain, utiliptables.ChainInput, "kubernetes health check service ports", nil},
{utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainForward, "kubernetes service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}},
{utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainOutput, "kubernetes service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}},
{utiliptables.TableFilter, kubeForwardChain, utiliptables.ChainForward, "kubernetes forwarding rules", nil},
@@ -479,7 +480,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
filterChains := bytes.NewBuffer(nil)
filterRules := bytes.NewBuffer(nil)
utilproxy.WriteLine(filterChains, "*filter")
for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeExternalServicesChain, kubeForwardChain} {
for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeExternalServicesChain, kubeForwardChain, kubeNodePortsChain} {
if _, found := existingFilterChains[chain]; found {
chainString := string(chain)
utilproxy.WriteBytesLine(filterChains, existingFilterChains[chain])
@@ -933,7 +934,7 @@ func (proxier *Proxier) syncProxyRules() {

// Make sure we keep stats for the top-level chains, if they existed
// (which most should have because we created them above).
for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeExternalServicesChain, kubeForwardChain} {
for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeExternalServicesChain, kubeForwardChain, kubeNodePortsChain} {
if chain, ok := existingFilterChains[chainName]; ok {
utilproxy.WriteBytesLine(proxier.filterChains, chain)
} else {
@@ -1337,6 +1338,19 @@ func (proxier *Proxier) syncProxyRules() {
}
}

// Capture healthCheckNodePorts.
if svcInfo.HealthCheckNodePort() != 0 {
Contributor:

should go inside the if svcInfo.NodePort() != 0 section

Member:

Node ports are now optional for load balancers (see spec.allocateLoadBalancerNodePorts), so a separate check still makes sense, since healthCheckNodePort is always set regardless of whether node ports are enabled.
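
(For context, a minimal sketch of the scenario described above; this is not code from the PR. It assumes the v1/metav1 imports already used in proxier_test.go, with field names from the core v1 API and illustrative values: a LoadBalancer Service can opt out of regular node ports yet still carry a health check node port.)

// Hypothetical example, not part of this PR: node port allocation is disabled,
// but a health check node port is still present because of
// externalTrafficPolicy=Local, so the proxier checks HealthCheckNodePort()
// independently of NodePort().
func exampleLBWithoutNodePorts() *v1.Service {
	allocateNodePorts := false
	return &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
		Spec: v1.ServiceSpec{
			Type:                          v1.ServiceTypeLoadBalancer,
			ExternalTrafficPolicy:         v1.ServiceExternalTrafficPolicyTypeLocal,
			AllocateLoadBalancerNodePorts: &allocateNodePorts, // no regular node ports allocated
			HealthCheckNodePort:           30000,              // still set for this Service
			Ports:                         []v1.ServicePort{{Port: 80, Protocol: v1.ProtocolTCP}},
		},
	}
}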

Member Author:

Seconding @andrewsykim: I think these are effectively the same for now, since HealthCheckNodePort will be non-zero only if the svc is of LoadBalancer type, and if the service is of LoadBalancer type then it should have a non-zero NodePort. To me it's the api-server's job to validate the spec and ensure all the services the proxy sees are valid (see my previous comment). If we couple the logic here, then if the validation logic ever changes we would also need to fix it here.

// no matter if node has local endpoints, healthCheckNodePorts
// need to add a rule to accept the incoming connection
utilproxy.WriteLine(proxier.filterRules,
"-A", string(kubeNodePortsChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s health check node port"`, svcNameString),
"-m", "tcp", "-p", "tcp",
"--dport", strconv.Itoa(svcInfo.HealthCheckNodePort()),
"-j", "ACCEPT",
)
}

if !hasEndpoints {
continue
}
164 changes: 163 additions & 1 deletion pkg/proxy/iptables/proxier_test.go
@@ -454,6 +454,14 @@ func TestDeleteEndpointConnectionsIPv6(t *testing.T) {
}
}

// fakeCloseable implements utilproxy.Closeable
type fakeCloseable struct{}

// Close fakes out the close() used by syncProxyRules to release a local port.
func (f *fakeCloseable) Close() error {
return nil
}

// fakePortOpener implements portOpener.
type fakePortOpener struct {
openPorts []*utilproxy.LocalPort
@@ -463,7 +471,7 @@ type fakePortOpener struct {
// to lock a local port.
func (f *fakePortOpener) OpenLocalPort(lp *utilproxy.LocalPort, isIPv6 bool) (utilproxy.Closeable, error) {
f.openPorts = append(f.openPorts, lp)
return nil, nil
return &fakeCloseable{}, nil
}

const testHostname = "test-hostname"
@@ -913,6 +921,50 @@ func TestNodePort(t *testing.T) {
}
}

func TestHealthCheckNodePort(t *testing.T) {
Member:

Hiya @hanlins, looking at this unit test, do we also need a sig-network E2E for this?

If not, what E2Es break if the default DROP policy is set up and this rule is nonexistent?

Member Author:

Oh true. I was thinking about it earlier today, but I don't know if there's a way to change the e2e VM settings. My concern is that if I just modify the INPUT chain policy to DROP, we might lose the ssh connection to that VM instantly and fail the test (and lose the VMs, among other risks). I would also say that if we change the chain policy to DROP, then local pod-to-pod traffic on the VM will break, since there's no rule to allow traffic from/to the lo interface. Personally that seems to be a bigger topic than just fixing the healthcheck node port issue; maybe we can create new issues to track them? Maybe one for adding a rule to allow traffic to/from the lo interface, and another to address the e2e test coverage. What's your take? 😆

Member:

No need to make an e2e for it; I guess all that matters is knowing which of the sig-network e2e's we can use to detect this regression?

I bet there's something that requires nodeport healthchecks (e.g. in test/e2e/network/services.go) which demonstrates this bug?

Member:

We have quite a few e2es already on health check node port because it's required for any Service LB w/ externalTrafficPolicy=Local. The tricky part is testing this on a node using a default INPUT policy of DROP.

Contributor:

> My concern is if I just modify the INPUT chain policy to DROP then we might lose ssh connection to that VM instantly

You don't need to change the default INPUT chain policy, you can just do

iptables -A INPUT -p tcp --dport <PORT> -j DROP

or whatever

Member:

> iptables -A INPUT -p tcp --dport -j DROP

This rule would conflict with the rules we add from kube-proxy though.

Contributor:

> This rule would conflict with the rules we add from kube-proxy though.

That's the goal! Add a rule (to the very end of the INPUT chain) that intentionally drops packets directed just at this health check port. (When you quoted my reply the "PORT" was missing, but the rule as I originally typed it was supposed to apply only to that particular port.) The new -j ACCEPT rule added by this PR should run before that rule gets hit, rendering it irrelevant.

Member:

That depends on whether we prepend or append though, right? Do we always prepend filter rules?

Contributor:

The actual per-service rules get added to kubernetes-specific chains, not to the top-level INPUT, OUTPUT, etc, chains. We should be able to arrange for a single rule added by a test case to be added to the right place relative to the -j KUBE-WHATEVER rule in INPUT.
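
To make the suggested arrangement concrete, here is a hedged sketch of the filter-table ordering under discussion (the INPUT jump and the ACCEPT rule follow this PR's changes and expected test output; the trailing DROP is the hypothetical test-added rule, and 30000 is only an example port):

-A INPUT -m comment --comment "kubernetes health check service ports" -j KUBE-NODEPORTS
-A INPUT -p tcp --dport 30000 -j DROP
-A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT

A packet for the health check port jumps from INPUT into KUBE-NODEPORTS and is accepted there, so the trailing DROP in INPUT is never reached.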

ipt := iptablestest.NewFake()
fp := NewFakeProxier(ipt, false)
svcIP := "10.20.30.42"
svcPort := 80
svcNodePort := 3001
svcHealthCheckNodePort := 30000
svcPortName := proxy.ServicePortName{
NamespacedName: makeNSN("ns1", "svc1"),
Port: "p80",
Protocol: v1.ProtocolTCP,
}

makeServiceMap(fp,
makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
svc.Spec.Type = "LoadBalancer"
svc.Spec.ClusterIP = svcIP
svc.Spec.Ports = []v1.ServicePort{{
Name: svcPortName.Port,
Port: int32(svcPort),
Protocol: v1.ProtocolTCP,
NodePort: int32(svcNodePort),
}}
svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort)
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
}),
)

itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
addrs := []net.Addr{utilproxytest.AddrStruct{Val: "127.0.0.1/16"}}
itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0}
addrs1 := []net.Addr{utilproxytest.AddrStruct{Val: "::1/128"}}
fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
fp.nodePortAddresses = []string{"127.0.0.1/16"}

fp.syncProxyRules()

kubeNodePortsRules := ipt.GetRules(string(kubeNodePortsChain))
if !hasJump(kubeNodePortsRules, iptablestest.Accept, "", svcHealthCheckNodePort) {
errorf(fmt.Sprintf("Failed to find Accept rule"), kubeNodePortsRules, t)
}
}

func TestMasqueradeRule(t *testing.T) {
for _, testcase := range []bool{false, true} {
ipt := iptablestest.NewFake().SetHasRandomFully(testcase)
@@ -2594,6 +2646,7 @@ func TestEndpointSliceE2E(t *testing.T) {
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
Member:

Can we add a test like TestEndpointSliceE2E but with a Service with health check node ports? It will help compare the whole rule set.

Member Author:

Good call, will do that.

-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
@@ -2686,4 +2739,113 @@ COMMIT
assert.NotEqual(t, expectedIPTablesWithSlice, fp.iptablesData.String())
}

func TestHealthCheckNodePortE2E(t *testing.T) {
expectedIPTables := `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-XLB-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-NODEPORTS -m comment --comment ns1/svc1 -m tcp -p tcp --dport 30010 -s 127.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns1/svc1 -m tcp -p tcp --dport 30010 -j KUBE-XLB-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-XGJFVO3L2O5SRFNT
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/24 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Balancing rule 0 for ns1/svc1" -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`

ipt := iptablestest.NewFake()
fp := NewFakeProxier(ipt, true)
fp.OnServiceSynced()
fp.OnEndpointsSynced()
fp.OnEndpointSlicesSynced()

serviceName := "svc1"
namespaceName := "ns1"

svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
Spec: v1.ServiceSpec{
ClusterIP: "172.20.1.1",
Selector: map[string]string{"foo": "bar"},
Ports: []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt(80), NodePort: 30010, Protocol: v1.ProtocolTCP}},
Type: "LoadBalancer",
HealthCheckNodePort: 30000,
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
},
}
fp.OnServiceAdd(svc)

tcpProtocol := v1.ProtocolTCP
endpointSlice := &discovery.EndpointSlice{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-1", serviceName),
Namespace: namespaceName,
Labels: map[string]string{discovery.LabelServiceName: serviceName},
},
Ports: []discovery.EndpointPort{{
Name: utilpointer.StringPtr(""),
Port: utilpointer.Int32Ptr(80),
Protocol: &tcpProtocol,
}},
AddressType: discovery.AddressTypeIPv4,
Endpoints: []discovery.Endpoint{{
Addresses: []string{"10.0.1.1"},
Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
Topology: map[string]string{"kubernetes.io/hostname": testHostname},
}, {
Addresses: []string{"10.0.1.2"},
Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
Topology: map[string]string{"kubernetes.io/hostname": "node2"},
}, {
Addresses: []string{"10.0.1.3"},
Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
Topology: map[string]string{"kubernetes.io/hostname": "node3"},
}, { // not ready endpoints should be ignored
Addresses: []string{"10.0.1.4"},
Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(false)},
Topology: map[string]string{"kubernetes.io/hostname": "node4"},
}},
}
fp.OnEndpointSliceAdd(endpointSlice)
fp.syncProxyRules()
assert.Equal(t, expectedIPTables, fp.iptablesData.String())

fp.OnServiceDelete(svc)
fp.syncProxyRules()
assert.NotEqual(t, expectedIPTables, fp.iptablesData.String())
}

// TODO(thockin): add *more* tests for syncProxyRules() or break it down further and test the pieces.
3 changes: 3 additions & 0 deletions pkg/proxy/ipvs/ipset.go
@@ -75,6 +75,9 @@ const (

kubeNodePortLocalSetSCTPComment = "Kubernetes nodeport SCTP port with externalTrafficPolicy=local with type 'hash ip:port'"
kubeNodePortLocalSetSCTP = "KUBE-NODE-PORT-LOCAL-SCTP-HASH"

kubeHealthCheckNodePortSetComment = "Kubernetes health check node port"
kubeHealthCheckNodePortSet = "KUBE-HEALTH-CHECK-NODE-PORT"
)

// IPSetVersioner can query the current ipset version.
27 changes: 27 additions & 0 deletions pkg/proxy/ipvs/proxier.go
@@ -106,6 +106,7 @@ var iptablesJumpChain = []struct {
{utiliptables.TableNAT, utiliptables.ChainPrerouting, kubeServicesChain, "kubernetes service portals"},
{utiliptables.TableNAT, utiliptables.ChainPostrouting, kubePostroutingChain, "kubernetes postrouting rules"},
{utiliptables.TableFilter, utiliptables.ChainForward, KubeForwardChain, "kubernetes forwarding rules"},
{utiliptables.TableFilter, utiliptables.ChainInput, KubeNodePortChain, "kubernetes health check rules"},
Contributor:

I'm less familiar with the IPVS rules but I believe the comment I made about iptables should apply here too

Member Author:

Yeah, same reason as my previous comment for why we need to keep this rule.

}

var iptablesChains = []struct {
@@ -119,6 +120,7 @@ var iptablesChains = []struct {
{utiliptables.TableNAT, KubeLoadBalancerChain},
{utiliptables.TableNAT, KubeMarkMasqChain},
{utiliptables.TableFilter, KubeForwardChain},
{utiliptables.TableFilter, KubeNodePortChain},
}

var iptablesEnsureChains = []struct {
@@ -161,6 +163,7 @@ var ipsetInfo = []struct {
{kubeNodePortLocalSetUDP, utilipset.BitmapPort, kubeNodePortLocalSetUDPComment},
{kubeNodePortSetSCTP, utilipset.HashIPPort, kubeNodePortSetSCTPComment},
{kubeNodePortLocalSetSCTP, utilipset.HashIPPort, kubeNodePortLocalSetSCTPComment},
{kubeHealthCheckNodePortSet, utilipset.BitmapPort, kubeHealthCheckNodePortSetComment},
Contributor:

I'm not sure if you also need to add something to ipsetWithIptablesChain...

Member Author:

Aha, that's a tricky one. At first I thought I should add it here, but then I realized that ipsetWithIptablesChain is for the nat table, whereas we need to add this rule in the filter table. I've already added the rule below:

       // Add rule to accept traffic towards health check node port
       utilproxy.WriteLine(proxier.filterRules,
               "-A", string(KubeNodePortChain),
               "-m", "comment", "--comment", proxier.ipsetList[kubeHealthCheckNodePortSet].getComment(),
               "-m", "set", "--match-set", proxier.ipsetList[kubeHealthCheckNodePortSet].Name, "dst",
               "-j", "ACCEPT",
       )

}

// ipsetWithIptablesChain is the ipsets list with iptables source chain and the chain jump to
@@ -1581,6 +1584,22 @@ func (proxier *Proxier) syncProxyRules() {
}
}
}

if svcInfo.HealthCheckNodePort() != 0 {
nodePortSet := proxier.ipsetList[kubeHealthCheckNodePortSet]
entry := &utilipset.Entry{
// No need to provide ip info
Port: svcInfo.HealthCheckNodePort(),
Protocol: "tcp",
SetType: utilipset.BitmapPort,
}

if valid := nodePortSet.validateEntry(entry); !valid {
klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, nodePortSet.Name))
continue
}
nodePortSet.activeEntries.Insert(entry.String())
}
}

// sync ipset entries
@@ -1817,6 +1836,14 @@ func (proxier *Proxier) writeIptablesRules() {
"-j", "ACCEPT",
)

// Add rule to accept traffic towards health check node port
utilproxy.WriteLine(proxier.filterRules,
"-A", string(KubeNodePortChain),
"-m", "comment", "--comment", proxier.ipsetList[kubeHealthCheckNodePortSet].getComment(),
"-m", "set", "--match-set", proxier.ipsetList[kubeHealthCheckNodePortSet].Name, "dst",
"-j", "ACCEPT",
)

// Install the kubernetes-specific postrouting rules. We use a whole chain for
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
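
For reference, a hedged sketch of what the IPVS-mode change above is expected to render in the filter table (the KUBE-NODE-PORT chain name for KubeNodePortChain is an assumption based on the existing IPVS proxier constants, and the output was not captured from a live node; the comments and set name come from this PR):

-A INPUT -m comment --comment "kubernetes health check rules" -j KUBE-NODE-PORT
-A KUBE-NODE-PORT -m comment --comment "Kubernetes health check node port" -m set --match-set KUBE-HEALTH-CHECK-NODE-PORT dst -j ACCEPT

Each service's healthCheckNodePort is inserted into the KUBE-HEALTH-CHECK-NODE-PORT bitmap:port set during syncProxyRules, so this single ACCEPT rule covers every health check node port.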