Skip to content

Commit

Permalink
fix(cleanup): add missing handlers for cleanup
Browse files Browse the repository at this point in the history
kube-router v2.X introduced the idea of iptables and ipset handlers that
allow kube-router to be dual-stack capable. However, the cleanup logic
for the various controllers was not properly ported when this happened.
When the cleanup functions run, they often have not had their
controllers fully initialized, as cleanup should not be dependent on
kube-router being able to reach a kube-apiserver.

As a result, these handlers were missing, and the cleanup functions
either silently became no-ops or, worse, ran into nil pointer
failures.

This corrects that, so that kube-router no longer fails this way and
cleans up as it had in v1.X.
  • Loading branch information
aauren committed Apr 26, 2024
1 parent 7755b4a commit ecaad2c
Show file tree
Hide file tree
Showing 4 changed files with 177 additions and 96 deletions.
24 changes: 22 additions & 2 deletions pkg/controllers/netpol/network_policy_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -723,6 +723,26 @@ func (npc *NetworkPolicyController) cleanupStaleIPSets(activePolicyIPSets map[st
func (npc *NetworkPolicyController) Cleanup() {
klog.Info("Cleaning up NetworkPolicyController configurations...")

if len(npc.iptablesCmdHandlers) < 1 {
iptablesCmdHandlers, ipSetHandlers, err := NewIPTablesHandlers(nil)
if err != nil {
klog.Errorf("unable to get iptables and ipset handlers: %v", err)
return
}
npc.iptablesCmdHandlers = iptablesCmdHandlers
npc.ipSetHandlers = ipSetHandlers

// Make other structures that we rely on
npc.iptablesSaveRestore = make(map[v1core.IPFamily]utils.IPTablesSaveRestorer, 2)
npc.iptablesSaveRestore[v1core.IPv4Protocol] = utils.NewIPTablesSaveRestore(v1core.IPv4Protocol)
npc.iptablesSaveRestore[v1core.IPv6Protocol] = utils.NewIPTablesSaveRestore(v1core.IPv6Protocol)
npc.filterTableRules = make(map[v1core.IPFamily]*bytes.Buffer, 2)
var buf bytes.Buffer
npc.filterTableRules[v1core.IPv4Protocol] = &buf
var buf2 bytes.Buffer
npc.filterTableRules[v1core.IPv6Protocol] = &buf2
}

var emptySet map[string]bool
// Take a dump (iptables-save) of the current filter table for cleanupStaleRules() to work on
for ipFamily, iptablesSaveRestore := range npc.iptablesSaveRestore {
Expand Down Expand Up @@ -762,7 +782,7 @@ func NewIPTablesHandlers(config *options.KubeRouterConfig) (
iptablesCmdHandlers := make(map[v1core.IPFamily]utils.IPTablesHandler, 2)
ipSetHandlers := make(map[v1core.IPFamily]utils.IPSetHandler, 2)

if config.EnableIPv4 {
if config == nil || config.EnableIPv4 {
iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
if err != nil {
return nil, nil, fmt.Errorf("failed to create iptables handler: %w", err)
Expand All @@ -775,7 +795,7 @@ func NewIPTablesHandlers(config *options.KubeRouterConfig) (
}
ipSetHandlers[v1core.IPv4Protocol] = ipset
}
if config.EnableIPv6 {
if config == nil || config.EnableIPv6 {
iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
return nil, nil, fmt.Errorf("failed to create iptables handler: %w", err)
Expand Down
5 changes: 2 additions & 3 deletions pkg/controllers/proxy/hairpin_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,15 +75,14 @@ func (hpc *hairpinController) ensureHairpinEnabledForPodInterface(endpointIP str
// WARN: This method is deprecated and will be removed once docker-shim is removed from kubelet.
pid, err = hpc.nsc.ln.getContainerPidWithDocker(containerID)
if err != nil {
return fmt.Errorf("failed to prepare endpoint %s to do direct server return due to %v",
endpointIP, err)
return fmt.Errorf("failed to get pod's (%s) pid for hairpinning due to %v", endpointIP, err)
}
} else {
// We expect CRI compliant runtimes here
// ugly workaround, refactoring of pkg/Proxy is required
pid, err = hpc.nsc.ln.getContainerPidWithCRI(hpc.nsc.dsr.runtimeEndpoint, containerID)
if err != nil {
return fmt.Errorf("failed to prepare endpoint %s to do DSR due to: %v", endpointIP, err)
return fmt.Errorf("failed to get pod's (%s) pid for hairpinning due to %v", endpointIP, err)
}
}
klog.V(2).Infof("Found PID %d for endpoint IP %s", pid, endpointIP)
Expand Down
144 changes: 91 additions & 53 deletions pkg/controllers/proxy/network_services_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -1782,6 +1782,16 @@ func (nsc *NetworkServicesController) Cleanup() {
handle.Close()
}

// In prep for further steps make sure that ipset and iptables handlers are created
if len(nsc.iptablesCmdHandlers) < 1 {
// Even though we have a config at this point (via passed param), we want to send nil so that the node will
// discover which IP address families it has and act accordingly
err = nsc.setupHandlers(nil, nil)
if err != nil {
klog.Errorf("could not cleanup because we couldn't create iptables/ipset command handlers due to: %v", err)
}
}

// cleanup iptables masquerade rule
err = nsc.deleteMasqueradeIptablesRule()
if err != nil {
Expand All @@ -1790,15 +1800,21 @@ func (nsc *NetworkServicesController) Cleanup() {
}

// cleanup iptables hairpin rules
err = nsc.deleteHairpinIptablesRules(v1.IPv4Protocol)
if err != nil {
klog.Errorf("Failed to cleanup iptables hairpin rules: %s", err.Error())
return
if _, ok := nsc.iptablesCmdHandlers[v1.IPv4Protocol]; ok {
klog.Info("Processing IPv4 hairpin rule cleanup")
err = nsc.deleteHairpinIptablesRules(v1.IPv4Protocol)
if err != nil {
klog.Errorf("Failed to cleanup iptables hairpin rules: %s", err.Error())
return
}
}
err = nsc.deleteHairpinIptablesRules(v1.IPv6Protocol)
if err != nil {
klog.Errorf("Failed to cleanup iptables hairpin rules: %s", err.Error())
return
if _, ok := nsc.iptablesCmdHandlers[v1.IPv6Protocol]; ok {
klog.Info("Processing IPv6 hairpin rule cleanup")
err = nsc.deleteHairpinIptablesRules(v1.IPv6Protocol)
if err != nil {
klog.Errorf("Failed to cleanup iptables hairpin rules: %s", err.Error())
return
}
}

nsc.cleanupIpvsFirewall()
Expand Down Expand Up @@ -1927,6 +1943,70 @@ func (nsc *NetworkServicesController) handleServiceDelete(obj interface{}) {
nsc.OnServiceUpdate(service)
}

// setupHandlers discovers the node's IP address families (when a node is
// available) and creates the per-family iptables and ipset handlers that the
// controller relies on.
//
// node being nil covers the case where this function is called by something
// that doesn't have a kube-apiserver connection, like the cleanup code. In
// that instance we skip address discovery and create all possible iptables
// and ipset handlers so cleanup can operate on both families.
func (nsc *NetworkServicesController) setupHandlers(config *options.KubeRouterConfig, node *v1.Node) error {
	if node != nil {
		nsc.nodeIPv4Addrs, nsc.nodeIPv6Addrs = utils.GetAllNodeIPs(node)
	}

	// We test for nil configs as the Cleanup() method often doesn't have a valid config in this respect, so rather
	// than trying to guess options, it is better to just let the logic fall through. For the primary path to this
	// func, NewNetworkServicesController, the config will not be nil and we want to check that we have options that
	// match the node's capability to ensure sanity later down the road.
	if config != nil {
		if config.EnableIPv4 && len(nsc.nodeIPv4Addrs[v1.NodeInternalIP]) < 1 &&
			len(nsc.nodeIPv4Addrs[v1.NodeExternalIP]) < 1 {
			return fmt.Errorf("IPv4 was enabled, but no IPv4 address was found on the node")
		}
	}
	nsc.isIPv4Capable = len(nsc.nodeIPv4Addrs) > 0
	if config != nil {
		if config.EnableIPv6 && len(nsc.nodeIPv6Addrs[v1.NodeInternalIP]) < 1 &&
			len(nsc.nodeIPv6Addrs[v1.NodeExternalIP]) < 1 {
			return fmt.Errorf("IPv6 was enabled, but no IPv6 address was found on the node")
		}
	}
	nsc.isIPv6Capable = len(nsc.nodeIPv6Addrs) > 0

	nsc.ipSetHandlers = make(map[v1.IPFamily]utils.IPSetHandler)
	nsc.iptablesCmdHandlers = make(map[v1.IPFamily]utils.IPTablesHandler)
	if node == nil || len(nsc.nodeIPv4Addrs) > 0 {
		iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
		if err != nil {
			// Log at Error (not Fatal) severity: Fatalf would exit the process — which
			// must not happen on the Cleanup() path — and made this return unreachable.
			klog.Errorf("Failed to allocate IPv4 iptables handler: %v", err)
			return fmt.Errorf("failed to create iptables handler: %w", err)
		}
		nsc.iptablesCmdHandlers[v1.IPv4Protocol] = iptHandler

		ipset, err := utils.NewIPSet(false)
		if err != nil {
			klog.Errorf("Failed to allocate IPv4 ipset handler: %v", err)
			return fmt.Errorf("failed to create ipset handler: %w", err)
		}
		nsc.ipSetHandlers[v1.IPv4Protocol] = ipset
	}
	if node == nil || len(nsc.nodeIPv6Addrs) > 0 {
		iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
		if err != nil {
			klog.Errorf("Failed to allocate IPv6 iptables handler: %v", err)
			return fmt.Errorf("failed to create iptables handler: %w", err)
		}
		nsc.iptablesCmdHandlers[v1.IPv6Protocol] = iptHandler

		ipset, err := utils.NewIPSet(true)
		if err != nil {
			klog.Errorf("Failed to allocate IPv6 ipset handler: %v", err)
			return fmt.Errorf("failed to create ipset handler: %w", err)
		}
		nsc.ipSetHandlers[v1.IPv6Protocol] = ipset
	}

	return nil
}

// NewNetworkServicesController returns NetworkServicesController object
func NewNetworkServicesController(clientset kubernetes.Interface,
config *options.KubeRouterConfig, svcInformer cache.SharedIndexInformer,
Expand Down Expand Up @@ -2021,51 +2101,9 @@ func NewNetworkServicesController(clientset kubernetes.Interface,
return nil, err
}

// Here we test to see whether the node is IPv6 capable, if the user has enabled IPv6 (via command-line options)
// and the node has an IPv6 address, the following method will return an IPv6 address
nsc.nodeIPv4Addrs, nsc.nodeIPv6Addrs = utils.GetAllNodeIPs(node)
if config.EnableIPv4 && len(nsc.nodeIPv4Addrs[v1.NodeInternalIP]) < 1 &&
len(nsc.nodeIPv4Addrs[v1.NodeExternalIP]) < 1 {
return nil, fmt.Errorf("IPv4 was enabled, but no IPv4 address was found on the node")
}
nsc.isIPv4Capable = len(nsc.nodeIPv4Addrs) > 0
if config.EnableIPv6 && len(nsc.nodeIPv6Addrs[v1.NodeInternalIP]) < 1 &&
len(nsc.nodeIPv6Addrs[v1.NodeExternalIP]) < 1 {
return nil, fmt.Errorf("IPv6 was enabled, but no IPv6 address was found on the node")
}
nsc.isIPv6Capable = len(nsc.nodeIPv6Addrs) > 0

nsc.ipSetHandlers = make(map[v1.IPFamily]utils.IPSetHandler)
nsc.iptablesCmdHandlers = make(map[v1.IPFamily]utils.IPTablesHandler)
if len(nsc.nodeIPv4Addrs) > 0 {
iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
if err != nil {
klog.Fatalf("Failed to allocate IPv4 iptables handler: %v", err)
return nil, fmt.Errorf("failed to create iptables handler: %w", err)
}
nsc.iptablesCmdHandlers[v1.IPv4Protocol] = iptHandler

ipset, err := utils.NewIPSet(false)
if err != nil {
klog.Fatalf("Failed to allocate IPv4 ipset handler: %v", err)
return nil, fmt.Errorf("failed to create ipset handler: %w", err)
}
nsc.ipSetHandlers[v1.IPv4Protocol] = ipset
}
if len(nsc.nodeIPv6Addrs) > 0 {
iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
klog.Fatalf("Failed to allocate IPv6 iptables handler: %v", err)
return nil, fmt.Errorf("failed to create iptables handler: %w", err)
}
nsc.iptablesCmdHandlers[v1.IPv6Protocol] = iptHandler

ipset, err := utils.NewIPSet(true)
if err != nil {
klog.Fatalf("Failed to allocate IPv6 ipset handler: %v", err)
return nil, fmt.Errorf("failed to create ipset handler: %w", err)
}
nsc.ipSetHandlers[v1.IPv6Protocol] = ipset
err = nsc.setupHandlers(config, node)
if err != nil {
return nil, err
}

automtu, err := utils.GetMTUFromNodeIP(nsc.primaryIP)
Expand Down
100 changes: 62 additions & 38 deletions pkg/controllers/routing/network_routes_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -896,6 +896,17 @@ func (nrc *NetworkRoutingController) setupOverlayTunnel(tunnelName string, nextH
func (nrc *NetworkRoutingController) Cleanup() {
klog.Infof("Cleaning up NetworkRoutesController configurations")

// In prep for further steps make sure that ipset and iptables handlers are created
if len(nrc.iptablesCmdHandlers) < 1 {
// Even though we have a config at this point (via passed param), we want to send nil so that the node will
// discover which IP address families it has and act accordingly
err := nrc.setupHandlers(nil)
if err != nil {
klog.Errorf("could not cleanup because iptables/ipset handlers could not be created due to: %v", err)
return
}
}

// Pod egress cleanup
err := nrc.deletePodEgressRule()
if err != nil {
Expand Down Expand Up @@ -1359,7 +1370,55 @@ func (nrc *NetworkRoutingController) startBgpServer(grpcServer bool) error {
return nil
}

// setupHandlers resolves the node's pod CIDRs (when a node is available) and
// creates the per-family iptables and ipset handlers that the controller
// relies on.
//
// node being nil covers the case where this function is called by something
// that doesn't have a kube-apiserver connection, like the cleanup code. In
// that instance we skip CIDR discovery and create all possible iptables and
// ipset handlers so cleanup can operate on both families.
func (nrc *NetworkRoutingController) setupHandlers(node *v1core.Node) error {
	var err error

	if node != nil {
		nrc.podIPv4CIDRs, nrc.podIPv6CIDRs, err = utils.GetPodCIDRsFromNodeSpecDualStack(node)
		if err != nil {
			// Log at Error (not Fatal) severity: Fatalf would exit the process — which
			// must not happen on the Cleanup() path — and made this return unreachable.
			klog.Errorf("Failed to get pod CIDRs from node spec. kube-router relies on kube-controller-manager to"+
				"allocate pod CIDRs for the node or an annotation `kube-router.io/pod-cidrs`. Error: %v", err)
			return fmt.Errorf("failed to get pod CIDRs detail from Node.spec: %v", err)
		}
	}

	nrc.iptablesCmdHandlers = make(map[v1core.IPFamily]utils.IPTablesHandler)
	nrc.ipSetHandlers = make(map[v1core.IPFamily]utils.IPSetHandler)
	if node == nil || len(nrc.nodeIPv4Addrs) > 0 {
		iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
		if err != nil {
			klog.Errorf("Failed to allocate IPv4 iptables handler: %v", err)
			return fmt.Errorf("failed to create iptables handler: %w", err)
		}
		nrc.iptablesCmdHandlers[v1core.IPv4Protocol] = iptHandler

		ipset, err := utils.NewIPSet(false)
		if err != nil {
			klog.Errorf("Failed to allocate IPv4 ipset handler: %v", err)
			return fmt.Errorf("failed to create ipset handler: %w", err)
		}
		nrc.ipSetHandlers[v1core.IPv4Protocol] = ipset
	}
	if node == nil || len(nrc.nodeIPv6Addrs) > 0 {
		iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
		if err != nil {
			klog.Errorf("Failed to allocate IPv6 iptables handler: %v", err)
			return fmt.Errorf("failed to create iptables handler: %w", err)
		}
		nrc.iptablesCmdHandlers[v1core.IPv6Protocol] = iptHandler

		ipset, err := utils.NewIPSet(true)
		if err != nil {
			klog.Errorf("Failed to allocate IPv6 ipset handler: %v", err)
			return fmt.Errorf("failed to create ipset handler: %w", err)
		}
		nrc.ipSetHandlers[v1core.IPv6Protocol] = ipset
	}

	return nil
}

// NewNetworkRoutingController returns new NetworkRoutingController object
func NewNetworkRoutingController(clientset kubernetes.Interface,
Expand Down Expand Up @@ -1473,44 +1532,9 @@ func NewNetworkRoutingController(clientset kubernetes.Interface,
}
nrc.podCidr = cidr

nrc.podIPv4CIDRs, nrc.podIPv6CIDRs, err = utils.GetPodCIDRsFromNodeSpecDualStack(node)
err = nrc.setupHandlers(node)
if err != nil {
klog.Fatalf("Failed to get pod CIDRs from node spec. kube-router relies on kube-controller-manager to"+
"allocate pod CIDRs for the node or an annotation `kube-router.io/pod-cidrs`. Error: %v", err)
return nil, fmt.Errorf("failed to get pod CIDRs detail from Node.spec: %v", err)
}

nrc.iptablesCmdHandlers = make(map[v1core.IPFamily]utils.IPTablesHandler)
nrc.ipSetHandlers = make(map[v1core.IPFamily]utils.IPSetHandler)
if len(nrc.nodeIPv4Addrs) > 0 {
iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
if err != nil {
klog.Fatalf("Failed to allocate IPv4 iptables handler: %v", err)
return nil, fmt.Errorf("failed to create iptables handler: %w", err)
}
nrc.iptablesCmdHandlers[v1core.IPv4Protocol] = iptHandler

ipset, err := utils.NewIPSet(false)
if err != nil {
klog.Fatalf("Failed to allocate IPv4 ipset handler: %v", err)
return nil, fmt.Errorf("failed to create ipset handler: %w", err)
}
nrc.ipSetHandlers[v1core.IPv4Protocol] = ipset
}
if len(nrc.nodeIPv6Addrs) > 0 {
iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
klog.Fatalf("Failed to allocate IPv6 iptables handler: %v", err)
return nil, fmt.Errorf("failed to create iptables handler: %w", err)
}
nrc.iptablesCmdHandlers[v1core.IPv6Protocol] = iptHandler

ipset, err := utils.NewIPSet(true)
if err != nil {
klog.Fatalf("Failed to allocate IPv6 ipset handler: %v", err)
return nil, fmt.Errorf("failed to create ipset handler: %w", err)
}
nrc.ipSetHandlers[v1core.IPv6Protocol] = ipset
return nil, err
}

for _, handler := range nrc.ipSetHandlers {
Expand Down

0 comments on commit ecaad2c

Please sign in to comment.