whitelist traffic to cluster IP and node ports in INPUT chain to bypass network policy enforcement #914
First changed file (network policy controller):
@@ -54,14 +54,16 @@ const (

 // NetworkPolicyController strcut to hold information required by NetworkPolicyController
 type NetworkPolicyController struct {
-    nodeIP net.IP
-    nodeHostName string
-    mu sync.Mutex
-    syncPeriod time.Duration
-    MetricsEnabled bool
-    v1NetworkPolicy bool
-    healthChan chan<- *healthcheck.ControllerHeartbeat
-    fullSyncRequestChan chan struct{}
+    nodeIP net.IP
+    nodeHostName string
+    serviceClusterIPRange string
+    serviceNodePortRange string
+    mu sync.Mutex
+    syncPeriod time.Duration
+    MetricsEnabled bool
+    v1NetworkPolicy bool
+    healthChan chan<- *healthcheck.ControllerHeartbeat
+    fullSyncRequestChan chan struct{}

     ipSetHandler *utils.IPSet
@@ -197,48 +199,65 @@ func (npc *NetworkPolicyController) ensureTopLevelChains() {

         glog.Fatalf("Failed to initialize iptables executor due to %s", err.Error())
     }

-    chains := map[string]string{"INPUT": kubeInputChainName, "FORWARD": kubeForwardChainName, "OUTPUT": kubeOutputChainName}
-
-    for builtinChain, customChain := range chains {
-        err = iptablesCmdHandler.NewChain("filter", customChain)
-        if err != nil && err.(*iptables.Error).ExitStatus() != 1 {
-            glog.Fatalf("Failed to run iptables command to create %s chain due to %s", customChain, err.Error())
-        }
-        args := []string{"-m", "comment", "--comment", "kube-router netpol", "-j", customChain}
-        exists, err := iptablesCmdHandler.Exists("filter", builtinChain, args...)
-        if err != nil {
-            glog.Fatalf("Failed to verify rule exists to jump to chain %s in %s chain due to %s", customChain, builtinChain, err.Error())
-        }
-        if !exists {
-            err := iptablesCmdHandler.Insert("filter", builtinChain, 1, args...)
-            if err != nil {
-                glog.Fatalf("Failed to run iptables command to insert in %s chain %s", builtinChain, err.Error())
-            }
-        } else {
-            rules, err := iptablesCmdHandler.List("filter", builtinChain)
-            if err != nil {
-                glog.Fatalf("failed to list rules in filter table %s chain due to %s", builtinChain, err.Error())
-            }
-            var ruleNo int
-            for i, rule := range rules {
-                if strings.Contains(rule, customChain) {
-                    ruleNo = i
-                    break
-                }
-            }
-            if ruleNo != 1 {
-                err = iptablesCmdHandler.Insert("filter", builtinChain, 1, args...)
-                if err != nil {
-                    glog.Fatalf("Failed to run iptables command to insert in %s chain %s", builtinChain, err.Error())
-                }
-                err = iptablesCmdHandler.Delete("filter", builtinChain, strconv.Itoa(ruleNo+1))
-                if err != nil {
-                    glog.Fatalf("Failed to delete wrong rule to jump to chain %s in %s chain due to %s", customChain, builtinChain, err.Error())
-                }
-            }
-        }
-    }
+    ensureRuleAtposition := func(chain string, ruleSpec []string, position int) {
+        rules, err := iptablesCmdHandler.List("filter", chain)
+        if err != nil {
+            glog.Fatalf("failed to list rules in filter table %s chain due to %s", chain, err.Error())
+        }
+
+        exists, err := iptablesCmdHandler.Exists("filter", chain, ruleSpec...)
+        if err != nil {
+            glog.Fatalf("Failed to verify rule exists in %s chain due to %s", chain, err.Error())
+        }
+        if !exists {
+            err := iptablesCmdHandler.Insert("filter", chain, position, ruleSpec...)
+            if err != nil {
+                glog.Fatalf("Failed to run iptables command to insert in %s chain %s", chain, err.Error())
+            }
+            return
+        }
+
+        var ruleNo int
+        for i, rule := range rules {
+            rule = strings.Replace(rule, "\"", "", 2) //removes quote from comment string
+            if strings.Contains(rule, strings.Join(ruleSpec, " ")) {
+                ruleNo = i
+                break
+            }
+        }
+        if ruleNo != position {
+            err = iptablesCmdHandler.Insert("filter", chain, position, ruleSpec...)
+            if err != nil {
+                glog.Fatalf("Failed to run iptables command to insert in %s chain %s", chain, err.Error())
+            }
+            err = iptablesCmdHandler.Delete("filter", chain, strconv.Itoa(ruleNo+1))
Reviewer (on the Delete-by-rule-number call): I know that you've talked about this in other places in the code, but at some point in the future we should really stop relying on deleting iptables rules by number (I know we do it all over the place). There is a possibility that, since we listed the rules, some other process has come in and manipulated iptables, causing us to potentially delete the wrong rule.

Author: Yes, there is a possibility. Perhaps holding the iptables lock followed by iptables-save and iptables-restore would be a safe solution. But if there are non-cooperating processes which do not use locks, we just have no way to control it with iptables. Such races have been a problem:
+            if err != nil {
+                glog.Fatalf("Failed to delete incorrect rule in %s chain due to %s", chain, err.Error())
+            }
+        }
+    }
+
+    chains := map[string]string{"INPUT": kubeInputChainName, "FORWARD": kubeForwardChainName, "OUTPUT": kubeOutputChainName}
+
+    for builtinChain, customChain := range chains {
+        err = iptablesCmdHandler.NewChain("filter", customChain)
+        if err != nil && err.(*iptables.Error).ExitStatus() != 1 {
+            glog.Fatalf("Failed to run iptables command to create %s chain due to %s", customChain, err.Error())
+        }
+        args := []string{"-m", "comment", "--comment", "kube-router netpol", "-j", customChain}
+        ensureRuleAtposition(builtinChain, args, 1)
+    }
+
+    whitelistServiceVips := []string{"-m", "comment", "--comment", "allow traffic to cluster IP", "-d", npc.serviceClusterIPRange, "-j", "RETURN"}
+    ensureRuleAtposition(kubeInputChainName, whitelistServiceVips, 1)
+
+    whitelistTCPNodeports := []string{"-p", "tcp", "-m", "comment", "--comment", "allow LOCAL traffic to node ports", "-m", "addrtype", "--dst-type", "LOCAL",
+        "-m", "multiport", "--dports", npc.serviceNodePortRange, "-j", "RETURN"}
+    ensureRuleAtposition(kubeInputChainName, whitelistTCPNodeports, 2)
+
+    whitelistUDPNodeports := []string{"-p", "udp", "-m", "comment", "--comment", "allow LOCAL traffic to node ports", "-m", "addrtype", "--dst-type", "LOCAL",
+        "-m", "multiport", "--dports", npc.serviceNodePortRange, "-j", "RETURN"}
+    ensureRuleAtposition(kubeInputChainName, whitelistUDPNodeports, 3)
+
 }

 // OnPodUpdate handles updates to pods from the Kubernetes api server
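Not part of the diff: a minimal sketch of the alternative the reviewer describes in the thread above, where the rule is removed by its exact spec instead of by rule number. Because iptables matches the spec itself, another process renumbering the chain between a List and a Delete cannot cause the wrong rule to be removed. The helper name is hypothetical; it assumes the same coreos/go-iptables library used in this file.

package sketch

import (
    "fmt"

    "github.com/coreos/go-iptables/iptables"
)

// ensureRuleAtPositionBySpec pins ruleSpec at the given position in a filter-table
// chain without ever deleting by rule number: if the rule already exists, it is
// deleted by its spec and re-inserted at the desired position.
func ensureRuleAtPositionBySpec(ipt *iptables.IPTables, chain string, ruleSpec []string, position int) error {
    exists, err := ipt.Exists("filter", chain, ruleSpec...)
    if err != nil {
        return fmt.Errorf("failed to check rule in %s chain: %v", chain, err)
    }
    if exists {
        // Delete by spec, not by number, so a concurrent renumbering of the
        // chain cannot make us remove an unrelated rule.
        if err := ipt.Delete("filter", chain, ruleSpec...); err != nil {
            return fmt.Errorf("failed to delete rule from %s chain: %v", chain, err)
        }
    }
    return ipt.Insert("filter", chain, position, ruleSpec...)
}

The trade-off is that the rule is unconditionally removed and re-added on every sync, so it is briefly absent even when it was already in the right place; whether that churn is acceptable depends on how often the sync runs.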
@@ -953,7 +972,7 @@ func (npc *NetworkPolicyController) syncPodFirewallChains(networkPoliciesInfo []

             return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
         }
         if !exists {
-            err := iptablesCmdHandler.Insert("filter", chain, 1, args...)
+            err := iptablesCmdHandler.AppendUnique("filter", chain, args...)
             if err != nil {
                 return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
             }
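For context (an aside, not from the PR): in coreos/go-iptables, Insert places a rule at an explicit position while AppendUnique appends it at the end of the chain only if an identical rule is not already present. Presumably the switch keeps the per-pod jump rules from displacing rules pinned at the top of the chain, though that motivation is an inference. A minimal sketch of the two calls, with a hypothetical chain and comment:

package sketch

import "github.com/coreos/go-iptables/iptables"

// addJumpRule contrasts the before/after calls. "EXAMPLE-POD-FW" is a
// placeholder, not a chain name used by kube-router.
func addJumpRule(ipt *iptables.IPTables, chain string, pinToTop bool) error {
    args := []string{"-m", "comment", "--comment", "example pod firewall jump", "-j", "EXAMPLE-POD-FW"}
    if pinToTop {
        // Old behaviour: insert at position 1, ahead of every other rule.
        return ipt.Insert("filter", chain, 1, args...)
    }
    // New behaviour: append at the end, and only if the rule is not already there.
    return ipt.AppendUnique("filter", chain, args...)
}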
@@ -1780,6 +1799,9 @@ func NewNetworkPolicyController(clientset kubernetes.Interface,

     // be up to date with all of the policy changes from any enqueued request after that
     npc.fullSyncRequestChan = make(chan struct{}, 1)

+    npc.serviceClusterIPRange = config.ClusterIPCIDR
+    npc.serviceNodePortRange = config.NodePortRange
+
     if config.MetricsEnabled {
         //Register the metrics for this controller
         prometheus.MustRegister(metrics.ControllerIptablesSyncTime)
Second changed file (kube-router config options):
@@ -23,6 +23,8 @@ type KubeRouterConfig struct {

     CleanupConfig bool
     ClusterAsn uint
     ClusterCIDR string
+    ClusterIPCIDR string
+    NodePortRange string
     DisableSrcDstCheck bool
     EnableCNI bool
     EnableiBGP bool
@@ -74,6 +76,8 @@ func NewKubeRouterConfig() *KubeRouterConfig {

         BGPGracefulRestartDeferralTime: 360 * time.Second,
         EnableOverlay: true,
         OverlayType: "subnet",
+        ClusterIPCIDR: "10.96.0.0/12",

Reviewer (on the ClusterIPCIDR default): Is this a safe guess for all of the users that run the daemonsets we publish?

Author: It's the default value Kubernetes uses.

+        NodePortRange: "30000:32767",
     }
 }
@@ -102,6 +106,10 @@ func (s *KubeRouterConfig) AddFlags(fs *pflag.FlagSet) {

         "CIDR range of pods in the cluster. It is used to identify traffic originating from and destinated to pods.")
     fs.StringSliceVar(&s.ExcludedCidrs, "excluded-cidrs", s.ExcludedCidrs,
         "Excluded CIDRs are used to exclude IPVS rules from deletion.")
+    fs.StringVar(&s.ClusterIPCIDR, "service-cluster-ip-range", s.ClusterIPCIDR,
+        "CIDR value from which service cluster IPs are assigned. Default: 10.96.0.0/12")
+    fs.StringVar(&s.NodePortRange, "service-node-port-range", s.NodePortRange,
+        "NodePort range. Default: 30000-32767")
     fs.BoolVar(&s.EnablePodEgress, "enable-pod-egress", true,
         "SNAT traffic from Pods to destinations outside the cluster.")
     fs.DurationVar(&s.IPTablesSyncPeriod, "iptables-sync-period", s.IPTablesSyncPeriod,
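A hedged usage example for the two new flags. The flag names and defaults come from the diff above; the exact kube-router invocation (including --run-firewall) and the point that the values should mirror the kube-apiserver's --service-cluster-ip-range and --service-node-port-range settings are assumptions, not text from the PR.

# Match the ranges the kube-apiserver was started with
# (--service-cluster-ip-range / --service-node-port-range).
kube-router --run-firewall=true \
    --service-cluster-ip-range=10.96.0.0/12 \
    --service-node-port-range=30000:32767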
Reviewer (on the Exists check in ensureRuleAtposition): Really minor nitpick, but we should probably do this existence check before the List call above. If the iptables rule doesn't exist, then there is no reason to do the list, and it would save us an iptables call.

Author: Agree. Will fix it.
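Not part of the diff: a sketch of what the agreed follow-up could look like, with the Exists check hoisted above List so the case where the rule is missing costs one iptables call less. It is written as a standalone function for readability; the PR keeps this logic as a closure over iptablesCmdHandler, and the error returns here stand in for the glog.Fatalf calls.

package sketch

import (
    "fmt"
    "strconv"
    "strings"

    "github.com/coreos/go-iptables/iptables"
)

// ensureRuleAtPosition checks existence first and only lists the chain when the
// rule is present and its position has to be verified.
func ensureRuleAtPosition(ipt *iptables.IPTables, chain string, ruleSpec []string, position int) error {
    exists, err := ipt.Exists("filter", chain, ruleSpec...)
    if err != nil {
        return fmt.Errorf("failed to verify rule in %s chain: %v", chain, err)
    }
    if !exists {
        // Rule is missing entirely: a single Insert, no List needed.
        return ipt.Insert("filter", chain, position, ruleSpec...)
    }

    rules, err := ipt.List("filter", chain)
    if err != nil {
        return fmt.Errorf("failed to list rules in %s chain: %v", chain, err)
    }
    var ruleNo int
    for i, rule := range rules {
        rule = strings.Replace(rule, "\"", "", 2) // List quotes the comment string
        if strings.Contains(rule, strings.Join(ruleSpec, " ")) {
            ruleNo = i
            break
        }
    }
    if ruleNo != position {
        if err := ipt.Insert("filter", chain, position, ruleSpec...); err != nil {
            return err
        }
        // The old copy shifted down one position after the Insert; this still
        // deletes by number, which the earlier review thread flags as racy.
        return ipt.Delete("filter", chain, strconv.Itoa(ruleNo+1))
    }
    return nil
}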