Skip to content

Commit

Permalink
Fix ClusterSet scope policy rule not compatible with namespaces field
Browse files Browse the repository at this point in the history
Signed-off-by: Dyanngg <dingyang@vmware.com>
  • Loading branch information
Dyanngg committed Jan 18, 2023
1 parent 8961420 commit c587578
Show file tree
Hide file tree
Showing 7 changed files with 93 additions and 57 deletions.
12 changes: 11 additions & 1 deletion multicluster/apis/multicluster/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

44 changes: 19 additions & 25 deletions pkg/controller/labelidentity/label_group_index.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ type Interface interface {
// GetLabelIdentityIDs retrieves the label identity IDs selected by the provided selectorItem keys.
GetLabelIdentityIDs(selectorKey string) []uint32
// SetPolicySelectors registers a policy's selectors with the index.
SetPolicySelectors(selectors []*types.GroupSelector, policyKey string) []uint32
SetPolicySelectors(selectors []*types.GroupSelector, policyKey string)
// DeletePolicySelectors removes any selectors from referring to the policy being deleted.
DeletePolicySelectors(policyKey string)
// AddLabelIdentity adds LabelIdentity-ID mapping to the index.
Expand Down Expand Up @@ -272,6 +272,20 @@ func (i *LabelIdentityIndex) AddSelector(selector *types.GroupSelector, policyKe
return i.getMatchedLabelIdentityIDs(sItem)
}

// DedupLabelIdentites removes duplicate IDs from labelIdentityIDs in-place,
// preserving first-occurrence order, and returns the trimmed slice.
// The input's backing array is reused; callers must not rely on the original
// contents after the call. (Note: "Identites" typo is kept for API compatibility.)
func DedupLabelIdentites(labelIdentityIDs []uint32) []uint32 {
	seen := make(map[uint32]struct{}, len(labelIdentityIDs))
	// Filter in place: deduped shares labelIdentityIDs' backing array and the
	// write index never overtakes the read index, so this is safe.
	deduped := labelIdentityIDs[:0]
	for _, id := range labelIdentityIDs {
		if _, dup := seen[id]; dup {
			continue
		}
		seen[id] = struct{}{}
		deduped = append(deduped, id)
	}
	return deduped
}

// DeleteSelector removes a selectorItem from referring to the policy being deleted.
func (i *LabelIdentityIndex) DeleteSelector(selectorKey string, policyKey string) {
i.lock.Lock()
Expand Down Expand Up @@ -301,43 +315,23 @@ func (i *LabelIdentityIndex) deleteSelector(selectorKey string, policyKey string

// SetPolicySelectors registers ClusterSet-scope policy selectors with the labelIdentityIndex,
// and then retrieves all the LabelIdentity IDs that currently match these selectors.
func (i *LabelIdentityIndex) SetPolicySelectors(selectors []*types.GroupSelector, policyKey string) []uint32 {
var labelIdentityIDs []uint32
func (i *LabelIdentityIndex) SetPolicySelectors(selectors []*types.GroupSelector, policyKey string) {
newSelectors := map[string]*types.GroupSelector{}
for _, s := range selectors {
klog.V(4).InfoS("Getting matched LabelIdentity for policy selector", "selector", s.NormalizedName, "policy", policyKey)
newSelectors[s.NormalizedName] = s
}
originalSelectors := i.getPolicySelectors(policyKey)
for selKey, sel := range newSelectors {
for selKey := range newSelectors {
if _, exists := originalSelectors[selKey]; exists {
// These clusterset-scoped selectors are already bound to the policy in labelIdentityIndex.
// We can simply read matched label identity IDs from the index.
selectedLabelIDs := i.GetLabelIdentityIDs(selKey)
labelIdentityIDs = append(labelIdentityIDs, selectedLabelIDs...)
// Remove matched clusterset-scoped selectors of the policy before and after the update.
// The selectors remaining in originalSelectors will need to be removed from the policy.
// Remove matched ClusterSet-scoped selectors of the policy before and after the update.
// The selectors remaining in originalSelectors will need to be unbound from the policy.
delete(originalSelectors, selKey)
} else {
selectedLabelIDs := i.AddSelector(sel, policyKey)
labelIdentityIDs = append(labelIdentityIDs, selectedLabelIDs...)
}
}
// The policy no longer has these selectors.
for selectorKey := range originalSelectors {
i.DeleteSelector(selectorKey, policyKey)
}
// Dedup label identity IDs in-place.
seen := map[uint32]struct{}{}
idx := 0
for _, id := range labelIdentityIDs {
if _, exists := seen[id]; !exists {
seen[id] = struct{}{}
labelIdentityIDs[idx] = id
idx++
}
}
return labelIdentityIDs[:idx]
}

func (i *LabelIdentityIndex) getPolicySelectors(policyKey string) map[string]*types.GroupSelector {
Expand Down
32 changes: 21 additions & 11 deletions pkg/controller/labelidentity/label_group_index_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -331,10 +331,16 @@ func TestSetPolicySelectors(t *testing.T) {
i.AddLabelIdentity(labelB, 2)
i.AddLabelIdentity(labelC, 3)
if tt.prevPolicyAdded != "" {
i.SetPolicySelectors(tt.prevSelAdded, tt.prevPolicyAdded)
for _, sel := range tt.prevSelAdded {
i.AddSelector(sel, tt.prevPolicyAdded)
}
}
var labelIDs []uint32
for _, sel := range tt.selectors {
labelIDs = append(labelIDs, i.AddSelector(sel, tt.policyKey)...)
}
matchedIDs := i.SetPolicySelectors(tt.selectors, tt.policyKey)
assert.ElementsMatch(t, tt.expIDs, matchedIDs)
i.SetPolicySelectors(tt.selectors, tt.policyKey)
assert.ElementsMatch(t, tt.expIDs, DedupLabelIdentites(labelIDs))
assert.Equalf(t, len(tt.expSelectorItems), len(i.selectorItems.List()), "Unexpected number of cached selectorItems")
for selKey, expSelItem := range tt.expSelectorItems {
s, exists, _ := i.selectorItems.GetByKey(selKey)
Expand Down Expand Up @@ -412,10 +418,12 @@ func TestAddLabelIdentity(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
i := NewLabelIdentityIndex()
i.SetPolicySelectors([]*types.GroupSelector{selectorA, selectorC}, "policyA")
i.SetPolicySelectors([]*types.GroupSelector{selectorB, selectorC}, "policyB")
i.SetPolicySelectors([]*types.GroupSelector{selectorD}, "policyD")
i.SetPolicySelectors([]*types.GroupSelector{selectorE}, "policyE")
i.AddSelector(selectorA, "policyA")
i.AddSelector(selectorC, "policyA")
i.AddSelector(selectorB, "policyB")
i.AddSelector(selectorC, "policyB")
i.AddSelector(selectorD, "policyD")
i.AddSelector(selectorE, "policyE")
stopCh := make(chan struct{})
defer close(stopCh)

Expand Down Expand Up @@ -504,10 +512,12 @@ func TestDeleteLabelIdentity(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
i := NewLabelIdentityIndex()
i.SetPolicySelectors([]*types.GroupSelector{selectorA, selectorC}, "policyA")
i.SetPolicySelectors([]*types.GroupSelector{selectorB, selectorC}, "policyB")
i.SetPolicySelectors([]*types.GroupSelector{selectorD}, "policyD")
i.SetPolicySelectors([]*types.GroupSelector{selectorE}, "policyE")
i.AddSelector(selectorA, "policyA")
i.AddSelector(selectorC, "policyA")
i.AddSelector(selectorB, "policyB")
i.AddSelector(selectorC, "policyB")
i.AddSelector(selectorD, "policyD")
i.AddSelector(selectorE, "policyE")
stopCh := make(chan struct{})
defer close(stopCh)

Expand Down
8 changes: 6 additions & 2 deletions pkg/controller/networkpolicy/antreanetworkpolicy.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,7 @@ func (n *NetworkPolicyController) processAntreaNetworkPolicy(np *crdv1alpha1.Net
appliedToGroups := map[string]*antreatypes.AppliedToGroup{}
addressGroups := map[string]*antreatypes.AddressGroup{}
rules := make([]controlplane.NetworkPolicyRule, 0, len(np.Spec.Ingress)+len(np.Spec.Egress))
var clusterSetScopeSelectors []*antreatypes.GroupSelector
// Create AppliedToGroup for each AppliedTo present in AntreaNetworkPolicy spec.
atgs := n.processAppliedTo(np.Namespace, np.Spec.AppliedTo)
appliedToGroups = mergeAppliedToGroups(appliedToGroups, atgs...)
Expand All @@ -96,7 +97,7 @@ func (n *NetworkPolicyController) processAntreaNetworkPolicy(np *crdv1alpha1.Net
// Create AppliedToGroup for each AppliedTo present in the ingress rule.
atgs := n.processAppliedTo(np.Namespace, ingressRule.AppliedTo)
appliedToGroups = mergeAppliedToGroups(appliedToGroups, atgs...)
peer, ags := n.toAntreaPeerForCRD(ingressRule.From, np, controlplane.DirectionIn, namedPortExists)
peer, ags := n.toAntreaPeerForCRD(ingressRule.From, np, controlplane.DirectionIn, namedPortExists, clusterSetScopeSelectors)
addressGroups = mergeAddressGroups(addressGroups, ags...)
rules = append(rules, controlplane.NetworkPolicyRule{
Direction: controlplane.DirectionIn,
Expand All @@ -122,7 +123,7 @@ func (n *NetworkPolicyController) processAntreaNetworkPolicy(np *crdv1alpha1.Net
peer = n.svcRefToPeerForCRD(egressRule.ToServices, np.Namespace)
} else {
var ags []*antreatypes.AddressGroup
peer, ags = n.toAntreaPeerForCRD(egressRule.To, np, controlplane.DirectionOut, namedPortExists)
peer, ags = n.toAntreaPeerForCRD(egressRule.To, np, controlplane.DirectionOut, namedPortExists, clusterSetScopeSelectors)
addressGroups = mergeAddressGroups(addressGroups, ags...)
}
rules = append(rules, controlplane.NetworkPolicyRule{
Expand Down Expand Up @@ -154,6 +155,9 @@ func (n *NetworkPolicyController) processAntreaNetworkPolicy(np *crdv1alpha1.Net
TierPriority: &tierPriority,
AppliedToPerRule: appliedToPerRule,
}
if n.stretchNPEnabled {
n.labelIdentityInterface.SetPolicySelectors(clusterSetScopeSelectors, internalNetworkPolicyKeyFunc(np))
}
return internalNetworkPolicy, appliedToGroups, addressGroups
}

Expand Down
12 changes: 8 additions & 4 deletions pkg/controller/networkpolicy/clusternetworkpolicy.go
Original file line number Diff line number Diff line change
Expand Up @@ -339,6 +339,7 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1alpha1.C
var clusterAppliedToAffectedNS []string
// atgForNamespace is the appliedToGroups split by Namespaces.
var atgForNamespace []*antreatypes.AppliedToGroup
var clusterSetScopeSelectors []*antreatypes.GroupSelector
if hasPerNamespaceRule && len(cnp.Spec.AppliedTo) > 0 {
for _, at := range cnp.Spec.AppliedTo {
if at.ServiceAccount != nil {
Expand Down Expand Up @@ -396,7 +397,7 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1alpha1.C
if cnpRule.ToServices != nil {
addRule(n.svcRefToPeerForCRD(cnpRule.ToServices, ""), nil, direction, ruleATGs)
} else {
peer, ags := n.toAntreaPeerForCRD(clusterPeers, cnp, direction, namedPortExists)
peer, ags := n.toAntreaPeerForCRD(clusterPeers, cnp, direction, namedPortExists, clusterSetScopeSelectors)
addRule(peer, ags, direction, ruleATGs)
}
}
Expand All @@ -405,7 +406,7 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1alpha1.C
// Create a rule for each affected Namespace of appliedTo at spec level
for i := range clusterAppliedToAffectedNS {
klog.V(4).Infof("Adding a new per-namespace rule with appliedTo %v for rule %d of %s", clusterAppliedToAffectedNS[i], idx, cnp.Name)
peer, ags := n.toNamespacedPeerForCRD(perNSPeers, clusterAppliedToAffectedNS[i])
peer, ags := n.toNamespacedPeerForCRD(perNSPeers, cnp, clusterAppliedToAffectedNS[i], clusterSetScopeSelectors)
addRule(peer, ags, direction, []*antreatypes.AppliedToGroup{atgForNamespace[i]})
}
} else {
Expand All @@ -414,14 +415,14 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1alpha1.C
if at.ServiceAccount != nil {
atg := n.createAppliedToGroup(at.ServiceAccount.Namespace, serviceAccountNameToPodSelector(at.ServiceAccount.Name), nil, nil)
klog.V(4).Infof("Adding a new per-namespace rule with appliedTo %v for rule %d of %s", atg, idx, cnp.Name)
peer, ags := n.toNamespacedPeerForCRD(perNSPeers, at.ServiceAccount.Namespace)
peer, ags := n.toNamespacedPeerForCRD(perNSPeers, cnp, at.ServiceAccount.Namespace, clusterSetScopeSelectors)
addRule(peer, ags, direction, []*antreatypes.AppliedToGroup{atg})
} else {
affectedNS := n.getAffectedNamespacesForAppliedTo(at)
for _, ns := range affectedNS {
atg := n.createAppliedToGroup(ns, at.PodSelector, nil, at.ExternalEntitySelector)
klog.V(4).Infof("Adding a new per-namespace rule with appliedTo %v for rule %d of %s", atg, idx, cnp.Name)
peer, ags := n.toNamespacedPeerForCRD(perNSPeers, ns)
peer, ags := n.toNamespacedPeerForCRD(perNSPeers, cnp, ns, clusterSetScopeSelectors)
addRule(peer, ags, direction, []*antreatypes.AppliedToGroup{atg})
}
}
Expand Down Expand Up @@ -454,6 +455,9 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1alpha1.C
TierPriority: &tierPriority,
AppliedToPerRule: appliedToPerRule,
}
if n.stretchNPEnabled {
n.labelIdentityInterface.SetPolicySelectors(clusterSetScopeSelectors, internalNetworkPolicyKeyFunc(cnp))
}
return internalNetworkPolicy, appliedToGroups, addressGroups
}

Expand Down
40 changes: 27 additions & 13 deletions pkg/controller/networkpolicy/crd_utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ import (
"antrea.io/antrea/pkg/apis/controlplane"
"antrea.io/antrea/pkg/apis/crd/v1alpha1"
crdv1alpha3 "antrea.io/antrea/pkg/apis/crd/v1alpha3"
"antrea.io/antrea/pkg/controller/labelidentity"
antreatypes "antrea.io/antrea/pkg/controller/types"
"antrea.io/antrea/pkg/util/k8s"
)
Expand Down Expand Up @@ -140,8 +141,12 @@ func toAntreaIPBlockForCRD(ipBlock *v1alpha1.IPBlock) (*controlplane.IPBlock, er
// It is used when peer's Namespaces are not matched by NamespaceMatchTypes, for which the controlplane
// NetworkPolicyPeers will need to be created on a per Namespace basis.
func (n *NetworkPolicyController) toAntreaPeerForCRD(peers []v1alpha1.NetworkPolicyPeer,
np metav1.Object, dir controlplane.Direction, namedPortExists bool) (*controlplane.NetworkPolicyPeer, []*antreatypes.AddressGroup) {
np metav1.Object, dir controlplane.Direction, namedPortExists bool,
clusterSetScopeSelectors []*antreatypes.GroupSelector) (*controlplane.NetworkPolicyPeer, []*antreatypes.AddressGroup) {
var addressGroups []*antreatypes.AddressGroup
var ipBlocks []controlplane.IPBlock
var fqdns []string
var labelIdentities []uint32
// NetworkPolicyPeer is supposed to match all addresses when it is empty and no clusterGroup is present.
// It's treated as an IPBlock "0.0.0.0/0".
if len(peers) == 0 {
Expand All @@ -160,9 +165,6 @@ func (n *NetworkPolicyController) toAntreaPeerForCRD(peers []v1alpha1.NetworkPol
podsPeer.AddressGroups = append(podsPeer.AddressGroups, allPodsGroup.Name)
return &podsPeer, addressGroups
}
var ipBlocks []controlplane.IPBlock
var fqdns []string
var clusterSetScopeSelectors []*antreatypes.GroupSelector
for _, peer := range peers {
// A v1alpha1.NetworkPolicyPeer will have exactly one of the following fields set:
// - podSelector and/or namespaceSelector (in-cluster scope or ClusterSet scope)
Expand Down Expand Up @@ -194,27 +196,39 @@ func (n *NetworkPolicyController) toAntreaPeerForCRD(peers []v1alpha1.NetworkPol
addressGroup := n.createAddressGroup(np.GetNamespace(), peer.PodSelector, peer.NamespaceSelector, peer.ExternalEntitySelector, nil)
addressGroups = append(addressGroups, addressGroup)
}
if peer.Scope == v1alpha1.ScopeClusterSet {
clusterSetScopeSelectors = append(clusterSetScopeSelectors, antreatypes.NewGroupSelector(np.GetNamespace(), peer.PodSelector, peer.NamespaceSelector, nil, nil))
if n.stretchNPEnabled && peer.Scope == v1alpha1.ScopeClusterSet {
newClusterSetScopeSelector := antreatypes.NewGroupSelector(np.GetNamespace(), peer.PodSelector, peer.NamespaceSelector, nil, nil)
clusterSetScopeSelectors = append(clusterSetScopeSelectors, newClusterSetScopeSelector)
labelIdentities = append(labelIdentities, n.labelIdentityInterface.AddSelector(newClusterSetScopeSelector, internalNetworkPolicyKeyFunc(np))...)
}
}
var labelIdentities []uint32
if n.stretchNPEnabled {
labelIdentities = n.labelIdentityInterface.SetPolicySelectors(clusterSetScopeSelectors, internalNetworkPolicyKeyFunc(np))
}
return &controlplane.NetworkPolicyPeer{AddressGroups: getAddressGroupNames(addressGroups), IPBlocks: ipBlocks, FQDNs: fqdns, LabelIdentities: labelIdentities}, addressGroups
return &controlplane.NetworkPolicyPeer{
AddressGroups: getAddressGroupNames(addressGroups),
IPBlocks: ipBlocks,
FQDNs: fqdns,
LabelIdentities: labelidentity.DedupLabelIdentites(labelIdentities),
}, addressGroups
}

// toNamespacedPeerForCRD creates an Antrea controlplane NetworkPolicyPeer for crdv1alpha1 NetworkPolicyPeer
// for a particular Namespace. It is used when a single crdv1alpha1 NetworkPolicyPeer maps to multiple
// controlplane NetworkPolicyPeers because the appliedTo workloads reside in different Namespaces.
func (n *NetworkPolicyController) toNamespacedPeerForCRD(peers []v1alpha1.NetworkPolicyPeer, namespace string) (*controlplane.NetworkPolicyPeer, []*antreatypes.AddressGroup) {
func (n *NetworkPolicyController) toNamespacedPeerForCRD(peers []v1alpha1.NetworkPolicyPeer,
np metav1.Object, namespace string, clusterSetScopeSelectors []*antreatypes.GroupSelector) (*controlplane.NetworkPolicyPeer, []*antreatypes.AddressGroup) {
var addressGroups []*antreatypes.AddressGroup
var labelIdentities []uint32
for _, peer := range peers {
addressGroup := n.createAddressGroup(namespace, peer.PodSelector, nil, peer.ExternalEntitySelector, nil)
addressGroups = append(addressGroups, addressGroup)
if n.stretchNPEnabled && peer.Scope == v1alpha1.ScopeClusterSet {
newClusterSetScopeSelector := antreatypes.NewGroupSelector(namespace, peer.PodSelector, nil, peer.ExternalEntitySelector, nil)
clusterSetScopeSelectors = append(clusterSetScopeSelectors, newClusterSetScopeSelector)
labelIdentities = append(labelIdentities, n.labelIdentityInterface.AddSelector(newClusterSetScopeSelector, internalNetworkPolicyKeyFunc(np))...)
}
}
return &controlplane.NetworkPolicyPeer{AddressGroups: getAddressGroupNames(addressGroups)}, addressGroups
return &controlplane.NetworkPolicyPeer{
AddressGroups: getAddressGroupNames(addressGroups), LabelIdentities: labelidentity.DedupLabelIdentites(labelIdentities),
}, addressGroups
}

// svcRefToPeerForCRD creates an Antrea controlplane NetworkPolicyPeer from ServiceReferences in ToServices
Expand Down

0 comments on commit c587578

Please sign in to comment.