Skip to content

Commit

Permalink
Merge pull request #2423 from lionelvillard/dns-network-policies
Browse files Browse the repository at this point in the history
✨ Adding DNS network policies
  • Loading branch information
openshift-merge-robot committed Feb 9, 2023
2 parents 2a1c00a + 656ad4d commit ee81cfe
Show file tree
Hide file tree
Showing 15 changed files with 962 additions and 38 deletions.
28 changes: 28 additions & 0 deletions pkg/cliplugins/workload/plugin/sync_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,12 @@ rules:
- "list"
- "watch"
- "delete"
- apiGroups:
- ""
resources:
- endpoints
verbs:
- "get"
- apiGroups:
- "apiextensions.k8s.io"
resources:
Expand All @@ -68,6 +74,14 @@ rules:
- "get"
- "watch"
- "list"
- apiGroups:
- "networking.k8s.io"
resources:
- networkpolicies
verbs:
- "create"
- "list"
- "watch"
- apiGroups:
- ""
resources:
Expand Down Expand Up @@ -290,6 +304,12 @@ rules:
- "list"
- "watch"
- "delete"
- apiGroups:
- ""
resources:
- endpoints
verbs:
- "get"
- apiGroups:
- "apiextensions.k8s.io"
resources:
Expand All @@ -298,6 +318,14 @@ rules:
- "get"
- "watch"
- "list"
- apiGroups:
- "networking.k8s.io"
resources:
- networkpolicies
verbs:
- "create"
- "list"
- "watch"
- apiGroups:
- ""
resources:
Expand Down
14 changes: 14 additions & 0 deletions pkg/cliplugins/workload/plugin/syncer.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,12 @@ rules:
- "list"
- "watch"
- "delete"
- apiGroups:
- ""
resources:
- endpoints
verbs:
- "get"
- apiGroups:
- "apiextensions.k8s.io"
resources:
Expand All @@ -42,6 +48,14 @@ rules:
- "get"
- "watch"
- "list"
- apiGroups:
- "networking.k8s.io"
resources:
- networkpolicies
verbs:
- "create"
- "list"
- "watch"
{{- range $groupMapping := .GroupMappings}}
- apiGroups:
- "{{$groupMapping.APIGroup}}"
Expand Down
24 changes: 24 additions & 0 deletions pkg/syncer/shared/helpers.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,9 @@ package shared

import (
"crypto/sha256"
"encoding/json"
"fmt"
"math/big"
"strings"

"github.com/kcp-dev/logicalcluster/v3"
Expand Down Expand Up @@ -72,6 +74,28 @@ func GetDNSID(clusterName logicalcluster.Name, syncTargetUID types.UID, syncTarg
return fmt.Sprintf("kcp-dns-%s-%s-%s", syncTargetName, uid36hash[:8], workspace36hash[:8])
}

// GetTenantID encodes the KCP tenant to which the namespace designated by the given
// NamespaceLocator belongs. It is based on the NamespaceLocator, but with an empty
// namespace value. The value will be the same for all downstream namespaces originating
// from the same KCP workspace / SyncTarget.
// The encoding is repeatable.
func GetTenantID(l NamespaceLocator) (string, error) {
	// Keep only the SyncTarget and ClusterName: dropping the namespace makes the
	// resulting ID identical for every namespace of the same workspace/SyncTarget pair.
	scoped := NamespaceLocator{
		SyncTarget:  l.SyncTarget,
		ClusterName: l.ClusterName,
	}

	encoded, err := json.Marshal(scoped)
	if err != nil {
		return "", err
	}

	// Hash the canonical JSON form and render the digest in base 62 so the ID is
	// short, repeatable, and safe to use in label values.
	digest := sha256.Sum224(encoded)
	var n big.Int
	return n.SetBytes(digest[:]).Text(62), nil
}

func ContainsGVR(gvrs []schema.GroupVersionResource, gvr schema.GroupVersionResource) bool {
for _, item := range gvrs {
if gvr == item {
Expand Down
1 change: 1 addition & 0 deletions pkg/syncer/shared/namespace.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ import (

const (
NamespaceLocatorAnnotation = "kcp.io/namespace-locator"
TenantIDLabel = "kcp.io/tenant-id"
)

// NamespaceLocator stores a logical cluster and namespace and is used
Expand Down
58 changes: 49 additions & 9 deletions pkg/syncer/spec/dns/dns_process.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,9 @@ package dns

import (
"context"
"errors"
"sync"

"github.com/kcp-dev/logicalcluster/v3"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
Expand All @@ -30,6 +29,7 @@ import (
"k8s.io/client-go/kubernetes"
listersappsv1 "k8s.io/client-go/listers/apps/v1"
listerscorev1 "k8s.io/client-go/listers/core/v1"
listersnetworkingv1 "k8s.io/client-go/listers/networking/v1"
listersrbacv1 "k8s.io/client-go/listers/rbac/v1"
"k8s.io/klog/v2"

Expand All @@ -45,9 +45,10 @@ type DNSProcessor struct {
deploymentLister listersappsv1.DeploymentLister
serviceLister listerscorev1.ServiceLister
endpointLister listerscorev1.EndpointsLister
networkPolicyLister listersnetworkingv1.NetworkPolicyLister

syncTargetName string
syncTargetUID types.UID
syncTargetName string
dnsNamespace string // namespace containing all DNS objects
dnsImage string

Expand All @@ -63,8 +64,9 @@ func NewDNSProcessor(
deploymentLister listersappsv1.DeploymentLister,
serviceLister listerscorev1.ServiceLister,
endpointLister listerscorev1.EndpointsLister,
syncTargetName string,
networkPolicyLister listersnetworkingv1.NetworkPolicyLister,
syncTargetUID types.UID,
syncTargetName string,
dnsNamespace string,
dnsImage string) *DNSProcessor {
return &DNSProcessor{
Expand All @@ -75,8 +77,9 @@ func NewDNSProcessor(
deploymentLister: deploymentLister,
serviceLister: serviceLister,
endpointLister: endpointLister,
syncTargetName: syncTargetName,
networkPolicyLister: networkPolicyLister,
syncTargetUID: syncTargetUID,
syncTargetName: syncTargetName,
dnsNamespace: dnsNamespace,
dnsImage: dnsImage,
}
Expand All @@ -87,12 +90,12 @@ func NewDNSProcessor(
// are effectively reachable through the Service.
// It returns true if the DNS is setup and reachable, and returns an error if there was an error
// during the check or creation of the DNS-related resources.
func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, workspace logicalcluster.Name) (bool, error) {
func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, namespaceLocator shared.NamespaceLocator) (bool, error) {
logger := klog.FromContext(ctx)
logger.WithName("dns")
logger = logger.WithName("dns")

dnsID := shared.GetDNSID(workspace, d.syncTargetUID, d.syncTargetName)
logger.WithValues("name", dnsID, "namespace", d.dnsNamespace)
dnsID := shared.GetDNSID(namespaceLocator.ClusterName, d.syncTargetUID, d.syncTargetName)
logger = logger.WithValues("name", dnsID, "namespace", d.dnsNamespace)

logger.V(4).Info("checking if all dns objects exist and are up-to-date")
ctx = klog.NewContext(ctx, logger)
Expand Down Expand Up @@ -132,6 +135,10 @@ func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, workspace logica
if err := d.processService(ctx, dnsID); err != nil {
return false, err
}
if err := d.processNetworkPolicy(ctx, dnsID, namespaceLocator); err != nil {
return false, err
}

// Since the Endpoints resource was not found, the DNS is not yet ready,
// even though all the required resources have been created
// (deployment still needs to start).
Expand Down Expand Up @@ -233,6 +240,39 @@ func (d *DNSProcessor) processService(ctx context.Context, name string) error {
return nil
}

// processNetworkPolicy ensures a NetworkPolicy with the given name exists in the DNS
// namespace, creating it on first sight. The policy is scoped to the tenant derived
// from namespaceLocator and allows egress to the downstream kubernetes API endpoints.
// Errors are returned so the caller can retry; an already-existing policy is success.
func (d *DNSProcessor) processNetworkPolicy(ctx context.Context, name string, namespaceLocator shared.NamespaceLocator) error {
	logger := klog.FromContext(ctx)

	_, err := d.networkPolicyLister.NetworkPolicies(d.dnsNamespace).Get(name)
	if err == nil {
		// Policy already exists; nothing to do.
		return nil
	}
	if !apierrors.IsNotFound(err) {
		logger.Error(err, "failed to get NetworkPolicy (retrying)")
		return err
	}

	// The policy must allow the DNS pod to reach the downstream API server, so
	// resolve the kubernetes Service endpoints to concrete addresses first.
	kubeEndpoints, err := d.downstreamKubeClient.CoreV1().Endpoints("default").Get(ctx, "kubernetes", metav1.GetOptions{})
	if err != nil {
		return err
	}
	if len(kubeEndpoints.Subsets) == 0 || len(kubeEndpoints.Subsets[0].Addresses) == 0 {
		return errors.New("missing kubernetes API endpoints")
	}

	tenantID, err := shared.GetTenantID(namespaceLocator)
	if err != nil {
		return err
	}

	expected := MakeNetworkPolicy(name, d.dnsNamespace, tenantID, &kubeEndpoints.Subsets[0])
	if _, err := d.downstreamKubeClient.NetworkingV1().NetworkPolicies(d.dnsNamespace).Create(ctx, expected, metav1.CreateOptions{}); err != nil {
		// A concurrent creator winning the race is fine; anything else is retried.
		if apierrors.IsAlreadyExists(err) {
			return nil
		}
		logger.Error(err, "failed to create NetworkPolicy (retrying)")
		return err
	}
	logger.Info("NetworkPolicy created")

	return nil
}

func hasAtLeastOneReadyAddress(endpoints *corev1.Endpoints) bool {
for _, s := range endpoints.Subsets {
if len(s.Addresses) > 0 && s.Addresses[0].IP != "" {
Expand Down

0 comments on commit ee81cfe

Please sign in to comment.