Skip to content

Commit

Permalink
Merge pull request #1600 from ibihim/AUTH-442-psa_cluster_fleet_evaluation
Browse files Browse the repository at this point in the history

[4.14] OCPBUGS-25384: psa cluster fleet evaluation
  • Loading branch information
openshift-merge-bot[bot] committed Dec 14, 2023
2 parents 37fc6f9 + 5ddf356 commit 2a07e2d
Show file tree
Hide file tree
Showing 18 changed files with 1,284 additions and 20 deletions.
3 changes: 2 additions & 1 deletion go.mod
Expand Up @@ -15,7 +15,7 @@ require (
github.com/openshift/api v0.0.0-20230810152202-3e3f07aadec4
github.com/openshift/build-machinery-go v0.0.0-20230228230858-4cd708338479
github.com/openshift/client-go v0.0.0-20230503144108-75015d2347cb
github.com/openshift/library-go v0.0.0-20231103161458-0ec67489d123
github.com/openshift/library-go v0.0.0-20231213084759-840298df1eee
github.com/pkg/profile v1.5.0 // indirect
github.com/prometheus/client_golang v1.14.0
github.com/spf13/cobra v1.6.1
Expand All @@ -30,6 +30,7 @@ require (
k8s.io/client-go v0.27.4
k8s.io/component-base v0.27.4
k8s.io/klog/v2 v2.100.1
k8s.io/pod-security-admission v0.27.4
k8s.io/utils v0.0.0-20230726121419-3b25d923346b
sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96
)
Expand Down
6 changes: 4 additions & 2 deletions go.sum
Expand Up @@ -293,8 +293,8 @@ github.com/openshift/build-machinery-go v0.0.0-20230228230858-4cd708338479 h1:IU
github.com/openshift/build-machinery-go v0.0.0-20230228230858-4cd708338479/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
github.com/openshift/client-go v0.0.0-20230503144108-75015d2347cb h1:Nij5OnaECrkmcRQMAE9LMbQXPo95aqFnf+12B7SyFVI=
github.com/openshift/client-go v0.0.0-20230503144108-75015d2347cb/go.mod h1:Rhb3moCqeiTuGHAbXBOlwPubUMlOZEkrEWTRjIF3jzs=
github.com/openshift/library-go v0.0.0-20231103161458-0ec67489d123 h1:JfXG50f8yVud5xakwTHoqD00+3HYdLmZuEqn5Sq8ZRQ=
github.com/openshift/library-go v0.0.0-20231103161458-0ec67489d123/go.mod h1:ZFwNwC3opc/7aOvzUbU95zp33Lbxet48h80ryH3p6DY=
github.com/openshift/library-go v0.0.0-20231213084759-840298df1eee h1:pHWQZMxHsE841/QjJEfM4vNl+PrjcrdWk0Qn4MjNhuk=
github.com/openshift/library-go v0.0.0-20231213084759-840298df1eee/go.mod h1:ZFwNwC3opc/7aOvzUbU95zp33Lbxet48h80ryH3p6DY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
Expand Down Expand Up @@ -771,6 +771,8 @@ k8s.io/kube-aggregator v0.27.4 h1:WdK9iiBr32G8bWfpUEFVQl70RZO2dU19ZAktUXL5JFc=
k8s.io/kube-aggregator v0.27.4/go.mod h1:+eG83gkAyh0uilQEAOgheeQW4hr+PkyV+5O1nLGsjlM=
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg=
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg=
k8s.io/pod-security-admission v0.27.4 h1:AA32ID+ECNJoUU8yuzLt4WzKPDZg7zMmP2cZ9rVsFyE=
k8s.io/pod-security-admission v0.27.4/go.mod h1:GOcnrXk8TT5cPhtCxdlkOAvBnX3QmZiMHqPw9PbZhPs=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
Expand Down
80 changes: 80 additions & 0 deletions pkg/operator/podsecurityreadinesscontroller/conditions.go
@@ -0,0 +1,80 @@
package podsecurityreadinesscontroller

import (
"fmt"
"sort"
"strings"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"

operatorv1 "github.com/openshift/api/operator/v1"
"github.com/openshift/library-go/pkg/operator/v1helpers"
)

// Condition types reported on the operator status. Each one flags Pod
// Security Admission violations in a different class of namespace so that
// fleet evaluation can tell customer workloads apart from OpenShift-owned
// ones.
const (
	PodSecurityCustomerType     = "PodSecurityCustomerEvaluationConditionsDetected"
	PodSecurityOpenshiftType    = "PodSecurityOpenshiftEvaluationConditionsDetected"
	PodSecurityRunLevelZeroType = "PodSecurityRunLevelZeroEvaluationConditionsDetected"
)

var (
	// runLevelZeroNamespaces are the core run-level zero namespaces; they
	// are bucketed separately even though they are not "openshift"-prefixed.
	runLevelZeroNamespaces = sets.New[string](
		"default",
		"kube-system",
		"kube-public",
	)
)

// podSecurityOperatorConditions collects violating namespace names into one
// bucket per ownership class; each bucket becomes one operator condition.
type podSecurityOperatorConditions struct {
	violatingOpenShiftNamespaces    []string // "openshift"-prefixed names (excluding run-level zero)
	violatingRunLevelZeroNamespaces []string // members of runLevelZeroNamespaces
	violatingCustomerNamespaces     []string // everything else
}

// addViolation records a violating namespace in the bucket that matches its
// ownership class: run-level zero, OpenShift-prefixed, or customer.
func (c *podSecurityOperatorConditions) addViolation(name string) {
	switch {
	case runLevelZeroNamespaces.Has(name):
		c.violatingRunLevelZeroNamespaces = append(c.violatingRunLevelZeroNamespaces, name)
	case strings.HasPrefix(name, "openshift"):
		c.violatingOpenShiftNamespaces = append(c.violatingOpenShiftNamespaces, name)
	default:
		c.violatingCustomerNamespaces = append(c.violatingCustomerNamespaces, name)
	}
}

// makeCondition builds the operator condition for conditionType: True with
// the sorted list of violating namespaces in the message, or False with a
// placeholder reason when the list is empty.
func makeCondition(conditionType string, namespaces []string) operatorv1.OperatorCondition {
	if len(namespaces) == 0 {
		return operatorv1.OperatorCondition{
			Type:               conditionType,
			Status:             operatorv1.ConditionFalse,
			LastTransitionTime: metav1.Now(),
			Reason:             "ExpectedReason",
		}
	}

	// Sort a copy so the caller's slice is not reordered as a side effect.
	sorted := append([]string(nil), namespaces...)
	sort.Strings(sorted)

	return operatorv1.OperatorCondition{
		Type:               conditionType,
		Status:             operatorv1.ConditionTrue,
		LastTransitionTime: metav1.Now(),
		Reason:             "PSViolationsDetected",
		Message: fmt.Sprintf(
			"Violations detected in namespaces: %v",
			sorted,
		),
	}
}

// toConditionFuncs translates the collected buckets into status-update
// functions, one per condition type.
func (c *podSecurityOperatorConditions) toConditionFuncs() []v1helpers.UpdateStatusFunc {
	updates := make([]v1helpers.UpdateStatusFunc, 0, 3)
	for _, cond := range []operatorv1.OperatorCondition{
		makeCondition(PodSecurityCustomerType, c.violatingCustomerNamespaces),
		makeCondition(PodSecurityOpenshiftType, c.violatingOpenShiftNamespaces),
		makeCondition(PodSecurityRunLevelZeroType, c.violatingRunLevelZeroNamespaces),
	} {
		updates = append(updates, v1helpers.UpdateConditionFn(cond))
	}
	return updates
}
69 changes: 69 additions & 0 deletions pkg/operator/podsecurityreadinesscontroller/conditions_test.go
@@ -0,0 +1,69 @@
package podsecurityreadinesscontroller

import (
"testing"

operatorv1 "github.com/openshift/api/operator/v1"
)

// TestCondition covers makeCondition's two output shapes (violations
// present / absent) and addViolation's bucketing of a customer namespace.
func TestCondition(t *testing.T) {
	// assertEqual compares the fields makeCondition controls
	// deterministically; LastTransitionTime is intentionally ignored.
	assertEqual := func(t *testing.T, got, want operatorv1.OperatorCondition) {
		t.Helper()
		if got.Type != want.Type {
			t.Errorf("expected condition type %s, got %s", want.Type, got.Type)
		}
		if got.Status != want.Status {
			t.Errorf("expected condition status %s, got %s", want.Status, got.Status)
		}
		if got.Reason != want.Reason {
			t.Errorf("expected condition reason %s, got %s", want.Reason, got.Reason)
		}
		if got.Message != want.Message {
			t.Errorf("expected condition message %s, got %s", want.Message, got.Message)
		}
	}

	t.Run("with namespaces", func(t *testing.T) {
		condition := makeCondition(PodSecurityCustomerType, []string{"namespace1", "namespace2"})

		assertEqual(t, condition, operatorv1.OperatorCondition{
			Type:    PodSecurityCustomerType,
			Status:  operatorv1.ConditionTrue,
			Reason:  "PSViolationsDetected",
			Message: "Violations detected in namespaces: [namespace1 namespace2]",
		})
	})

	t.Run("without namespaces", func(t *testing.T) {
		condition := makeCondition(PodSecurityCustomerType, []string{})

		assertEqual(t, condition, operatorv1.OperatorCondition{
			Type:   PodSecurityCustomerType,
			Status: operatorv1.ConditionFalse,
			Reason: "ExpectedReason",
		})
	})

	t.Run("without anything", func(t *testing.T) {
		cond := podSecurityOperatorConditions{}
		cond.addViolation("hello world")

		// "hello world" is neither run-level zero nor "openshift"-prefixed,
		// so it must land in the customer bucket.
		if len(cond.violatingCustomerNamespaces) != 1 || cond.violatingCustomerNamespaces[0] != "hello world" {
			t.Errorf("expected customer violation for %q, got %v", "hello world", cond.violatingCustomerNamespaces)
		}
	})
}
@@ -0,0 +1,171 @@
package podsecurityreadinesscontroller

import (
"context"
"time"

corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
applyconfiguration "k8s.io/client-go/applyconfigurations/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
psapi "k8s.io/pod-security-admission/api"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/v1helpers"
)

const (
	// checkInterval is the controller resync period. A scan issues one
	// dry-run request per namespace, so the interval is deliberately long.
	// Adjust the interval as needed.
	checkInterval = 240 * time.Minute
)

// podSecurityAlertLabels are the non-enforcing PSA labels (audit, warn)
// whose values are consulted to pick the level for the dry-run check.
var podSecurityAlertLabels = []string{
	psapi.AuditLevelLabel,
	psapi.WarnLevelLabel,
}

// PodSecurityReadinessController checks if namespaces are ready for Pod Security Admission enforcement.
type PodSecurityReadinessController struct {
	kubeClient     kubernetes.Interface    // rate-limited client whose rest config routes warnings to warningsHandler
	operatorClient v1helpers.OperatorClient // target for the readiness status conditions

	// warningsHandler accumulates API warnings (PSA violation details)
	// emitted during dry-run applies; drained via PopAll per namespace.
	warningsHandler *warningsHandler
	// namespaceSelector matches only namespaces without a PSA enforce label.
	namespaceSelector string
}

// NewPodSecurityReadinessController builds a controller that periodically
// scans namespaces lacking a PSA enforce label and reports readiness
// conditions on the operator status.
func NewPodSecurityReadinessController(
	kubeConfig *rest.Config,
	operatorClient v1helpers.OperatorClient,
	recorder events.Recorder,
) (factory.Controller, error) {
	// Violation details arrive as API warnings on dry-run applies, so route
	// warnings into a handler we can drain after each request.
	handler := &warningsHandler{}

	cfg := rest.CopyConfig(kubeConfig)
	cfg.WarningHandler = handler
	// We don't want to overwhelm the apiserver with requests. On a cluster with
	// 10k namespaces, we would send 10k + 1 requests to the apiserver.
	cfg.QPS = 2
	cfg.Burst = 2

	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}

	// Only namespaces that do not already carry an enforce label are of
	// interest.
	req, err := labels.NewRequirement(psapi.EnforceLevelLabel, selection.DoesNotExist, []string{})
	if err != nil {
		return nil, err
	}
	selector := labels.NewSelector().Add(*req)

	c := &PodSecurityReadinessController{
		operatorClient:    operatorClient,
		kubeClient:        client,
		warningsHandler:   handler,
		namespaceSelector: selector.String(),
	}

	return factory.New().
		WithSync(c.sync).
		ResyncEvery(checkInterval).
		ToController("PodSecurityReadinessController", recorder), nil
}

// sync lists all namespaces without a PSA enforce label, records which of
// them would violate enforcement, and pushes the result to the operator
// status as conditions bucketed by namespace ownership class.
func (c *PodSecurityReadinessController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
	nsList, err := c.kubeClient.CoreV1().Namespaces().List(ctx, metav1.ListOptions{LabelSelector: c.namespaceSelector})
	if err != nil {
		return err
	}

	conditions := podSecurityOperatorConditions{}
	for _, ns := range nsList.Items {
		// Retry on conflict per namespace; the dry-run apply can race with
		// concurrent updates to the namespace object.
		err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
			isViolating, err := c.isNamespaceViolating(ctx, &ns)
			if apierrors.IsNotFound(err) {
				// Namespace disappeared between list and check; skip it.
				return nil
			}
			if err != nil {
				return err
			}
			if isViolating {
				conditions.addViolation(ns.Name)
			}

			return nil
		})
		if err != nil {
			klog.V(2).ErrorS(err, "namespace:", ns.Name)

			// We don't want to sync more often than the resync interval.
			// NOTE(review): this aborts the scan and discards conditions
			// gathered so far rather than reporting partial results —
			// presumably deliberate; confirm.
			return nil

		}
	}

	// We expect the Cluster's status conditions to be picked up by the status
	// controller and push it into the ClusterOperator's status, where it will
	// be evaluated by the ClusterFleetMechanic.
	_, _, err = v1helpers.UpdateStatus(ctx, c.operatorClient, conditions.toConditionFuncs()...)
	return err
}

// isNamespaceViolating performs a server-side dry-run apply that sets the
// namespace's PSA enforce label to the strictest level found among its
// audit/warn labels (defaulting to restricted), and reports whether the
// apiserver returned any violation warnings for that apply.
func (c *PodSecurityReadinessController) isNamespaceViolating(ctx context.Context, ns *corev1.Namespace) (bool, error) {
	if ns.Labels[psapi.EnforceLevelLabel] != "" {
		// If someone has taken care of the enforce label, we don't need to
		// check for violations. Global Config nor PS-Label-Syncer will modify
		// it.
		return false, nil
	}

	// Select the candidate enforcement level from the alert labels present.
	targetLevel := ""
	for _, label := range podSecurityAlertLabels {
		levelStr, ok := ns.Labels[label]
		if !ok {
			continue
		}

		level, err := psapi.ParseLevel(levelStr)
		if err != nil {
			// Unparsable label values are ignored rather than treated as errors.
			klog.V(4).InfoS("invalid level", "namespace", ns.Name, "level", levelStr)
			continue
		}

		if targetLevel == "" {
			targetLevel = levelStr
			continue
		}

		// Keep the stricter of the two candidates — assumes CompareLevels
		// orders less-restrictive levels lower; verify against psapi docs.
		if psapi.CompareLevels(psapi.Level(targetLevel), level) < 0 {
			targetLevel = levelStr
		}
	}

	if targetLevel == "" {
		// Global Config will set it to "restricted".
		targetLevel = string(psapi.LevelRestricted)
	}

	// Dry-run apply of the enforce label: nothing is persisted, but PSA
	// evaluates the namespace's pods and reports violations as warnings.
	nsApply := applyconfiguration.Namespace(ns.Name).WithLabels(map[string]string{
		psapi.EnforceLevelLabel: string(targetLevel),
	})

	_, err := c.kubeClient.CoreV1().
		Namespaces().
		Apply(ctx, nsApply, metav1.ApplyOptions{
			DryRun:       []string{metav1.DryRunAll},
			FieldManager: "pod-security-readiness-controller",
		})
	if err != nil {
		return false, err
	}

	// The information we want is in the warnings. It collects violations.
	warnings := c.warningsHandler.PopAll()

	return len(warnings) > 0, nil
}

0 comments on commit 2a07e2d

Please sign in to comment.