Pod security on OpenShift
Make sure we are not racing with the cluster's
label sync mechanism on OpenShift.

Signed-off-by: L. Pivarc <lpivarc@redhat.com>
xpivarc committed Sep 14, 2022
1 parent 8512fe3 commit 230676f
Showing 8 changed files with 58 additions and 18 deletions.
6 changes: 6 additions & 0 deletions pkg/virt-controller/watch/application.go
@@ -253,6 +253,8 @@ type VirtControllerApp struct {
nodeTopologyUpdatePeriod time.Duration
reloadableRateLimiter *ratelimiter.ReloadableRateLimiter
leaderElector *leaderelection.LeaderElector

onOpenshift bool
}

var _ service.Service = &VirtControllerApp{}
@@ -406,6 +408,8 @@ func Execute() {

app.vmCloneInformer = app.informerFactory.VirtualMachineClone()

app.onOpenshift = onOpenShift

app.initCommon()
app.initReplicaSet()
app.initPool()
@@ -590,6 +594,7 @@ func (vca *VirtControllerApp) initCommon() {
vca.clusterConfig,
topologyHinter,
vca.namespaceStore,
vca.onOpenshift,
)

recorder := vca.newRecorder(k8sv1.NamespaceAll, "node-controller")
@@ -607,6 +612,7 @@ func (vca *VirtControllerApp) initCommon() {
vca.clientSet,
vca.clusterConfig,
vca.namespaceStore,
vca.onOpenshift,
)

vca.nodeTopologyUpdater = topology.NewNodeTopologyUpdater(vca.clientSet, topologyHinter, vca.nodeInformer)
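For context, the onOpenShift value assigned in Execute() above is consumed by this commit but not computed by it. Below is a minimal sketch of how such a flag is commonly derived, assuming detection via the OpenShift-only security.openshift.io API group; the helper name isOnOpenShift and the discovery-based approach are illustrative assumptions, not code from this commit:

```go
package cluster

import (
	"k8s.io/client-go/discovery"
)

// isOnOpenShift reports whether the cluster exposes the
// security.openshift.io API group, which is served only by OpenShift.
// Illustrative sketch; KubeVirt's real detection helper is outside this diff.
func isOnOpenShift(dc discovery.DiscoveryInterface) (bool, error) {
	groups, err := dc.ServerGroups()
	if err != nil {
		return false, err
	}
	for _, group := range groups.Groups {
		if group.Name == "security.openshift.io" {
			return true, nil
		}
	}
	return false, nil
}
```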
2 changes: 2 additions & 0 deletions pkg/virt-controller/watch/application_test.go
@@ -130,6 +130,7 @@ var _ = Describe("Application", func() {
config,
topology.NewTopologyHinter(&cache.FakeCustomStore{}, &cache.FakeCustomStore{}, "amd64", nil),
nil,
false,
)
app.rsController = NewVMIReplicaSet(vmiInformer, rsInformer, recorder, virtClient, uint(10))
app.vmController = NewVMController(vmiInformer,
@@ -153,6 +154,7 @@ var _ = Describe("Application", func() {
virtClient,
config,
nil,
false,
)
app.snapshotController = &snapshot.VMSnapshotController{
Client: virtClient,
8 changes: 6 additions & 2 deletions pkg/virt-controller/watch/migration.go
@@ -112,6 +112,8 @@ type MigrationController struct {

unschedulablePendingTimeoutSeconds int64
catchAllPendingTimeoutSeconds int64

onOpenshift bool
}

func NewMigrationController(templateService services.TemplateService,
@@ -126,6 +128,7 @@ func NewMigrationController(templateService services.TemplateService,
clientset kubecli.KubevirtClient,
clusterConfig *virtconfig.ClusterConfig,
namespaceStore cache.Store,
onOpenshift bool,
) *MigrationController {

c := &MigrationController{
@@ -150,6 +153,7 @@ func NewMigrationController(templateService services.TemplateService,
catchAllPendingTimeoutSeconds: defaultCatchAllPendingTimeoutSeconds,

namespaceStore: namespaceStore,
onOpenshift: onOpenshift,
}

c.vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -645,7 +649,7 @@ func (c *MigrationController) createTargetPod(migration *virtv1.VirtualMachineIn

if c.clusterConfig.PSAEnabled() {
// Check my impact
if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace()); err != nil {
if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace(), c.onOpenshift); err != nil {
return err
}
}
@@ -938,7 +942,7 @@ func (c *MigrationController) createAttachmentPod(migration *virtv1.VirtualMachi

if c.clusterConfig.PSAEnabled() {
// Check my impact
if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace()); err != nil {
if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace(), c.onOpenshift); err != nil {
return err
}
}
1 change: 1 addition & 0 deletions pkg/virt-controller/watch/migration_test.go
@@ -287,6 +287,7 @@ var _ = Describe("Migration watcher", func() {
virtClient,
config,
namespaceStore,
false,
)
// Wrap our workqueue to have a way to detect when we are done processing updates
mockQueue = testutils.NewMockWorkQueue(controller.Queue)
11 changes: 9 additions & 2 deletions pkg/virt-controller/watch/psa.go
@@ -30,8 +30,9 @@ import (
)

const PSALabel = "pod-security.kubernetes.io/enforce"
const OpenshiftPSAsync = "security.openshift.io/scc.podSecurityLabelSync"

func escalateNamespace(namespaceStore cache.Store, client kubecli.KubevirtClient, namespace string) error {
func escalateNamespace(namespaceStore cache.Store, client kubecli.KubevirtClient, namespace string, onOpenshift bool) error {
obj, exists, err := namespaceStore.GetByKey(namespace)
if err != nil {
return fmt.Errorf("Failed to get namespace, %w", err)
@@ -42,7 +43,13 @@ func escalateNamespace(namespaceStore cache.Store, client kubecli.KubevirtClient
namespaceObj := obj.(*k8sv1.Namespace)
enforceLevel, labelExist := namespaceObj.Labels[PSALabel]
if !labelExist || enforceLevel != "privileged" {
data := []byte(fmt.Sprintf(`{"metadata": { "labels": {"%s": "privileged"}}}`, PSALabel))
labels := ""
if !onOpenshift {
labels = fmt.Sprintf(`{"%s": "privileged"}`, PSALabel)
} else {
labels = fmt.Sprintf(`{"%s": "privileged", "%s": "false"}`, PSALabel, OpenshiftPSAsync)
}
data := []byte(fmt.Sprintf(`{"metadata": { "labels": %s}}`, labels))
_, err := client.CoreV1().Namespaces().Patch(context.TODO(), namespace, types.StrategicMergePatchType, data, v1.PatchOptions{})
if err != nil {
return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s", namespace)}
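The effect of the change above is easiest to see in the patch bodies escalateNamespace now sends. A standalone sketch that reproduces both payloads; patchFor is a hypothetical helper written for illustration, not part of the commit:

```go
package main

import "fmt"

const (
	// Mirrored from pkg/virt-controller/watch/psa.go.
	PSALabel         = "pod-security.kubernetes.io/enforce"
	OpenshiftPSAsync = "security.openshift.io/scc.podSecurityLabelSync"
)

// patchFor rebuilds the strategic-merge patch escalateNamespace applies.
// On OpenShift the sync label is additionally pinned to "false" so the
// cluster's label syncer does not race with, and undo, the escalation.
func patchFor(onOpenshift bool) []byte {
	labels := fmt.Sprintf(`{"%s": "privileged"}`, PSALabel)
	if onOpenshift {
		labels = fmt.Sprintf(`{"%s": "privileged", "%s": "false"}`, PSALabel, OpenshiftPSAsync)
	}
	return []byte(fmt.Sprintf(`{"metadata": { "labels": %s}}`, labels))
}

func main() {
	fmt.Printf("plain Kubernetes: %s\n", patchFor(false))
	fmt.Printf("OpenShift:        %s\n", patchFor(true))
}
```

Setting security.openshift.io/scc.podSecurityLabelSync to "false" opts the namespace out of OpenShift's automatic pod-security label synchronization, which could otherwise overwrite the privileged enforce label; this is the race the commit message refers to.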
38 changes: 27 additions & 11 deletions pkg/virt-controller/watch/psa_test.go
@@ -6,6 +6,7 @@ import (
"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/types"
k8sv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sruntime "k8s.io/apimachinery/pkg/runtime"
@@ -21,6 +22,7 @@ var _ = Describe("PSA", func() {
client *kubecli.MockKubevirtClient
kubeClient *fake.Clientset
ctrl *gomock.Controller
notOnOpenshift = false
)

BeforeEach(func() {
@@ -32,21 +34,28 @@
})

Context("should patch namespace with enforce level", func() {
BeforeEach(func() {
var (
onOpenshift = true
psaLabels = HaveKeyWithValue(PSALabel, "privileged")
psaLabelsOnOpenshift = And(HaveKeyWithValue(PSALabel, "privileged"), HaveKeyWithValue(OpenshiftPSAsync, "false"))
)

expectLabels := func(expectedLabels types.GomegaMatcher) {
kubeClient.Fake.PrependReactor("patch", "namespaces",
func(action testing.Action) (handled bool, obj k8sruntime.Object, err error) {
patchAction, ok := action.(testing.PatchAction)
Expect(ok).To(BeTrue())
patchBytes := patchAction.GetPatch()
namespace := &k8sv1.Namespace{}
Expect(json.Unmarshal(patchBytes, namespace)).To(Succeed())
Expect(json.Unmarshal(patchBytes, namespace)).To(Succeed(), string(patchBytes))

Expect(namespace.Labels).To(HaveKeyWithValue(PSALabel, "privileged"))
Expect(namespace.Labels).To(expectedLabels)
return true, nil, nil
})
})
}

It("when label is missing", func() {
DescribeTable("when label is missing", func(expectedLabels types.GomegaMatcher, onOpenshift bool) {
expectLabels(expectedLabels)
namespace := &k8sv1.Namespace{
TypeMeta: metav1.TypeMeta{
Kind: "Namespace",
@@ -57,10 +66,14 @@
}
Expect(namespaceStore.Add(namespace)).NotTo(HaveOccurred())

Expect(escalateNamespace(namespaceStore, client, "test")).To(Succeed())
})
Expect(escalateNamespace(namespaceStore, client, "test", onOpenshift)).To(Succeed())
},
Entry("on plain Kubernetes", psaLabels, notOnOpenshift),
Entry("on Openshift", psaLabelsOnOpenshift, onOpenshift),
)

It("when enforce label is not privileged", func() {
DescribeTable("when enforce label is not privileged", func(expectedLabels types.GomegaMatcher, onOpenshift bool) {
expectLabels(expectedLabels)
namespace := &k8sv1.Namespace{
TypeMeta: metav1.TypeMeta{
Kind: "Namespace",
@@ -74,8 +87,11 @@
}
Expect(namespaceStore.Add(namespace)).NotTo(HaveOccurred())

Expect(escalateNamespace(namespaceStore, client, "test")).To(Succeed())
})
Expect(escalateNamespace(namespaceStore, client, "test", onOpenshift)).To(Succeed())
},
Entry("on plain Kubernetes", psaLabels, notOnOpenshift),
Entry("on Openshift", psaLabelsOnOpenshift, onOpenshift),
)
})
It("should not patch namespace when enforce label is set to privileged", func() {
namespace := &k8sv1.Namespace{
@@ -92,7 +108,7 @@
Expect("Patch namespaces is not expected").To(BeEmpty())
return true, nil, nil
})
Expect(escalateNamespace(namespaceStore, client, "test")).To(Succeed())
Expect(escalateNamespace(namespaceStore, client, "test", notOnOpenshift)).To(Succeed())
})

})
9 changes: 6 additions & 3 deletions pkg/virt-controller/watch/vmi.go
@@ -156,6 +156,7 @@ func NewVMIController(templateService services.TemplateService,
clusterConfig *virtconfig.ClusterConfig,
topologyHinter topology.Hinter,
namespaceStore cache.Store,
onOpenshift bool,
) *VMIController {

c := &VMIController{
@@ -175,6 +176,7 @@
clusterConfig: clusterConfig,
topologyHinter: topologyHinter,
namespaceStore: namespaceStore,
onOpenshift: onOpenshift,
}

c.vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -237,6 +239,7 @@ type VMIController struct {
cdiConfigInformer cache.SharedIndexInformer
clusterConfig *virtconfig.ClusterConfig
namespaceStore cache.Store
onOpenshift bool
}

func (c *VMIController) Run(threadiness int, stopCh <-chan struct{}) {
@@ -1084,7 +1087,7 @@ func (c *VMIController) sync(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod,

if c.clusterConfig.PSAEnabled() {
namespace := vmi.GetNamespace()
if err := escalateNamespace(c.namespaceStore, c.clientset, namespace); err != nil {
if err := escalateNamespace(c.namespaceStore, c.clientset, namespace, c.onOpenshift); err != nil {
return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s", namespace)}
}
}
@@ -1808,7 +1811,7 @@ func (c *VMIController) createAttachmentPod(vmi *virtv1.VirtualMachineInstance,

if c.clusterConfig.PSAEnabled() {
namespace := vmi.GetNamespace()
if err := escalateNamespace(c.namespaceStore, c.clientset, namespace); err != nil {
if err := escalateNamespace(c.namespaceStore, c.clientset, namespace, c.onOpenshift); err != nil {
return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s while creating attachment pod", namespace)}
}
}
Expand All @@ -1834,7 +1837,7 @@ func (c *VMIController) triggerHotplugPopulation(volume *virtv1.Volume, vmi *vir

if c.clusterConfig.PSAEnabled() {
namespace := vmi.GetNamespace()
if err := escalateNamespace(c.namespaceStore, c.clientset, namespace); err != nil {
if err := escalateNamespace(c.namespaceStore, c.clientset, namespace, c.onOpenshift); err != nil {
return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s while creating hotplug population trigger pod", namespace)}
}
}
1 change: 1 addition & 0 deletions pkg/virt-controller/watch/vmi_test.go
@@ -267,6 +267,7 @@ var _ = Describe("VirtualMachineInstance watcher", func() {
config,
topology.NewTopologyHinter(&cache.FakeCustomStore{}, &cache.FakeCustomStore{}, "amd64", config),
namespaceStore,
false,
)
// Wrap our workqueue to have a way to detect when we are done processing updates
mockQueue = testutils.NewMockWorkQueue(controller.Queue)
