Add multi-pod tests with SELinux mounts #113789

Merged: 5 commits, Jan 18, 2023
Changes from 2 commits
test/e2e/storage/csi_mock/base.go (16 additions, 1 deletion)
@@ -664,6 +664,11 @@ func startPausePodWithSELinuxOptions(cs clientset.Interface, pvc *v1.PersistentV
},
},
}
if node.Name != "" {
// Force schedule the pod to skip scheduler RWOP checks
framework.Logf("Forcing node name %s", node.Name)
pod.Spec.NodeName = node.Name
}
e2epod.SetNodeSelection(&pod.Spec, node)
return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
}
@@ -821,23 +826,33 @@ func compareCSICalls(ctx context.Context, trackedCalls []string, expectedCallSeq

// createSELinuxMountPreHook creates a hook that records the mountOptions passed in
// through NodeStageVolume and NodePublishVolume calls.
func createSELinuxMountPreHook(nodeStageMountOpts, nodePublishMountOpts *[]string) *drivers.Hooks {
func createSELinuxMountPreHook(nodeStageMountOpts, nodePublishMountOpts *[]string, stageCalls, unstageCalls, publishCalls, unpublishCalls *atomic.Int32) *drivers.Hooks {
return &drivers.Hooks{
Pre: func(ctx context.Context, fullMethod string, request interface{}) (reply interface{}, err error) {
nodeStageRequest, ok := request.(*csipbv1.NodeStageVolumeRequest)
if ok {
stageCalls.Add(1)
mountVolume := nodeStageRequest.GetVolumeCapability().GetMount()
if mountVolume != nil {
*nodeStageMountOpts = mountVolume.MountFlags
}
}
nodePublishRequest, ok := request.(*csipbv1.NodePublishVolumeRequest)
if ok {
publishCalls.Add(1)
mountVolume := nodePublishRequest.GetVolumeCapability().GetMount()
if mountVolume != nil {
*nodePublishMountOpts = mountVolume.MountFlags
}
}
_, ok = request.(*csipbv1.NodeUnstageVolumeRequest)
Review comment (Contributor):
Missing error handling?

Reply (PR author):
I think the code is correct; still, I rewrote it to use switch request.(type) to be more obvious (a sketch of that form follows this hunk).

if ok {
unstageCalls.Add(1)
}
_, ok = request.(*csipbv1.NodeUnpublishVolumeRequest)
Review comment (Contributor):
Same here?

if ok {
unpublishCalls.Add(1)
}
return nil, nil
},
}
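For reference, a sketch of the type-switch form mentioned in the review thread above; this illustrates the shape of the rewrite, not necessarily the exact follow-up commit:

// Sketch: the same Pre hook written as a type switch over the incoming CSI request.
func createSELinuxMountPreHook(nodeStageMountOpts, nodePublishMountOpts *[]string, stageCalls, unstageCalls, publishCalls, unpublishCalls *atomic.Int32) *drivers.Hooks {
    return &drivers.Hooks{
        Pre: func(ctx context.Context, fullMethod string, request interface{}) (reply interface{}, err error) {
            switch req := request.(type) {
            case *csipbv1.NodeStageVolumeRequest:
                stageCalls.Add(1)
                if mountVolume := req.GetVolumeCapability().GetMount(); mountVolume != nil {
                    *nodeStageMountOpts = mountVolume.MountFlags
                }
            case *csipbv1.NodePublishVolumeRequest:
                publishCalls.Add(1)
                if mountVolume := req.GetVolumeCapability().GetMount(); mountVolume != nil {
                    *nodePublishMountOpts = mountVolume.MountFlags
                }
            case *csipbv1.NodeUnstageVolumeRequest:
                unstageCalls.Add(1)
            case *csipbv1.NodeUnpublishVolumeRequest:
                unpublishCalls.Add(1)
            }
            // The hook only observes the call; returning nil, nil lets the mock driver handle it normally.
            return nil, nil
        },
    }
}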
test/e2e/storage/csi_mock/csi_selinux_mount.go (164 additions, 46 deletions)
@@ -18,10 +18,17 @@ package csi_mock

import (
"context"
"fmt"
"sync/atomic"

"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/test/e2e/framework"
e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -35,57 +42,92 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() {

ginkgo.Context("SELinuxMount [LinuxOnly][Feature:SELinux][Feature:SELinuxMountReadWriteOncePod]", func() {
// Make sure all options are set so system specific defaults are not used.
seLinuxOpts := v1.SELinuxOptions{
seLinuxOpts1 := v1.SELinuxOptions{
User: "system_u",
Role: "object_r",
Type: "container_file_t",
Level: "s0:c0,c1",
}
seLinuxMountOption := "context=\"system_u:object_r:container_file_t:s0:c0,c1\""
seLinuxMountOption1 := "context=\"system_u:object_r:container_file_t:s0:c0,c1\""
seLinuxOpts2 := v1.SELinuxOptions{
User: "system_u",
Role: "object_r",
Type: "container_file_t",
Level: "s0:c98,c99",
}
seLinuxMountOption2 := "context=\"system_u:object_r:container_file_t:s0:c98,c99\""

tests := []struct {
name string
seLinuxEnabled bool
seLinuxSetInPod bool
mountOptions []string
volumeMode v1.PersistentVolumeAccessMode
expectedMountOptions []string
name string
csiDriverSELinuxEnabled bool
firstPodSELinuxOpts *v1.SELinuxOptions
startSecondPod bool
secondPodSELinuxOpts *v1.SELinuxOptions
mountOptions []string
volumeMode v1.PersistentVolumeAccessMode
expectedFirstMountOptions []string
expectedSecondMountOptions []string
expectedUnstage bool
}{
// Start just a single pod and check its volume is mounted correctly
{
name: "should pass SELinux mount option for RWOP volume and Pod with SELinux context set",
csiDriverSELinuxEnabled: true,
firstPodSELinuxOpts: &seLinuxOpts1,
volumeMode: v1.ReadWriteOncePod,
expectedFirstMountOptions: []string{seLinuxMountOption1},
},
{
name: "should add SELinux mount option to existing mount options",
csiDriverSELinuxEnabled: true,
firstPodSELinuxOpts: &seLinuxOpts1,
mountOptions: []string{"noexec", "noatime"},
volumeMode: v1.ReadWriteOncePod,
expectedFirstMountOptions: []string{"noexec", "noatime", seLinuxMountOption1},
},
{
name: "should pass SELinux mount option for RWOP volume and Pod with SELinux context set",
seLinuxEnabled: true,
seLinuxSetInPod: true,
volumeMode: v1.ReadWriteOncePod,
expectedMountOptions: []string{seLinuxMountOption},
name: "should not pass SELinux mount option for RWO volume",
csiDriverSELinuxEnabled: true,
firstPodSELinuxOpts: &seLinuxOpts1,
volumeMode: v1.ReadWriteOnce,
expectedFirstMountOptions: nil,
},
{
name: "should add SELinux mount option to existing mount options",
seLinuxEnabled: true,
seLinuxSetInPod: true,
mountOptions: []string{"noexec", "noatime"},
volumeMode: v1.ReadWriteOncePod,
expectedMountOptions: []string{"noexec", "noatime", seLinuxMountOption},
name: "should not pass SELinux mount option for Pod without SELinux context",
csiDriverSELinuxEnabled: true,
firstPodSELinuxOpts: nil,
volumeMode: v1.ReadWriteOncePod,
expectedFirstMountOptions: nil,
},
{
name: "should not pass SELinux mount option for RWO volume",
seLinuxEnabled: true,
seLinuxSetInPod: true,
volumeMode: v1.ReadWriteOnce,
expectedMountOptions: nil,
name: "should not pass SELinux mount option for CSI driver that does not support SELinux mount",
csiDriverSELinuxEnabled: false,
firstPodSELinuxOpts: &seLinuxOpts1,
volumeMode: v1.ReadWriteOncePod,
expectedFirstMountOptions: nil,
},
// Start two pods in a sequence and check their volume is / is not unmounted in between
{
name: "should not pass SELinux mount option for Pod without SELinux context",
seLinuxEnabled: true,
seLinuxSetInPod: false,
volumeMode: v1.ReadWriteOncePod,
expectedMountOptions: nil,
name: "should not unstage volume when starting a second pod with the same SELinux context",
csiDriverSELinuxEnabled: true,
firstPodSELinuxOpts: &seLinuxOpts1,
startSecondPod: true,
secondPodSELinuxOpts: &seLinuxOpts1,
volumeMode: v1.ReadWriteOncePod,
expectedFirstMountOptions: []string{seLinuxMountOption1},
expectedSecondMountOptions: []string{seLinuxMountOption1},
expectedUnstage: false,
},
{
name: "should not pass SELinux mount option for CSI driver that does not support SELinux mount",
seLinuxEnabled: false,
seLinuxSetInPod: true,
volumeMode: v1.ReadWriteOncePod,
expectedMountOptions: nil,
name: "should unstage volume when starting a second pod with different SELinux context",
csiDriverSELinuxEnabled: true,
firstPodSELinuxOpts: &seLinuxOpts1,
startSecondPod: true,
secondPodSELinuxOpts: &seLinuxOpts2,
volumeMode: v1.ReadWriteOncePod,
expectedFirstMountOptions: []string{seLinuxMountOption1},
expectedSecondMountOptions: []string{seLinuxMountOption2},
expectedUnstage: true,
},
}
for _, t := range tests {
@@ -95,27 +137,103 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() {
e2eskipper.Skipf("SELinuxMount is only applied on linux nodes -- skipping")
}
var nodeStageMountOpts, nodePublishMountOpts []string
var unstageCalls, stageCalls, unpublishCalls, publishCalls atomic.Int32
m.init(ctx, testParameters{
disableAttach: true,
registerDriver: true,
enableSELinuxMount: &t.seLinuxEnabled,
hooks: createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts),
enableSELinuxMount: &t.csiDriverSELinuxEnabled,
hooks: createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts, &stageCalls, &unstageCalls, &publishCalls, &unpublishCalls),
})
ginkgo.DeferCleanup(m.cleanup)
defer m.cleanup(ctx)
Review comment (Contributor):
Why are you reverting to using defer here?

It will not be able to clean up when the test gets interrupted, because ctx will already be canceled by then.

Reply (PR author):
Good catch, using DeferCleanup now.
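For context, a rough sketch of the difference being discussed here (assuming m.cleanup accepts a context, as the surrounding code suggests):

// Plain defer runs cleanup with the test's own ctx; when the spec is
// interrupted, that ctx is already canceled by the time the deferred call
// runs, so the cleanup may no longer be able to delete test resources.
defer m.cleanup(ctx)

// DeferCleanup registers the cleanup with Ginkgo instead; Ginkgo invokes it
// during its cleanup phase with a context of its own, so it can still do
// useful work after an interruption.
ginkgo.DeferCleanup(m.cleanup)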


// Act
ginkgo.By("Starting the initial pod")
accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode}
var podSELinuxOpts *v1.SELinuxOptions
if t.seLinuxSetInPod {
// Make sure all options are set so system specific defaults are not used.
podSELinuxOpts = &seLinuxOpts
_, claim, pod := m.createPodWithSELinux(ctx, accessModes, t.mountOptions, t.firstPodSELinuxOpts)
err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "starting the initial pod")

// Assert
ginkgo.By("Checking the initial pod mount options")
framework.ExpectEqual(nodeStageMountOpts, t.expectedFirstMountOptions, "NodeStage MountFlags for the initial pod")
framework.ExpectEqual(nodePublishMountOpts, t.expectedFirstMountOptions, "NodePublish MountFlags for the initial pod")

ginkgo.By("Checking the CSI driver calls for the initial pod")
gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnstage call count for the initial pod")
gomega.Expect(unpublishCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnpublish call count for the initial pod")
gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeStage for the initial pod")
gomega.Expect(publishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodePublish for the initial pod")

if !t.startSecondPod {
return
}

_, _, pod := m.createPodWithSELinux(ctx, accessModes, t.mountOptions, podSELinuxOpts)
err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "failed to start pod")
// Arrange 2nd part of the test
ginkgo.By("Starting the second pod to check if a volume used by the initial pod is / is not unmounted based on SELinux context")

// Skip scheduler, it would block scheduling the second pod with ReadWriteOncePod PV.
pod, err = m.cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting the initial pod"))
Review comment (Contributor):
No need for fmt.Sprintf: there's nothing to format, and even if there was, ExpectNoError could be passed a format string plus parameters.

Reply (PR author):
Fixed
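For illustration, the simpler forms the comment points to (using the pod variable from the surrounding code):

// Nothing to format, so the message can be passed directly:
framework.ExpectNoError(err, "getting the initial pod")

// If formatting were needed, ExpectNoError itself accepts a format string plus arguments:
framework.ExpectNoError(err, "getting the initial pod %s", pod.Name)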

nodeSelection := e2epod.NodeSelection{Name: pod.Spec.NodeName}
pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts)
framework.ExpectNoError(err, "creating second pod with SELinux context %s", t.secondPodSELinuxOpts)
m.pods = append(m.pods, pod2)

// Delete the initial pod only after kubelet processes the second pod and adds its volumes to
// DesiredStateOfWorld.
// In this state, any volume UnPublish / UnStage must be caused by the SELinux contexts and not
// by a random race where the second pod's volumes are not yet in DesiredStateOfWorld.
ginkgo.By("Waiting for the second pod to fail to start because of ReadWriteOncePod.")
eventSelector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.name": pod2.Name,
"involvedObject.namespace": pod2.Namespace,
"reason": events.FailedMountVolume,
}.AsSelector().String()
var msg string
if t.expectedUnstage {
// This message is emitted before kubelet checks for ReadWriteOncePod
msg = "conflicting SELinux labels of volume"
} else {
msg = "volume uses the ReadWriteOncePod access mode and is already in use by another pod"
}
err = e2eevents.WaitTimeoutForEvent(ctx, m.cs, pod2.Namespace, eventSelector, msg, f.Timeouts.PodStart)
framework.ExpectNoError(err, "waiting for event %q in the second test pod", msg)

// count fresh CSI driver calls between the first and the second pod
nodeStageMountOpts = nil
nodePublishMountOpts = nil
unstageCalls.Store(0)
unpublishCalls.Store(0)
stageCalls.Store(0)
publishCalls.Store(0)

// Act 2nd part of the test
ginkgo.By("Deleting the initial pod")
err = e2epod.DeletePodWithWait(ctx, m.cs, pod)
framework.ExpectNoError(err, "deleting the initial pod")

framework.ExpectEqual(nodeStageMountOpts, t.expectedMountOptions, "Expect NodeStageVolumeRequest.VolumeCapability.MountVolume. to equal %q; got: %q", t.expectedMountOptions, nodeStageMountOpts)
framework.ExpectEqual(nodePublishMountOpts, t.expectedMountOptions, "Expect NodePublishVolumeRequest.VolumeCapability.MountVolume.VolumeMountGroup to equal %q; got: %q", t.expectedMountOptions, nodeStageMountOpts)
// Assert 2nd part of the test
ginkgo.By("Waiting for the second pod to start")
err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod2.Name, pod2.Namespace)
framework.ExpectNoError(err, "starting the second pod")

ginkgo.By("Checking CSI driver calls for the second pod")
if t.expectedUnstage {
// Volume should be fully unstaged between the first and the second pod
gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeUnstage calls after the first pod is deleted")
gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeStage calls for the second pod")
// The second pod got the right mount option
framework.ExpectEqual(nodeStageMountOpts, t.expectedSecondMountOptions, "NodeStage MountFlags for the second pod")
} else {
// Volume should not be fully unstaged between the first and the second pod
gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnstage calls after the first pod is deleted")
gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeStage calls for the second pod")
}
// In both cases, Unpublish and Publish are called, with the right mount opts.
gomega.Expect(unpublishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeUnpublish calls after the first pod is deleted")
gomega.Expect(publishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodePublish calls for the second pod")
framework.ExpectEqual(nodePublishMountOpts, t.expectedSecondMountOptions, "NodePublish MountFlags for the second pod")
})
}
})