Merge pull request #33 from mhenriks/release-4.15-pick-pr-32
OCPBUGS-29793: [release-4.15] Address GHSA-fg9q-5cw2-p6r9: Restrict access to infrastructure PVCs by requiring matching infraClusterLabels on tenant PVCs
openshift-merge-bot[bot] committed Mar 20, 2024
2 parents aea7be8 + 001fd0b commit d3bdbce
Showing 25 changed files with 1,828 additions and 13 deletions.
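
Context for the fix: without these checks, a tenant-cluster user who can create PersistentVolumes could hand-craft a PV whose CSI VolumeHandle names an arbitrary PVC/DataVolume in the infra cluster, and the driver would attach it to tenant workloads. With this change the driver treats an infra volume as its own only when the volume carries the configured infra-cluster labels and its name starts with the expected volume prefix; anything else is rejected as an invalid volume (see pkg/kubevirt/client.go and the new e2e test below).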
13 changes: 11 additions & 2 deletions cmd/kubevirt-csi-driver/kubevirt-csi-driver.go
@@ -27,6 +27,7 @@ var (
infraClusterNamespace = flag.String("infra-cluster-namespace", "", "The infra-cluster namespace")
infraClusterKubeconfig = flag.String("infra-cluster-kubeconfig", "", "The infra-cluster kubeconfig file. If not set, defaults to in-cluster config.")
infraClusterLabels = flag.String("infra-cluster-labels", "", "The infra-cluster labels to use when creating resources in the infra cluster. 'name=value' pairs separated by commas")
volumePrefix = flag.String("volume-prefix", "pvc", "The prefix expected for persistent volumes")
// infraStorageClassEnforcement = flag.String("infra-storage-class-enforcement", "", "A string encoded yaml that represents the policy of enforcing which infra storage classes are allowed in persistentVolume of type kubevirt")
infraStorageClassEnforcement = os.Getenv("INFRA_STORAGE_CLASS_ENFORCEMENT")
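
The parseLabels helper called in handle() below is referenced by this diff but not shown. Here is a minimal sketch of what it plausibly does, given the 'name=value' pairs format documented above — the name parseLabelsSketch and the exact trimming behavior are assumptions, not the driver's actual implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// parseLabelsSketch is a hypothetical stand-in for the driver's parseLabels:
// it splits a "name=value,name2=value2" flag string into a label map.
func parseLabelsSketch(s string) map[string]string {
	labels := map[string]string{}
	for _, pair := range strings.Split(s, ",") {
		if kv := strings.SplitN(pair, "=", 2); len(kv) == 2 {
			labels[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
		}
	}
	return labels
}

func main() {
	fmt.Println(parseLabelsSketch("tenant-cluster=tenant-ns,env=dev"))
	// map[env:dev tenant-cluster:tenant-ns]
}
```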

@@ -75,6 +76,13 @@ func handle() {
}
klog.V(2).Infof("Driver vendor %v %v", service.VendorName, service.VendorVersion)

if (infraClusterLabels == nil || *infraClusterLabels == "") && !*runNodeService {
klog.Fatal("infra-cluster-labels must be set")
}
if volumePrefix == nil || *volumePrefix == "" {
klog.Fatal("volume-prefix must be set")
}

inClusterConfig, err := rest.InClusterConfig()
if err != nil {
klog.Fatalf("Failed to build in cluster config: %v", err)
@@ -104,7 +112,9 @@ func handle() {
klog.Fatalf("Failed to build tenant client set: %v", err)
}

virtClient, err := kubevirt.NewClient(infraRestConfig)
infraClusterLabelsMap := parseLabels()

virtClient, err := kubevirt.NewClient(infraRestConfig, infraClusterLabelsMap, *volumePrefix)
if err != nil {
klog.Fatal(err)
}
@@ -128,7 +138,6 @@ func handle() {
}
}

infraClusterLabelsMap := parseLabels()
var storageClassEnforcement util.StorageClassEnforcement
// parse yaml
if infraStorageClassEnforcement == "" {
142 changes: 142 additions & 0 deletions e2e/create-pvc_test.go
@@ -11,16 +11,19 @@ import (

"k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/klog/v2"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

"github.com/spf13/pflag"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/kubernetes"
"kubevirt.io/client-go/kubecli"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

var virtClient kubecli.KubevirtClient
@@ -282,6 +285,145 @@ var _ = Describe("CreatePVC", func() {
Entry("Filesystem volume mode", k8sv1.PersistentVolumeFilesystem, attacherPodFs),
Entry("Block volume mode", k8sv1.PersistentVolumeBlock, attacherPodBlock),
)

Context("Should prevent access to volumes from infra cluster", func() {
var tenantPVC *k8sv1.PersistentVolumeClaim
var tenantPV *k8sv1.PersistentVolume
var infraDV *cdiv1.DataVolume

AfterEach(func() {
By("Cleaning up resources for test")
if tenantPVC != nil {
err := tenantClient.CoreV1().PersistentVolumeClaims(tenantPVC.Namespace).Delete(context.Background(), tenantPVC.Name, metav1.DeleteOptions{})
if !errors.IsNotFound(err) {
Expect(err).ToNot(HaveOccurred())
Eventually(func() bool {
_, err := tenantClient.CoreV1().PersistentVolumeClaims(tenantPVC.Namespace).Get(context.Background(), tenantPVC.Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, 1*time.Minute, 2*time.Second).Should(BeTrue(), "tenant pvc should disappear")
}
tenantPVC = nil
}
if tenantPV != nil {
err := tenantClient.CoreV1().PersistentVolumes().Delete(context.Background(), tenantPV.Name, metav1.DeleteOptions{})
if !errors.IsNotFound(err) {
Expect(err).ToNot(HaveOccurred())
// For some reason this takes about 2 minutes.
Eventually(func() bool {
_, err := tenantClient.CoreV1().PersistentVolumes().Get(context.Background(), tenantPV.Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, 3*time.Minute, 2*time.Second).Should(BeTrue(), "tenant pv should disappear")
}
tenantPV = nil
}
if infraDV != nil {
_, err := virtClient.CdiClient().CdiV1beta1().DataVolumes(InfraClusterNamespace).Get(context.Background(), infraDV.Name, metav1.GetOptions{})
if !errors.IsNotFound(err) {
Expect(err).ToNot(HaveOccurred())
err = virtClient.CdiClient().CdiV1beta1().DataVolumes(InfraClusterNamespace).Delete(context.Background(), infraDV.Name, metav1.DeleteOptions{})
Expect(err).ToNot(HaveOccurred())
}
infraDV = nil
}
})

It("should not be able to create a PV and access a volume from the infra cluster that is not labeled", func() {
infraDV = &cdiv1.DataVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "infra-pvc",
Namespace: InfraClusterNamespace,
Annotations: map[string]string{
"cdi.kubevirt.io/storage.deleteAfterCompletion": "false",
},
},
Spec: cdiv1.DataVolumeSpec{
Source: &cdiv1.DataVolumeSource{
Blank: &cdiv1.DataVolumeBlankImage{},
},
Storage: &cdiv1.StorageSpec{
Resources: k8sv1.ResourceRequirements{
Requests: k8sv1.ResourceList{
k8sv1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
},
}
var err error
infraDV, err = virtClient.CdiClient().CdiV1beta1().DataVolumes(InfraClusterNamespace).Create(context.Background(), infraDV, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
Eventually(func() bool {
infraDV, err = virtClient.CdiClient().CdiV1beta1().DataVolumes(InfraClusterNamespace).Get(context.Background(), infraDV.Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
fmt.Fprintf(GinkgoWriter, "infraDV.Status.Phase: %s\n", infraDV.Status.Phase)
return infraDV.Status.Phase == cdiv1.Succeeded
}, 1*time.Minute, 2*time.Second).Should(BeTrue(), "infra dv should be ready")

By("Creating a specially crafted PV, attempt to access volume from infra cluster that should not be accessed")
tenantPV = &k8sv1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "tenant-pv",
},
Spec: k8sv1.PersistentVolumeSpec{
AccessModes: []k8sv1.PersistentVolumeAccessMode{k8sv1.ReadWriteOnce},
Capacity: k8sv1.ResourceList{k8sv1.ResourceStorage: resource.MustParse("1Gi")},
PersistentVolumeSource: k8sv1.PersistentVolumeSource{
CSI: &k8sv1.CSIPersistentVolumeSource{
Driver: "csi.kubevirt.io",
VolumeHandle: infraDV.Name,
VolumeAttributes: map[string]string{
"bus": "scsi",
"serial": "abcd",
"storage.kubernetes.io/csiProvisionerIdentity": "1708112628060-923-csi.kubevirt.io",
},
FSType: "ext4",
},
},
StorageClassName: "kubevirt",
PersistentVolumeReclaimPolicy: k8sv1.PersistentVolumeReclaimDelete,
},
}
_, err = tenantClient.CoreV1().PersistentVolumes().Create(context.Background(), tenantPV, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
tenantPVC = &k8sv1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "tenant-pvc",
},
Spec: k8sv1.PersistentVolumeClaimSpec{
AccessModes: []k8sv1.PersistentVolumeAccessMode{k8sv1.ReadWriteOnce},
Resources: k8sv1.ResourceRequirements{
Requests: k8sv1.ResourceList{
k8sv1.ResourceStorage: resource.MustParse("1Gi"),
},
},
VolumeName: tenantPV.Name,
},
}
tenantPVC, err = tenantClient.CoreV1().PersistentVolumeClaims(namespace).Create(context.Background(), tenantPVC, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
pod := writerPodFs(tenantPVC.Name)
By("Creating pod that attempts to use the specially crafted PVC")
pod, err = tenantClient.CoreV1().Pods(namespace).Create(context.Background(), pod, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
defer deletePod(tenantClient.CoreV1(), namespace, pod.Name)

involvedObject := fmt.Sprintf("involvedObject.name=%s", pod.Name)
By("Waiting for error event to show up in pod event log")
Eventually(func() bool {
list, err := tenantClient.CoreV1().Events(namespace).List(context.Background(), metav1.ListOptions{
FieldSelector: involvedObject, TypeMeta: metav1.TypeMeta{Kind: "Pod"},
})
Expect(err).ToNot(HaveOccurred())
for _, event := range list.Items {
klog.Infof("Event: %s [%s]", event.Message, event.Reason)
if event.Reason == "FailedAttachVolume" && strings.Contains(event.Message, "invalid volume name") {
return true
}
}
return false
}, 30*time.Second, time.Second).Should(BeTrue(), "error event should show up in pod event log")
})
})
})

func writerPodFs(volumeName string) *k8sv1.Pod {
1 change: 1 addition & 0 deletions hack/cluster-sync.sh
@@ -8,6 +8,7 @@ INFRA_STORAGE_CLASS=${INFRA_STORAGE_CLASS:-local}
REGISTRY=${REGISTRY:-192.168.66.2:5000}
TARGET_NAME=${TARGET_NAME:-kubevirt-csi-driver}
TAG=${TAG:-latest}
export INFRACLUSTER_LABELS=${INFRACLUSTER_LABELS:-"tenant-cluster=${TENANT_CLUSTER_NAMESPACE}"}

function tenant::deploy_kubeconfig_secret() {
TOKEN=$(_kubectl create token kubevirt-csi -n $TENANT_CLUSTER_NAMESPACE)
58 changes: 52 additions & 6 deletions pkg/kubevirt/client.go
@@ -2,13 +2,18 @@ package kubevirt

import (
"context"
goerrors "errors"
"fmt"
"strings"
"time"

"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
kubevirtv1 "kubevirt.io/api/core/v1"
cdicli "kubevirt.io/client-go/generated/containerized-data-importer/clientset/versioned"
"kubevirt.io/client-go/kubecli"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)
@@ -33,12 +38,15 @@ type Client interface {
}

type client struct {
kubernetesClient *kubernetes.Clientset
kubernetesClient kubernetes.Interface
virtClient kubecli.KubevirtClient
cdiClient cdicli.Interface
infraLabelMap map[string]string
volumePrefix string
}

// NewClient creates our client wrapper object for the actual KubeVirt and Kubernetes clients we use.
func NewClient(config *rest.Config) (Client, error) {
func NewClient(config *rest.Config, infraClusterLabelMap map[string]string, prefix string) (Client, error) {
result := &client{}

clientset, err := kubernetes.NewForConfig(config)
@@ -51,10 +59,26 @@ func NewClient(config *rest.Config) (Client, error) {
if err != nil {
return nil, err
}
cdiClient, err := cdicli.NewForConfig(config)
if err != nil {
return nil, err
}
result.virtClient = kubevirtClient
result.cdiClient = cdiClient
result.infraLabelMap = infraClusterLabelMap
result.volumePrefix = fmt.Sprintf("%s-", prefix)
return result, nil
}

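// containsLabels reports whether every key/value pair in b is also present in a.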
func containsLabels(a, b map[string]string) bool {
for k, v := range b {
if a[k] != v {
return false
}
}
return true
}
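
A quick illustration of the subset semantics (hypothetical values; assumes the snippet lives in the same package as containsLabels):

```go
// The infra label map must be fully present on the volume's labels;
// extra labels on the volume are ignored.
func exampleContainsLabels() (bool, bool) {
	infra := map[string]string{"tenant-cluster": "tenant-ns"}
	ok := containsLabels(map[string]string{"tenant-cluster": "tenant-ns", "app": "x"}, infra) // true
	missing := containsLabels(map[string]string{"app": "x"}, infra)                           // false
	return ok, missing
}
```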

// AddVolumeToVM performs a hotplug of a DataVolume to a VM
func (c *client) AddVolumeToVM(namespace string, vmName string, hotPlugRequest *kubevirtv1.AddVolumeOptions) error {
return c.virtClient.VirtualMachineInstance(namespace).AddVolume(context.TODO(), vmName, hotPlugRequest)
@@ -115,20 +139,42 @@ func (c *client) GetVirtualMachine(namespace, name string) (*kubevirtv1.VirtualM

// CreateDataVolume creates a new DataVolume under a namespace
func (c *client) CreateDataVolume(namespace string, dataVolume *cdiv1.DataVolume) (*cdiv1.DataVolume, error) {
return c.virtClient.CdiClient().CdiV1beta1().DataVolumes(namespace).Create(context.TODO(), dataVolume, metav1.CreateOptions{})
if !strings.HasPrefix(dataVolume.GetName(), c.volumePrefix) {
return nil, ErrInvalidVolume
} else {
return c.cdiClient.CdiV1beta1().DataVolumes(namespace).Create(context.TODO(), dataVolume, metav1.CreateOptions{})
}
}
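
Note: the CSI external-provisioner names dynamically provisioned volumes "pvc-<uid>" by default, which is presumably why --volume-prefix defaults to "pvc"; with that default, CreateDataVolume accepts only names the driver itself would have generated.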

// Ping performs a minimal request to the infra-cluster k8s api
func (c *client) Ping(ctx context.Context) error {
_, err := c.kubernetesClient.ServerVersion()
_, err := c.kubernetesClient.Discovery().ServerVersion()
return err
}

// DeleteDataVolume deletes a DataVolume from a namespace by name
func (c *client) DeleteDataVolume(namespace string, name string) error {
return c.virtClient.CdiClient().CdiV1beta1().DataVolumes(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
if dv, err := c.GetDataVolume(namespace, name); errors.IsNotFound(err) {
return nil
} else if err != nil {
return err
} else if dv != nil {
return c.cdiClient.CdiV1beta1().DataVolumes(namespace).Delete(context.TODO(), dv.Name, metav1.DeleteOptions{})
}
return nil
}

func (c *client) GetDataVolume(namespace string, name string) (*cdiv1.DataVolume, error) {
return c.virtClient.CdiClient().CdiV1beta1().DataVolumes(namespace).Get(context.TODO(), name, metav1.GetOptions{})
dv, err := c.cdiClient.CdiV1beta1().DataVolumes(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if dv != nil {
if !containsLabels(dv.Labels, c.infraLabelMap) || !strings.HasPrefix(dv.GetName(), c.volumePrefix) {
return nil, ErrInvalidVolume
}
}
return dv, nil
}

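// ErrInvalidVolume is returned when a volume does not carry the required infra-cluster labels or the expected name prefix.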
var ErrInvalidVolume = goerrors.New("invalid volume name")
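
For illustration only (not part of this commit): a unit-test-style sketch of the new guard in GetDataVolume. It assumes placement in the pkg/kubevirt package and that the generated CDI fake clientset is available at the import path below; both are assumptions, not verified against the repo.

```go
package kubevirt

import (
	"errors"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	cdifake "kubevirt.io/client-go/generated/containerized-data-importer/clientset/versioned/fake"
	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

// A DataVolume that exists and has the expected name prefix, but lacks the
// infra-cluster labels, must be rejected with ErrInvalidVolume.
func TestGetDataVolumeRejectsUnlabeledVolume(t *testing.T) {
	unlabeled := &cdiv1.DataVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "pvc-1234", Namespace: "infra-ns"},
	}
	c := &client{
		cdiClient:     cdifake.NewSimpleClientset(unlabeled),
		infraLabelMap: map[string]string{"tenant-cluster": "tenant-ns"},
		volumePrefix:  "pvc-",
	}
	if _, err := c.GetDataVolume("infra-ns", "pvc-1234"); !errors.Is(err, ErrInvalidVolume) {
		t.Fatalf("expected ErrInvalidVolume, got %v", err)
	}
}
```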
13 changes: 13 additions & 0 deletions pkg/kubevirt/client_suite_test.go
@@ -0,0 +1,13 @@
package kubevirt

import (
"testing"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

func TestClient(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "kubevirt client Suite")
}
