diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index a2f6e4eda8..0000000000 --- a/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,28 +0,0 @@ - - -**Is this a BUG REPORT or FEATURE REQUEST?**: - -> Uncomment only one, leave it on its own line: -> -> /kind bug -> /kind feature - - -**What happened**: - -**What you expected to happen**: - -**How to reproduce it (as minimally and precisely as possible)**: - - -**Anything else we need to know?**: - -**Environment**: -- csi-vsphere version: -- vsphere-cloud-controller-manager version: -- Kubernetes version: -- vSphere version: -- OS (e.g. from /etc/os-release): -- Kernel (e.g. `uname -a`): -- Install tools: -- Others: diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index ebb972b613..0000000000 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,22 +0,0 @@ - - -**What this PR does / why we need it**: - -**Which issue this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close that issue when PR gets merged)*: fixes # - -**Testing done**: -A PR must be marked "[WIP]", if no test result is provided. A WIP PR won't be reviewed, nor merged. -The requester can determine a sufficient test, e.g. build for a cosmetic change, E2E test in a predeployed setup, etc. -For new features, new tests should be done, in addition to regression tests. -If jtest is used to trigger precheckin tests, paste the result after jtest completes and remove [WIP] in the PR subject. -The review cycle will start, only after "[WIP]" is removed from the PR subject. - -**Special notes for your reviewer**: - -**Release note**: - -```release-note -``` diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 22d12927aa..af27b77b49 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -5,6 +5,8 @@ stages: - e2e-tests-dev - deploy-staging - tests-staging + - prod-rollout + - cleanup-dev run-unit-tests: stage: unit-test @@ -51,13 +53,16 @@ deploy-images-dev: stage: deploy-dev # This resource group is configured with process_mode=oldest_first to make sure the pipelines are run serially. resource_group: production - # Image built from cd-infra/images/ci-deploy/Dockerfile from Calatrava project. + # Image built from cd-infra/images/ci-deploy/Dockerfile. image: $CNS_IMAGE_CI_DEPLOY_STAGE script: - ./pipeline/deploy.sh dependencies: - build-images artifacts: + paths: + - ./env.json + - ./sv_kubeconfig_content.yaml reports: dotenv: build.env @@ -65,15 +70,26 @@ e2e-tests-dev: stage: e2e-tests-dev # This resource group is configured with process_mode=oldest_first to make sure the pipelines are run serially. resource_group: production - image: $CNS_IMAGE_GOLANG + # Image built by docker file images/ci/e2e/Dockerfile. + # Command to build new image is + # docker build -t --platform=Linux/x86_64 -f Dockerfile . + # docker tag / + # docker push / + image: $CSI_FVT_GOLANG dependencies: - deploy-images-dev script: - ./pipeline/e2e-tests.sh + artifacts: + paths: + - ./env.json + - ./sv_kubeconfig_content.yaml + reports: + dotenv: build.env deploy-images-staging: stage: deploy-staging - # Image built from cd-infra/images/ci-deploy/Dockerfile from Calatrava project. + # Image built from cd-infra/images/ci-deploy/Dockerfile. 
image: $CNS_IMAGE_CI_DEPLOY_STAGE script: - ./pipeline/deploy-staging.sh @@ -81,14 +97,15 @@ deploy-images-staging: - build-images only: - master - artifacts: - reports: - dotenv: build.env e2e-tests-staging: stage: tests-staging - # Image built from cd-infra//images/ci-e2e/Dockerfile from Calatrava project. - image: $CNS_IMAGE_E2E + # Image built by docker file images/ci/e2e/Dockerfile. + # Command to build new image is + # docker build -t --platform=Linux/x86_64 -f Dockerfile . + # docker tag / + # docker push / + image: $CSI_FVT_GOLANG dependencies: - deploy-images-staging script: @@ -98,7 +115,7 @@ e2e-tests-staging: system-tests-staging: stage: tests-staging - # Image built from cd-infra//images/ci-e2e/Dockerfile from Calatrava project. + # Image built from cd-infra//images/ci-e2e/Dockerfile. image: $CNS_IMAGE_E2E dependencies: - deploy-images-staging @@ -109,7 +126,7 @@ system-tests-staging: perf-tests-staging: stage: tests-staging - # Image built from cd-infra//images/ci-e2e/Dockerfile from Calatrava project. + # Image built from cd-infra//images/ci-e2e/Dockerfile. image: $CNS_IMAGE_E2E dependencies: - deploy-images-staging @@ -117,3 +134,25 @@ perf-tests-staging: - echo "TODO - Add perf tests." only: - master + +patch-prod-images: + stage: prod-rollout + # Image built from cd-infra/images/ci-deploy/Dockerfile + image: $CNS_IMAGE_CI_DEPLOY_STAGE + script: + - ./pipeline/patch-prod.sh + dependencies: + - build-images + only: + - master + +cleanup-dev: + stage: cleanup-dev + image: $CNS_IMAGE_CI_DEPLOY_STAGE + when: always + artifacts: + reports: + dotenv: build.env + script: + - chmod 777 pipeline/release-testbed.sh + - ./pipeline/release-testbed.sh diff --git a/README.md b/README.md index e0f91997ad..c5fcdf3c80 100644 --- a/README.md +++ b/README.md @@ -5,20 +5,16 @@ The vSphere CSI Driver is a Kubernetes plugin that allows persistent storage for This driver is in a stable `GA` state and is suitable for production use. -The vSphere CSI Driver is supported on vSphere 6.7U3 and later versions. -If you are using an earlier version of vSphere, you may need to upgrade to a supported version to use the vSphere CSI Driver. -It's also important to note that the vSphere CSI Driver has different versions, and each version may have different requirements or limitations, so it's essential to check the [documentation](https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.0/vmware-vsphere-csp-getting-started/GUID-D4AAD99E-9128-40CE-B89C-AD451DA8379D.html) for your specific version. - It is recommended to install an out-of-tree Cloud Provider Interface like [vSphere Cloud Provider Interface](https://github.com/kubernetes/cloud-provider-vsphere) in the Kubernetes cluster to keep the Kubernetes cluster fully operational. 
## Documentation Documentation for vSphere CSI Driver is available here: -* [vSphere CSI Driver Concepts](https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.0/vmware-vsphere-csp-getting-started/GUID-74AF02D7-1562-48BD-A9FE-C81A53342AC3.html) -* [vSphere CSI Driver Features](https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.0/vmware-vsphere-csp-getting-started/GUID-D4AAD99E-9128-40CE-B89C-AD451DA8379D.html#GUID-E59B13F5-6F49-4619-9877-DF710C365A1E) -* [vSphere CSI Driver Deployment Guide](https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.0/vmware-vsphere-csp-getting-started/GUID-6DBD2645-FFCF-4076-80BE-AD44D7141521.html) -* [vSphere CSI Driver User Guide](https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.0/vmware-vsphere-csp-getting-started/GUID-6DBD2645-FFCF-4076-80BE-AD44D7141521.html) +* [vSphere CSI Driver Concepts](https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-74AF02D7-1562-48BD-A9FE-C81A53342AC3.html) +* [vSphere CSI Driver Features](https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-D4AAD99E-9128-40CE-B89C-AD451DA8379D.html#GUID-E59B13F5-6F49-4619-9877-DF710C365A1E) +* [vSphere CSI Driver Deployment Guide](https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-6DBD2645-FFCF-4076-80BE-AD44D7141521.html) +* [vSphere CSI Driver User Guide](https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-6DBD2645-FFCF-4076-80BE-AD44D7141521.html) ## vSphere CSI Driver Releases @@ -28,7 +24,6 @@ Documentation for vSphere CSI Driver is available here: * [Release 2.5](https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.5/rn/vmware-vsphere-container-storage-plugin-25-release-notes/index.html) * [Release 2.4](https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.4/rn/vmware-vsphere-container-storage-plugin-24-release-notes/index.html) * [Release 2.3](https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.3/rn/vmware-vsphere-container-storage-plugin-23-release-notes/index.html) -* [Release 2.2](https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.2/rn/vmware-vsphere-container-storage-plugin-22-release-notes/index.html) ## Contributing diff --git a/cmd/syncer/main.go b/cmd/syncer/main.go index 39dadf18f9..fa9292cf30 100644 --- a/cmd/syncer/main.go +++ b/cmd/syncer/main.go @@ -22,15 +22,21 @@ import ( "fmt" "net/http" "os" + "os/signal" + "strings" + "syscall" "time" "github.com/kubernetes-csi/csi-lib-utils/leaderelection" "github.com/prometheus/client_golang/prometheus/promhttp" cnstypes "github.com/vmware/govmomi/cns/types" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/node" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/config" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/prometheus" + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/utils" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/common" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/common/commonco" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger" @@ -94,6 +100,27 @@ func main() { *internalFSSName, *internalFSSNamespace, "", *operationMode) admissionhandler.COInitParams = &syncer.COInitParams + // Disconnect VC session on restart + defer func() { + log.Info("Cleaning up vc sessions") + 
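// Note: cleanupSessions runs only when recover() reports a panic; a normal SIGTERM shutdown is handled by the signal handler registered below, which also logs out all vCenter sessions. +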
if r := recover(); r != nil { + cleanupSessions(ctx, r) + } + }() + + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGTERM) + go func() { + for { + sig := <-ch + if sig == syscall.SIGTERM { + log.Info("SIGTERM signal received") + utils.LogoutAllvCenterSessions(ctx) + os.Exit(0) + } + } + }() + if *operationMode == operationModeWebHookServer { log.Infof("Starting container with operation mode: %v", operationModeWebHookServer) if webHookStartError := admissionhandler.StartWebhookServer(ctx); webHookStartError != nil { @@ -112,6 +139,12 @@ func main() { // K8sCloudOperator should run on every node where csi controller can run. if clusterFlavor == cnstypes.CnsClusterFlavorWorkload { go func() { + defer func() { + log.Info("Cleaning up vc sessions cloud operator service") + if r := recover(); r != nil { + cleanupSessions(ctx, r) + } + }() if err := k8scloudoperator.InitK8sCloudOperatorService(ctx); err != nil { log.Fatalf("Error initializing K8s Cloud Operator gRPC sever. Error: %+v", err) } @@ -120,6 +153,12 @@ func main() { // Go module to keep the metrics http server running all the time. go func() { + defer func() { + log.Info("Cleaning up vc sessions prometheus metrics") + if r := recover(); r != nil { + cleanupSessions(ctx, r) + } + }() prometheus.SyncerInfo.WithLabelValues(syncer.Version).Set(1) for { log.Info("Starting the http server to expose Prometheus metrics..") @@ -172,7 +211,13 @@ func initSyncerComponents(ctx context.Context, clusterFlavor cnstypes.CnsCluster coInitParams *interface{}) func(ctx context.Context) { return func(ctx context.Context) { log := logger.GetLogger(ctx) - + // Disconnect vCenter sessions on restart + defer func() { + log.Info("Cleaning up vc sessions syncer components") + if r := recover(); r != nil { + cleanupSessions(ctx, r) + } + }() if err := manager.InitCommonModules(ctx, clusterFlavor, coInitParams); err != nil { log.Errorf("Error initializing common modules for all flavors. Error: %+v", err) os.Exit(1) @@ -197,35 +242,98 @@ func initSyncerComponents(ctx context.Context, clusterFlavor cnstypes.CnsCluster // Initialize CNS Operator for Supervisor clusters. if clusterFlavor == cnstypes.CnsClusterFlavorWorkload { go func() { + defer func() { + log.Info("Cleaning up vc sessions storage pool service") + if r := recover(); r != nil { + cleanupSessions(ctx, r) + } + }() if err := storagepool.InitStoragePoolService(ctx, configInfo, coInitParams); err != nil { log.Errorf("Error initializing StoragePool Service. Error: %+v", err) - os.Exit(1) + utils.LogoutAllvCenterSessions(ctx) + os.Exit(0) } }() } if clusterFlavor == cnstypes.CnsClusterFlavorVanilla { // Initialize node manager so that syncer components can // retrieve NodeVM using the NodeID. - useNodeUuid := false - if commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.UseCSINodeId) { - useNodeUuid = true - } nodeMgr := &node.Nodes{} - err = nodeMgr.Initialize(ctx, useNodeUuid) + err = nodeMgr.Initialize(ctx) if err != nil { log.Errorf("failed to initialize nodeManager. 
Error: %+v", err) os.Exit(1) } + if configInfo.Cfg.Global.ClusterDistribution == "" { + config, err := rest.InClusterConfig() + if err != nil { + log.Errorf("failed to get InClusterConfig: %v", err) + os.Exit(1) + } + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + log.Errorf("failed to create kubernetes client with err: %v", err) + os.Exit(1) + } + + // Get the version info for the Kubernetes API server + versionInfo, err := clientset.Discovery().ServerVersion() + if err != nil { + log.Errorf("failed to fetch versionInfo with err: %v", err) + os.Exit(1) + } + + // Extract the version string from the version info + version := versionInfo.GitVersion + var ClusterDistNameToServerVersion = map[string]string{ + "gke": "Anthos", + "racher": "Rancher", + "rke": "Rancher", + "docker": "DockerEE", + "dockeree": "DockerEE", + "openshift": "Openshift", + "wcp": "Supervisor", + "vmware": "TanzuKubernetesCluster", + "nativek8s": "VanillaK8S", + } + distributionUnknown := true + for distServerVersion, distName := range ClusterDistNameToServerVersion { + if strings.Contains(version, distServerVersion) { + configInfo.Cfg.Global.ClusterDistribution = distName + distributionUnknown = false + break + } + } + if distributionUnknown { + configInfo.Cfg.Global.ClusterDistribution = ClusterDistNameToServerVersion["nativek8s"] + } + } } go func() { + defer func() { + log.Info("Cleaning up vc sessions cns operator") + if r := recover(); r != nil { + cleanupSessions(ctx, r) + } + }() if err := manager.InitCnsOperator(ctx, clusterFlavor, configInfo, coInitParams); err != nil { log.Errorf("Error initializing Cns Operator. Error: %+v", err) - os.Exit(1) + utils.LogoutAllvCenterSessions(ctx) + os.Exit(0) } }() if err := syncer.InitMetadataSyncer(ctx, clusterFlavor, configInfo); err != nil { log.Errorf("Error initializing Metadata Syncer. Error: %+v", err) - os.Exit(1) + utils.LogoutAllvCenterSessions(ctx) + os.Exit(0) } } } + +func cleanupSessions(ctx context.Context, r interface{}) { + log := logger.GetLogger(ctx) + log.Errorf("Observed a panic and a restart was invoked, panic: %+v", r) + log.Info("Recovered from panic. Disconnecting the existing vc sessions.") + utils.LogoutAllvCenterSessions(ctx) + os.Exit(0) +} diff --git a/cmd/vsphere-csi/main.go b/cmd/vsphere-csi/main.go index a6ff940069..c652ac505c 100644 --- a/cmd/vsphere-csi/main.go +++ b/cmd/vsphere-csi/main.go @@ -17,11 +17,15 @@ limitations under the License. package main import ( + "context" "flag" "fmt" "os" + "os/signal" + "syscall" csiconfig "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/config" + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/utils" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/common/commonco" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger" @@ -66,7 +70,37 @@ func main() { log.Error("CSI endpoint cannot be empty. 
Please set the env variable.") os.Exit(1) } + log.Info("Enable logging off for vCenter sessions on exit") + // Disconnect VC session on restart + defer func() { + if r := recover(); r != nil { + log.Info("Cleaning up vc sessions") + cleanupSessions(ctx, r) + } + }() + + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGTERM) + go func() { + for { + sig := <-ch + if sig == syscall.SIGTERM { + log.Info("SIGTERM signal received") + utils.LogoutAllvCenterSessions(ctx) + os.Exit(0) + } + } + }() vSphereCSIDriver := service.NewDriver() vSphereCSIDriver.Run(ctx, CSIEndpoint) + +} + +func cleanupSessions(ctx context.Context, r interface{}) { + log := logger.GetLogger(ctx) + log.Errorf("Observed a panic and a restart was invoked, panic: %+v", r) + log.Info("Recovered from panic. Disconnecting the existing vc sessions.") + utils.LogoutAllvCenterSessions(ctx) + os.Exit(0) } diff --git a/docs/book/features/csi_driver_on_windows.md b/docs/book/features/csi_driver_on_windows.md deleted file mode 100644 index 3e9248c93d..0000000000 --- a/docs/book/features/csi_driver_on_windows.md +++ /dev/null @@ -1,165 +0,0 @@ - - -# vSphere CSI Driver - Windows Support - -- [Introduction](#introduction) -- [Prerequisite](#prereq) -- [How to enable vSphere CSI with windows nodes](#how-to-enable-vsphere-csi-win) -- [Examples to Deploy a Windows pod with PVC mount](#examples) - -## Introduction - -Windows node support is added in vSphere CSI driver v2.4.0 as an Alpha feature. - -Following features are not supported for Windows Node: - -1. ReadWriteMany volumes based on vSAN file service are not supported on Windows Node. -2. [Raw Block Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#raw-block-volume-support) are not supported. -3. Windows Nodes will be used as Worker nodes only. vSphere CSI will not support a mixture of Linux worker nodes and Windows Worker Nodes. - -## Prerequisite - -In addition to prerequisites mentioned [here](https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.0/vmware-vsphere-csp-getting-started/GUID-0AB6E692-AA47-4B6A-8CEA-38B754E16567.html), following needs to be fullfilled to support windows in vSphere CSI: - -1. Minimum kubernetes version required is 1.20. -2. Minimum vSphere CSI driver version required is 2.4. -3. Master nodes should be running Linux. -4. Worker nodes should have Windows server 2019. Other windows server version are not supported. Please refer [this](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes/) -5. if containerd is used in nodes, containerd version should be greater than or equal to 1.5, refer: https://github.com/containerd/containerd/issues/5405 -6. `CSI Proxy` should be installed in each of the Windows nodes. To install csi proxy follow steps from https://github.com/kubernetes-csi/csi-proxy#installation - -## How to enable vSphere CSI with Windows nodes - -- Install vSphere CSI driver 2.4 by following https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.0/vmware-vsphere-csp-getting-started/GUID-A1982536-F741-4614-A6F2-ADEE21AA4588.html -- To enable windows support, patch the configmap to enable csi-windows-support feature switch by running following command: - - ```bash - kubectl patch configmap/internal-feature-states.csi.vsphere.vmware.com \ - -n vmware-system-csi \ - --type merge \ - -p '{"data":{"csi-windows-support":"true"}}' - ``` - -- vSphere CSI driver v2.4.0 introduces a new node daemonset which will be running on all windows nodes. 
To verify this run: - - ```bash - $ kubectl get daemonsets vsphere-csi-node-windows --namespace=vmware-system-csi - NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE - vsphere-csi-node-windows 1 1 1 1 1 kubernetes.io/os=windows 7m10s - ``` - -## Examples - -- Define a storage class example-windows-sc.yaml as defined [here](https://raw.githubusercontent.com/kubernetes-sigs/vsphere-csi-driver/master/example/vanilla-k8s-RWO-filesystem-volumes/example-windows-sc.yaml) - - ```bash - kind: StorageClass - apiVersion: storage.k8s.io/v1 - metadata: - name: example-windows-sc - provisioner: csi.vsphere.vmware.com - allowVolumeExpansion: true # Optional: only applicable to vSphere 7.0U1 and above - parameters: - storagepolicyname: "vSAN Default Storage Policy" # Optional Parameter - #datastoreurl: "ds:///vmfs/volumes/vsan:52cdfa80721ff516-ea1e993113acfc77/" # Optional Parameter - #csi.storage.k8s.io/fstype: "ntfs" # Optional Parameter - ``` - - 'csi.storage.k8s.io/fstype' is an optional parameter. From the Windows file systems, only ntfs can be set to its value, as vSphere CSI Driver can only support the NTFS filesystem on Windows Nodes. - -- Import this `StorageClass` into `Vanilla Kubernetes` cluster: - - ```bash - kubectl create -f example-sc.yaml - ``` - -- Define a `PersistentVolumeClaim` request example-windows-pvc.yaml as shown in the spec [here](https://raw.githubusercontent.com/kubernetes-sigs/vsphere-csi-driver/master/example/vanilla-k8s-RWO-filesystem-volumes/example-windows-pvc.yaml) - - ```bash - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: example-windows-pvc - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 5Gi - storageClassName: example-windows-sc - volumeMode: Filesystem - ``` - - The pvc definition is same as Linux and only 'ReadWriteOnce' can be specified as accessModes. - -- Import this `PersistentVolumeClaim` into `Vanilla Kubernetes` cluster: - - ```bash - kubectl create -f example-windows-pvc.yaml - ``` - -- Verify a `PersistentVolume` was created successfully - - The `Status` should say `Bound`. 
- - ```bash - $ kubectl describe pvc example-windows-pvc - Name: example-windows-pvc - Namespace: default - StorageClass: example-windows-sc - Status: Bound - Volume: pvc-e64e0716-ff63-47b8-8ee4-d1eb4f86dcd7 - Labels: - Annotations: pv.kubernetes.io/bind-completed: yes - pv.kubernetes.io/bound-by-controller: yes - volume.beta.kubernetes.io/storage-provisioner: csi.vsphere.vmware.com - Finalizers: [kubernetes.io/pvc-protection] - Capacity: 5Gi - Access Modes: RWO - VolumeMode: Filesystem - Used By: example-windows-pod - Events: - ``` - -- To use the above pvc in a Windows pod, you can create a pod spec example-windows-pod.yaml like [this](https://raw.githubusercontent.com/kubernetes-sigs/vsphere-csi-driver/master/example/vanilla-k8s-RWO-filesystem-volumes/example-windows-pod.yaml) - - ```bash - apiVersion: v1 - kind: Pod - metadata: - name: example-windows-pod - spec: - nodeSelector: - kubernetes.io/os: windows - containers: - - name: test-container - image: mcr.microsoft.com/windows/servercore:ltsc2019 - command: - - "powershell.exe" - - "-Command" - - "while (1) { Add-Content -Encoding Ascii C:\\test\\data.txt $(Get-Date -Format u); sleep 1 }" - volumeMounts: - - name: test-volume - mountPath: "/test/" - readOnly: false - volumes: - - name: test-volume - persistentVolumeClaim: - claimName: example-windows-pvc - ``` - -- Create the pod - - ```bash - kubectl create -f example-windows-pod.yaml - ``` - -- Verify pod was created succussfully - - ```bash - $ kubectl get pod example-windows-pod - NAME READY STATUS RESTARTS AGE - example-windows-pod 1/1 Running 0 4m13s - ``` - - In this example, example-windows-pvc is formatted as NTFS file system and is mounted to "C:\\test" directory. diff --git a/docs/book/features/raw_block_volume.md b/docs/book/features/raw_block_volume.md deleted file mode 100644 index aec3bd9e68..0000000000 --- a/docs/book/features/raw_block_volume.md +++ /dev/null @@ -1,63 +0,0 @@ -# vSphere CSI Driver - Single-Access, Block Based Volume (Raw Block Volume) - -[Raw Block Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#raw-block-volume-support) feature in Kubernetes was promoted to stable in Kubernetes 1.18. -vSphere CSI Driver release `v2.3.0` onwards has Raw Block Volume feature released as `Alpha`. We do not recommend `Alpha` features for production use. Raw Block Volume feature is only supported for linux based nodes. - -This feature allows persistent volumes to be exposed inside containers as a block device instead of as a mounted file system. - -There are some specialized applications that require direct access to a block device because the file system layer introduces unneeded overhead. -The ability to use a raw block device without a filesystem will allow Kubernetes better support for high-performance applications that are capable of consuming and manipulating block storage for their needs. The most common case is databases (MongoDB, Cassandra) that require consistent I/O performance and low latency, which prefer to organize their data directly on the underlying storage. - -## Creating a new raw block PVC - -Create a storage class. - -```yaml -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: example-raw-block-sc -provisioner: csi.vsphere.vmware.com -``` - -To request a raw block PersistentVolumeClaim, volumeMode = "Block" must be specified in the PersistentVolumeClaimSpec. -Raw Block Volume should be created using accessModes `ReadWriteOnce`. 
vSphere CSI Driver does not support creating raw block volume using `ReadWriteMany` accessModes. - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: example-raw-block-pvc -spec: - volumeMode: Block - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 5Gi - storageClassName: example-raw-block-sc -``` - -## Using a raw block PVC - -When you use the PVC in a pod definition, you get to choose the device path for the block device rather than the mount path for the file system. - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: example-raw-block-pod -spec: - containers: - - name: test-container - image: gcr.io/google_containers/busybox:1.24 - command: ["/bin/sh", "-c", "while true ; do sleep 2 ; done"] - volumeDevices: - - devicePath: /dev/xvda - name: data - restartPolicy: Never - volumes: - - name: data - persistentVolumeClaim: - claimName: example-raw-block-pvc -``` diff --git a/docs/book/features/volume_snapshot.md b/docs/book/features/volume_snapshot.md deleted file mode 100644 index d84cea0cfa..0000000000 --- a/docs/book/features/volume_snapshot.md +++ /dev/null @@ -1,210 +0,0 @@ - - -# vSphere CSI Driver - Volume Snapshot & Restore - -- [Introduction](#introduction) -- [Prerequisite](#prereq) -- [How to enable Volume Snapshot & Restore feature in vSphere CSI](#how-to-deploy) -- [How to use Volume Snapshot & Restore feature](#how-to-use) -- [Configuration - Maximum Number of Snapshots per Volume](#config-param) - -## Introduction - -CSI Volume Snapshot & Restore feature was introduced as an alpha feature in Kubernetes 1.12, promoted to beta in Kubernetes 1.17 and moved to GA in Kubernetes 1.20. -Volume Snapshot & Restore feature will be added in vSphere CSI driver 2.4 as an Alpha feature. - -Known limitations for the Alpha feature in vSphere CSI driver 2.4 are listed below. - -1. It is only supported in ReadWriteOnce volumes based on First Class Disk, i.e., FCD or CNS block volume, while not yet supported in ReadWriteMany volumes based on vSAN file service. -2. It is only supported in [Vanilla Kubernetes](https://github.com/kubernetes/kubernetes) cluster now, while not yet supported in either [vSphere with Kubernetes](https://blogs.vmware.com/vsphere/2019/08/introducing-project-pacific.html) cluster aka Supervisor Cluster or [Tanzu Kubernetes Grid Service](https://blogs.vmware.com/vsphere/2020/03/vsphere-7-tanzu-kubernetes-clusters.html) cluster aka Guest Cluster. -3. Volume restore can only create a PVC with the same storage capacity as the source VolumeSnapshot. -4. vSphere CSI introduces a constraint on the maximum number of snapshots per ReadWriteOnce volume. The maximum is configurable but set to 3 by default. Please refer to the section of [Configuration - Maximum Number of Snapshots per Volume](#config-param) for more detail. -5. It is not supported to expand/delete volumes with snapshots. -6. It is not supported to snapshot CSI migrated volumes. - -## Prerequisite - -In addition to prerequisites mentioned [here](https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.0/vmware-vsphere-csp-getting-started/GUID-0AB6E692-AA47-4B6A-8CEA-38B754E16567.html), following prerequisites to be fulfilled to support Volume Snapshot & Restore feature in vSphere CSI: - -1. Minimum kubernetes version required is 1.20. -2. Minimum CSI upstream external-snapshotter/snapshot-controller version required is 4.1. -3. Minimum vSphere CSI driver version required is 2.4. -4. Minimum vSphere version required is 7.0U3. 
(The minimum version applies to both vCenter version and ESXi version) - -## How to enable Volume Snapshot & Restore feature in vSphere CSI - -- Install vSphere CSI driver 2.4 by following https://vsphere-csi-driver.sigs.k8s.io/driver-deployment/installation.html -- To enable Volume Snapshot feature, patch the configmap to enable `block-volume-snapshot` feature switch by running following command: - - ```bash - $ kubectl patch configmap/internal-feature-states.csi.vsphere.vmware.com \ - -n vmware-system-csi \ - --type merge \ - -p '{"data":{"block-volume-snapshot":"true"}}' - ``` - -- To deploy required components for CSI volume snapshot feature, the following script is available for an easy deployment. -To get to know the step-by-step workflow of the script, please check out using the command `bash deploy-csi-snapshot-components.sh -h`. - - ```bash - $ wget https://raw.githubusercontent.com/kubernetes-sigs/vsphere-csi-driver/v2.4.0/manifests/vanilla/deploy-csi-snapshot-components.sh - $ bash deploy-csi-snapshot-components.sh - ✅ Verified that block-volume-snapshot feature is enabled - ... - ✅ Successfully deployed all components for CSI Snapshot feature. - ``` - - Below is the expected view in `vmware-system-csi` namespace when the deployment is completed in a single-master cluster: - - ```bash - $ kubectl -n vmware-system-csi get pod,deploy - NAME READY STATUS RESTARTS AGE - pod/vsphere-csi-controller-6c46964474-bcx5t 7/7 Running 0 164m - pod/vsphere-csi-node-8pspp 3/3 Running 0 3h55m - pod/vsphere-csi-node-lgthd 3/3 Running 0 3h55m - pod/vsphere-csi-node-nzvx8 3/3 Running 0 3h55m - pod/vsphere-csi-node-x4zch 3/3 Running 0 3h55m - - NAME READY UP-TO-DATE AVAILABLE AGE - deployment.apps/vsphere-csi-controller 1/1 1 1 3h55m - ``` - - At this point, you are good to try out CSI Volume Snapshot & Restore feature in vSphere CSI driver. - -## How to use Volume Snapshot & Restore feature - -To use volume snapshot and restore feature in vSphere CSI, please refer to example yaml files for FileSystem volumes, [vanilla-k8s-RWO-filesystem-volumes](https://github.com/kubernetes-sigs/vsphere-csi-driver/tree/master/example/vanilla-k8s-RWO-filesystem-volumes), and example yaml files for Raw Block volumes, [vanilla-k8s-RWO-Block-Volumes](https://github.com/kubernetes-sigs/vsphere-csi-driver/tree/master/example/vanilla-k8s-RWO-Block-Volumes). Below is an example for FileSystem volumes. - -### Volume Snapshot - -#### Dynamic-provisioned Snapshot - -Below is an example StorageClass yaml from [vanilla-k8s-RWO-filesystem-volumes/example-sc.yaml](https://github.com/kubernetes-sigs/vsphere-csi-driver/blob/master/example/vanilla-k8s-RWO-filesystem-volumes/example-sc.yaml), with optional parameters being commented out. - -```yaml -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: example-vanilla-rwo-filesystem-sc - annotations: - storageclass.kubernetes.io/is-default-class: "true" # Optional -provisioner: csi.vsphere.vmware.com -allowVolumeExpansion: true # Optional: only applicable to vSphere 7.0U1 and above -#parameters: -# datastoreurl: "ds:///vmfs/volumes/vsan:52cdfa80721ff516-ea1e993113acfc77/" # Optional Parameter -# storagepolicyname: "vSAN Default Storage Policy" # Optional Parameter -# csi.storage.k8s.io/fstype: "ext4" # Optional Parameter -``` - -Create a StorageClass. 
- -```bash -$ kubectl apply -f example-sc.yaml -$ kubectl get sc -NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE -example-vanilla-rwo-filesystem-sc (default) csi.vsphere.vmware.com Delete Immediate true 2s -``` - -Create a PVC: - -```bash -$ kubectl apply -f example-pvc.yaml -$ kubectl get pvc -NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE -example-vanilla-rwo-pvc Bound pvc-2dc37ea0-dee0-4ad3-96ca-82f0159d7532 5Gi RWO example-vanilla-rwo-filesystem-sc 7s -``` - -Create a VolumeSnapshotClass: - -```bash -$ kubectl apply -f example-snapshotclass.yaml -$ kubectl get volumesnapshotclass -NAME DRIVER DELETIONPOLICY AGE -example-vanilla-rwo-filesystem-snapshotclass csi.vsphere.vmware.com Delete 4s -``` - -Create a VolumeSnapshot: - -```bash -$ kubectl apply -f example-snapshot.yaml -$ kubectl get volumesnapshot -NAME READYTOUSE SOURCEPVC SOURCESNAPSHOTCONTENT RESTORESIZE SNAPSHOTCLASS SNAPSHOTCONTENT CREATIONTIME AGE -example-vanilla-rwo-filesystem-snapshot true example-vanilla-rwo-pvc 5Gi example-vanilla-rwo-filesystem-snapshotclass snapcontent-a7c00b7f-f727-4010-9b1a-d546df9a8bab 57s 58s -``` - -#### Static-provisioned Snapshot - -Below are prerequisites for creating static-provisioned snapshot: - -1. Make sure a FCD snapshot on which the static-provisioned snapshot is created is available in your vSphere. -2. Construct the snapshotHandle based on the combination of FCD Volume ID and FCD Snapshot ID of the snapshot. For example, FCD Volume ID and FCD Snapshot ID of a FCD snapshot are `4ef058e4-d941-447d-a427-438440b7d306` and `766f7158-b394-4cc1-891b-4667df0822fa`. Then, the constructed snapshotHandle is `4ef058e4-d941-447d-a427-438440b7d306+766f7158-b394-4cc1-891b-4667df0822fa`. -3. Update the `spec.source.snapshotHandle` field in the VolumeSnapshotContent object of example-static-snapshot.yaml with the constructed snapshotHandle in step 2. - -Create a static-provisioned VolumeSnapshot: - -```bash -$ kubectl apply -f example-static-snapshot.yaml -$ kubectl get volumesnapshot static-vanilla-rwo-filesystem-snapshot -NAME READYTOUSE SOURCEPVC SOURCESNAPSHOTCONTENT RESTORESIZE SNAPSHOTCLASS SNAPSHOTCONTENT CREATIONTIME AGE -static-vanilla-rwo-filesystem-snapshot true static-vanilla-rwo-filesystem-snapshotcontent 5Gi static-vanilla-rwo-filesystem-snapshotcontent 76m 22m -``` - -### Volume Restore - -Make sure the VolumeSnapshot to be restored is available in the current Kubernetes cluster. - -```bash -$ kubectl get volumesnapshot -NAME READYTOUSE SOURCEPVC SOURCESNAPSHOTCONTENT RESTORESIZE SNAPSHOTCLASS SNAPSHOTCONTENT CREATIONTIME AGE -example-vanilla-rwo-filesystem-snapshot true example-vanilla-rwo-pvc 5Gi example-vanilla-rwo-filesystem-snapshotclass snapcontent-a7c00b7f-f727-4010-9b1a-d546df9a8bab 22m 22m -``` - -Create a PVC from a VolumeSnapshot: - -```bash -$ kubectl create -f example-restore.yaml -$ kubectl get pvc -NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE -example-vanilla-rwo-filesystem-restore Bound pvc-202c1dfc-78be-4835-89d5-110f739a87dd 5Gi RWO example-vanilla-rwo-filesystem-sc 78s -``` - -## Configuration - Maximum Number of Snapshots per Volume - -Per the [best practices for using VMware snapshots](https://kb.vmware.com/s/article/1025279), it is recommended to use only 2 to 3 snapshots per virtual disk for a better performance. -So, we make the global constraint, i.e., maximum number of snapshots per volume, configurable and meanwhile set the default to 3. 
-Additionally, the best-practice guideline only applies to virtual disks on VMFS and NFS datastores while not to those on VVOL and VSAN. -Therefore, we also introduces granular configuration parameters on the constraint, apart from the global configuration parameter. - -Below are configuration parameters available: - -- `global-max-snapshots-per-block-volume`: Global configuration parameter that applies to volumes on all kinds of datastores. By default, it is set to 3. -- `granular-max-snapshots-per-block-volume-vsan`: Granular configuration parameter on VSAN datastore only. It overrides the global constraint if set, while it falls back to the global constraint if unset. -- `granular-max-snapshots-per-block-volume-vvol`: Granular configuration parameter on VVOL datastore only. It overrides the global constraint if set, while it falls back to the global constraint if unset. - -**Note**: Users only need to configure it when the default constraint does not work for their user cases. For others, just skip the configuration below. - -Here is an example of vSphere CSI about how to configure the constraints. Firstly, delete the Secret that stores vSphere config. (Kubernetes doesn't allow to update Secret resources in place) - -```bash -kubectl delete secret vsphere-config-secret --namespace=vmware-system-csi -``` - -Secondly, update the config file of vSphere CSI and add configuration parameters for snapshot feature under the `[Snapshot]` section. - -```bash -$ cat /etc/kubernetes/csi-vsphere.conf -[Global] -... - -[Snapshot] -global-max-snapshots-per-block-volume = 5 # optional, set to 3 if unset -granular-max-snapshots-per-block-volume-vsan = 7 # optional, fall back to the global constraint if unset -granular-max-snapshots-per-block-volume-vvol = 8 # optional, fall back to the global constraint if unset -... -``` - -Finally, create a new Secret with the updated config file. - -```bash -kubectl create secret generic vsphere-config-secret --from-file=csi-vsphere.conf --namespace=vmware-system-csi -``` diff --git a/docs/book/features/vsphere_csi_migration.md b/docs/book/features/vsphere_csi_migration.md deleted file mode 100644 index 1033ce289a..0000000000 --- a/docs/book/features/vsphere_csi_migration.md +++ /dev/null @@ -1,250 +0,0 @@ - - -# vSphere CSI Driver - vSphere CSI Migration - -- [Introduction](#introduction) -- [Things to consider before turning on Migration](#consider-followings-before-turning-on-migration) -- [How to enable vSphere CSI Migration](#how-to-enable-vsphere-csi-migration) - -## Introduction - -**Note:** Feature to migrate in-tree vSphere volumes to CSI is released as **beta** with [v2.1.0](https://github.com/kubernetes-sigs/vsphere-csi-driver/releases/tag/v2.1.0). - -vSphere CSI driver and CNS bring in a lot of features that are not available in the in-tree vSphere volume plugin. - -Refer to the following feature comparisons table to know what is added in the vSphere CSI driver. - -| Feature | In-tree vSphere Volume Plugin | vSphere CSI Driver | -|---------|-------------------------------|--------------------| -| Block volume (`ReadWriteOnce` access mode) | Supported. Block volumes are backed by vmdks. | Supported. Block volumes are backed by vSphere Improved Virtual Disk(management layer on top of vmdk). | -| File volume (`ReadWriteMany`/`ReadOnlyMany` access modes) | Not Supported | Supported. File volumes are backed by vSAN file shares. 
| -| Dynamic provisioning(with and without SPBM) | Supported for block volumes only | Supported for block and file volumes | -| Static Provisioning | Supported for block volumes only | Supported for block and file volumes | -| Expand volume (Offline) | Not supported | Supported for block volumes | -| Storage vMotion of block volumes | Not supported | Not supported | -| vSphere UI integration(CNS dashboard, vSAN virtual objects, vSphere space) | Not supported | Supported | -| [Tanzu™ Kubernetes Grid™ Service](https://docs.vmware.com/en/VMware-vSphere/7.0/vmware-vsphere-with-kubernetes/GUID-152BE7D2-E227-4DAA-B527-557B564D9718.html) | Not supported | Supported | -| Tolerate Datacenter name, Datastore name and Storage policy name changes | Not Supported | supported | -| Volume Encryption | Not supported | Supported | -| Kubernetes Cluster spread across multiple vCenter Servers | Supported | Not supported | -| Kubernetes Cluster spread across multiple datacenters within a vCenter Server | Supported | Supported | -| Volume Topology(with `waitForFirstConsumer`) | Supported | Supported | -| Thick disk provisioning | Supported on all datastore types (vSAN, VVOL, VMFS and NFS) | Supported only on vSAN Datastore using Storage Policy Capability - `Object space reservation` | -| Raw Block Volume | Supported | Not supported | -| Inline volumes in Pod spec | Supported | Not supported | - -In addition to the above feature comparison, one of the most important things customers need to consider is that Kubernetes will deprecate In-tree vSphere volume plugin and it will be removed in the future Kubernetes releases. -Volumes provisioned using vSphere in-tree plugin do not get the additional new features supported by the vSphere CSI driver. - -Kubernetes has provided a seamless procedure to help migrate in-tree vSphere volumes to a vSphere CSI driver. After in-tree vSphere volumes migrated to vSphere CSI driver, all subsequent operations on migrated volumes are performed by the vSphere CSI driver. -Migrated vSphere volume will not get additional capabilities vSphere CSI driver supports. - -## Things to consider before turning on Migration - -- vSphere CSI Migration is released with `beta` feature-gate in Kubernetes 1.19. Refer [release note announcement](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.19.md#csi-migration---azuredisk-and-vsphere-beta). -- Kubernetes 1.19 release has deprecated vSAN raw policy parameters for the in-tree vSphere Volume plugin and these parameters will be removed in a future release. Refer [deprecation announcement](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.19.md#deprecation) -- Following vSphere in-tree StorageClass parameters will not be supported after enabling migration. - - `hostfailurestotolerate` - - `forceprovisioning` - - `cachereservation` - - `diskstripes` - - `objectspacereservation` - - `iopslimit` - - `diskformat` -- Storage Policy consumed by in-tree vSphere volume should not be renamed or deleted. Volume migration requires original storage policy used for provisioning the volume to be present on vCenter for registration of volume as Container Volume in vSphere. -- Datastore consumed by in-tree vSphere volume should not be renamed. Volume migration relies on the original datastore name present on the volume source for registration of volume as Container Volume in vSphere. 
-- For statically created vSphere in-tree Persistent Volume Claims and Persistent Volumes, make sure to add the following annotations before enabling migration. Statically provisioned in-tree vSphere volumes can't be migrated to CSI without adding these annotations. This also applies to new static in-tree PVs and PVCs created after the migration is enabled. - - Annotation on PV - - annotations: - pv.kubernetes.io/provisioned-by: kubernetes.io/vsphere-volume - - Annotation on PVC - - annotations: - volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/vsphere-volume - -- vSphere CSI Driver does not support Provisioning `eagerzeroedthick` and `zeroedthick` volume. After the migration is enabled, when a new volume is requested using the in-tree provisioner and `diskformat` parameter set to `eagerzeroedthick` or `zeroedthick`, volume creation will be failed by vSphere CSI Driver. Post migration only supported value for `diskformat` parameter will be `thin`. Existing volumes created before the migration using disk format `eagerzeroedthick` or `zeroedthick` will be migrated to CSI. -- vSphere CSI Driver does not support raw vSAN policy parameters. After the migration is enabled, when a new volume is requested using in-tree provisioner and vSAN raw policy parameters, Volume Creation will be failed by vSphere CSI Driver. -- vSphere CSI Migration requires vSphere 7.0u1. Customers who have in-tree vSphere volumes must upgrade vSphere to 7.0u1. Customers who do not need to migrate in-tree vSphere volumes can use vSphere 67u3 and above. -- vSphere CSI driver does not support volumes formatted with the Windows file system. Migrated in-tree vSphere volumes using the windows file system can't be used with vSphere CSI driver. -- In-tree vSphere volume plugin is heavily relying on the name of the datastore set on the PV’s Source. After migration is enabled, Storage DRS or vmotion should not be enabled. If storage DRS moves disk from one datastore to another further volume operations may break. - -## How to enable vSphere CSI Migration - -In Kubernetes 1.19 release, vSphere CSI Migration is available with `beta` feature-gates. - -To try out vSphere CSI migration in beta for vSphere plugin, perform the following steps. - -1. Upgrade vSphere to 7.0u1. -2. Upgrade kubernetes to 1.19 release. -3. Ensure that your version of kubectl is also at 1.19 or later. -4. Install vSphere Cloud Provider Interface (CPI). Please follow guideline mentioned at https://vsphere-csi-driver.sigs.k8s.io/driver-deployment/prerequisites.html#vsphere_cpi -5. Install vSphere CSI Driver [v2.3.0](https://github.com/kubernetes-sigs/vsphere-csi-driver/releases/tag/v2.3.0) - - Make sure to enable csi-migration feature gate in the deployment yaml file. - - apiVersion: v1 - data: - "csi-migration": "true" - kind: ConfigMap - metadata: - name: internal-feature-states.csi.vsphere.vmware.com - namespace: vmware-system-csi - -6. Install admission webhook. - - vSphere CSI driver does not support provisioning of volume by specifying migration specific parameters in the StorageClass. - These parameters were added by vSphere CSI translation library, and should not be used in the storage class directly. - - Validating admission controller helps prevent user from creating or updating StorageClass using `csi.vsphere.vmware.com` as provisioner with these parameters. 
- - - `csimigration` - - `datastore-migrationparam` - - `diskformat-migrationparam` - - `hostfailurestotolerate-migrationparam` - - `forceprovisioning-migrationparam` - - `cachereservation-migrationparam` - - `diskstripes-migrationparam` - - `objectspacereservation-migrationparam` - - `iopslimit-migrationparam` - - This Validating admission controller also helps prevent user from creating or updating StorageClass using `kubernetes.io/vsphere-volume` as provisioner with `AllowVolumeExpansion` to `true`. - - - Pre-requisite: `kubectl`, `openssl` and `base64` commands should be pre-installed on the system from where we can invoke admission webhook installation scripts. - - Installation steps: - - Script is available to deploy the admission webhook and it is located at https://github.com/kubernetes-sigs/vsphere-csi-driver/tree/v2.3.0/manifests/vanilla on the repository. - - Download the scripts - - $ curl -O https://raw.githubusercontent.com/kubernetes-sigs/vsphere-csi-driver/v2.3.0/manifests/vanilla/generate-signed-webhook-certs.sh - - $ curl -O https://raw.githubusercontent.com/kubernetes-sigs/vsphere-csi-driver/v2.3.0/manifests/vanilla/create-validation-webhook.sh - - $ curl -O https://raw.githubusercontent.com/kubernetes-sigs/vsphere-csi-driver/v2.3.0/manifests/vanilla/validatingwebhook.yaml - - - Generate the self-signed certificate - - $ bash generate-signed-webhook-certs.sh - creating certs in tmpdir /tmp/tmp.jmZqh2bAwJ - Generating RSA private key, 2048 bit long modulus (2 primes) - ........................................+++++ - ............+++++ - e is 65537 (0x010001) - certificatesigningrequest.certificates.k8s.io "vsphere-webhook-svc.vmware-system-csi" deleted - Warning: certificates.k8s.io/v1beta1 CertificateSigningRequest is deprecated in v1.19+, unavailable in v1.22+; use certificates.k8s.io/v1 CertificateSigningRequest - certificatesigningrequest.certificates.k8s.io/vsphere-webhook-svc.vmware-system-csi created - NAME AGE SIGNERNAME REQUESTOR CONDITION - vsphere-webhook-svc.vmware-system-csi 0s kubernetes.io/legacy-unknown kubernetes-admin Pending - certificatesigningrequest.certificates.k8s.io/vsphere-webhook-svc.vmware-system-csi approved - secret/vsphere-webhook-certs configured - - Create the validation webhook - - $ bash create-validation-webhook.sh - service/vsphere-webhook-svc created - validatingwebhookconfiguration.admissionregistration.k8s.io/validation.csi.vsphere.vmware.com created - serviceaccount/vsphere-csi-webhook created - role.rbac.authorization.k8s.io/vsphere-csi-webhook-role created - rolebinding.rbac.authorization.k8s.io/vsphere-csi-webhook-role-binding created - deployment.apps/vsphere-csi-webhook created - -7. Enable feature flags `CSIMigration` and `CSIMigrationvSphere` - - - `CSIMigrationvSphere` flag enables shims and translation logic to route volume operations from the vSphere in-tree plugin to vSphere CSI plugin. Supports falling back to in-tree vSphere plugin if a node does not have a vSphere CSI plugin installed and configured. - - `CSIMigrationvSphere` requires `CSIMigration` feature flag to be enabled. This flag is enabling CSI migration on the Kubernetes Cluster. - - 7.1 Steps for the control plane node(s) - - - Enable feature flags `CSIMigration` and `CSIMigrationvSphere` on `kube-controller` and `kubelet` on all control plane nodes. - - update kube-controller-manager manifest file and following arguments. 
This file is generally available at `/etc/kubernetes/manifests/kube-controller-manager.yaml` - - `- --feature-gates=CSIMigration=true,CSIMigrationvSphere=true` - - - update kubelet configuration file and add following flags. This file is generally available at `/var/lib/kubelet/config.yaml` - - featureGates: - CSIMigration: true - CSIMigrationvSphere: true - - - Restart the kubelet on the control plane nodes using the command: - - systemctl restart kubelet - - - Verify that the kubelet is functioning correctly using the following command: - - systemctl status kubelet - - - If there are any issues with the kubelet, check the logs on the control plane node using the following command: - - journalctl -xe - - 7.2 Steps for the worker node(s) - - - Enable feature flags `CSIMigration` and `CSIMigrationvSphere` on `kubelet` on all workload nodes. Please note that before changing the configuration on the Kubelet on each node we **must drain** the node (remove running application workloads). - - Node drain example. - - $ kubectl drain k8s-node1 --force --ignore-daemonsets - node/k8s-node1 cordoned - WARNING: deleting Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet: default/vcppod; ignoring DaemonSet-managed Pods: kube-system/kube-flannel-ds-amd64-gs7fr, kube-system/kube-proxy-rbjx4, vmware-system-csi/vsphere-csi-node-fh9f6 - evicting pod default/vcppod - pod/vcppod evicted - node/k8s-node1 evicted - - - After migration is enabled, make sure `csinodes` instance for the node is updated with `storage.alpha.kubernetes.io/migrated-plugins` annotation. - - $ kubectl describe csinodes k8s-node1 - Name: k8s-node1 - Labels: - Annotations: storage.alpha.kubernetes.io/migrated-plugins: kubernetes.io/vsphere-volume - CreationTimestamp: Wed, 29 Apr 2020 17:51:35 -0700 - Spec: - Drivers: - csi.vsphere.vmware.com: - Node ID: k8s-node1 - Events: - - - Restart the kubelet on the workload nodes using the command: - - systemctl restart kubelet - - - Verify that the kubelet is functioning correctly using the following command: - - systemctl status kubelet - - - If there are any issues with the kubelet, check the logs on the workload node using the following command: - - journalctl -xe - - - Once the kubelet is restarted, `uncordon` the node so that it can be used for scheduling workloads: - - kubectl uncordon k8s-node1 - - - Repeat these steps for all workload nodes in the Kubernetes Cluster. - -8. There is also an optional `CSIMigrationvSphereComplete` flag that can be enabled if all the nodes have CSI migration enabled. `CSIMigrationvSphereComplete` helps stop registering the vSphere in-tree plugin in kubelet and volume controllers and enables shims and translation logic to route volume operations from the vSphere in-tree plugin to vSphere CSI plugin. `CSIMigrationvSphereComplete` flag requires `CSIMigration` and `CSIMigrationvSphere` feature flags enabled and vSphere CSI plugin installed and configured on all nodes in the cluster. - -9. Verify vSphere in-tree PVCs and PVs are migrated to vSphere CSI driver, verify `pv.kubernetes.io/migrated-to: csi.vsphere.vmware.com` annotations are present on PVCs and PVs. 
- - Annotations on PVCs - - Annotations: pv.kubernetes.io/bind-completed: yes - pv.kubernetes.io/bound-by-controller: yes - pv.kubernetes.io/migrated-to: csi.vsphere.vmware.com - volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/vsphere-volume - - Annotations on PVs - - Annotations: kubernetes.io/createdby: vsphere-volume-dynamic-provisioner - pv.kubernetes.io/bound-by-controller: yes - pv.kubernetes.io/migrated-to: csi.vsphere.vmware.com - pv.kubernetes.io/provisioned-by: kubernetes.io/vsphere-volume - - New in-tree vSphere volumes created by vSphere CSI driver after migration is enabled, can be identified by following annotations. PV spec will still hold vSphere Volume Path, so in case when migration needs to be disabled, provisioned volume can be used by the in-tree vSphere plugin. - - Annotations on PVCs - - Annotations: pv.kubernetes.io/bind-completed: yes - pv.kubernetes.io/bound-by-controller: yes - volume.beta.kubernetes.io/storage-provisioner: csi.vsphere.vmware.com - - Annotations on PVs - - Annotations: pv.kubernetes.io/provisioned-by: csi.vsphere.vmware.com diff --git a/example/vanilla-k8s-RWO-Block-Volumes/example-raw-block-restore.yaml b/example/vanilla-k8s-RWO-Block-Volumes/example-raw-block-restore.yaml index 48121b5375..39392540de 100644 --- a/example/vanilla-k8s-RWO-Block-Volumes/example-raw-block-restore.yaml +++ b/example/vanilla-k8s-RWO-Block-Volumes/example-raw-block-restore.yaml @@ -8,6 +8,7 @@ spec: name: example-raw-block-snapshot kind: VolumeSnapshot apiGroup: snapshot.storage.k8s.io + volumeMode: Block accessModes: - ReadWriteOnce resources: diff --git a/example/vanilla-k8s-RWO-filesystem-volumes/example-sc-WaitForFirstConsumer-restricted.yaml b/example/vanilla-k8s-RWO-filesystem-volumes/example-sc-WaitForFirstConsumer-restricted.yaml index 3e94186961..7688874526 100644 --- a/example/vanilla-k8s-RWO-filesystem-volumes/example-sc-WaitForFirstConsumer-restricted.yaml +++ b/example/vanilla-k8s-RWO-filesystem-volumes/example-sc-WaitForFirstConsumer-restricted.yaml @@ -9,10 +9,10 @@ parameters: storagepolicyname: "vSAN Default Storage Policy" # Optional Parameter allowedTopologies: - matchLabelExpressions: - - key: topology.kubernetes.io/zone + - key: topology.csi.vmware.com/k8s-zone values: - us-west-CA - us-west-WA - - key: topology.kubernetes.io/region + - key: topology.csi.vmware.com/k8s-region values: - us-west diff --git a/example/vanilla-k8s-RWO-filesystem-volumes/example-sc-multiple-zones.yaml b/example/vanilla-k8s-RWO-filesystem-volumes/example-sc-multiple-zones.yaml index 5ce7e4d172..301f605d4b 100644 --- a/example/vanilla-k8s-RWO-filesystem-volumes/example-sc-multiple-zones.yaml +++ b/example/vanilla-k8s-RWO-filesystem-volumes/example-sc-multiple-zones.yaml @@ -8,10 +8,10 @@ parameters: storagepolicyname: "vSAN Default Storage Policy" # Optional Parameter allowedTopologies: - matchLabelExpressions: - - key: topology.kubernetes.io/zone + - key: topology.csi.vmware.com/k8s-zone values: - us-west-WA - us-west-CA - - key: topology.kubernetes.io/region + - key: topology.csi.vmware.com/k8s-region values: - us-west diff --git a/example/vanilla-k8s-RWO-filesystem-volumes/example-sc-single-zone.yaml b/example/vanilla-k8s-RWO-filesystem-volumes/example-sc-single-zone.yaml index 5a94e738ab..ad739a27d2 100644 --- a/example/vanilla-k8s-RWO-filesystem-volumes/example-sc-single-zone.yaml +++ b/example/vanilla-k8s-RWO-filesystem-volumes/example-sc-single-zone.yaml @@ -8,9 +8,9 @@ parameters: storagepolicyname: "vSAN Default Storage Policy" # Optional Parameter 
allowedTopologies: - matchLabelExpressions: - - key: topology.kubernetes.io/zone + - key: topology.csi.vmware.com/k8s-zone values: - us-west-CA - - key: topology.kubernetes.io/region + - key: topology.csi.vmware.com/k8s-region values: - us-west diff --git a/go.mod b/go.mod index 7c844f12a2..5ace8f51ba 100644 --- a/go.mod +++ b/go.mod @@ -14,13 +14,13 @@ require ( github.com/hashicorp/go-version v1.6.0 github.com/kubernetes-csi/csi-lib-utils v0.11.0 github.com/kubernetes-csi/csi-proxy/client v1.0.1 - github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 + github.com/kubernetes-csi/external-snapshotter/client/v6 v6.1.0 github.com/onsi/ginkgo/v2 v2.8.3 github.com/onsi/gomega v1.27.0 github.com/prometheus/client_golang v1.14.0 github.com/stretchr/testify v1.8.4 github.com/vmware-tanzu/vm-operator-api v0.1.4-0.20211202183846-992b48c128ae - github.com/vmware/govmomi v0.30.6 + github.com/vmware/govmomi v0.32.0 go.uber.org/zap v1.24.0 golang.org/x/crypto v0.17.0 golang.org/x/net v0.19.0 @@ -29,17 +29,17 @@ require ( google.golang.org/protobuf v1.31.0 gopkg.in/gcfg.v1 v1.2.3 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.26.10 - k8s.io/apiextensions-apiserver v0.26.10 - k8s.io/apimachinery v0.26.10 - k8s.io/client-go v0.26.10 - k8s.io/kubectl v0.26.10 - k8s.io/kubernetes v1.26.10 - k8s.io/mount-utils v0.26.10 - k8s.io/pod-security-admission v0.26.10 - k8s.io/sample-controller v0.26.10 - k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 - sigs.k8s.io/controller-runtime v0.13.0 + k8s.io/api v0.26.8 + k8s.io/apiextensions-apiserver v0.26.8 + k8s.io/apimachinery v0.26.8 + k8s.io/client-go v0.26.8 + k8s.io/kubectl v0.26.8 + k8s.io/kubernetes v1.26.8 + k8s.io/mount-utils v0.27.1 + k8s.io/pod-security-admission v0.26.8 + k8s.io/sample-controller v0.26.8 + k8s.io/utils v0.0.0-20230308161112-d77c459e9343 + sigs.k8s.io/controller-runtime v0.14.2 ) require ( @@ -133,7 +133,7 @@ require ( github.com/thecodeteam/gofsutil v0.1.2 // indirect github.com/vishvananda/netlink v1.1.0 // indirect github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect - github.com/xlab/treeprint v1.1.0 // indirect + github.com/xlab/treeprint v1.2.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect @@ -161,18 +161,18 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.26.10 // indirect - k8s.io/cli-runtime v0.26.10 // indirect - k8s.io/cloud-provider v0.26.10 // indirect - k8s.io/component-base v0.26.10 // indirect - k8s.io/component-helpers v0.26.10 // indirect + k8s.io/apiserver v0.26.8 // indirect + k8s.io/cli-runtime v0.26.8 // indirect + k8s.io/cloud-provider v0.26.8 // indirect + k8s.io/component-base v0.26.8 // indirect + k8s.io/component-helpers v0.26.8 // indirect k8s.io/cri-api v0.0.0 // indirect - k8s.io/csi-translation-lib v0.26.10 // indirect + k8s.io/csi-translation-lib v0.26.8 // indirect k8s.io/dynamic-resource-allocation v0.0.0 // indirect - k8s.io/klog/v2 v2.80.1 // indirect + k8s.io/klog/v2 v2.90.1 // indirect k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect - k8s.io/kube-scheduler v0.26.10 // indirect - k8s.io/kubelet v0.26.10 // indirect + k8s.io/kube-scheduler v0.26.8 // indirect + k8s.io/kubelet v0.26.8 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.37 // 
indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect sigs.k8s.io/kustomize/api v0.12.1 // indirect @@ -185,35 +185,33 @@ replace ( github.com/go-logr/logr => github.com/go-logr/logr v1.2.0 github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.4.1 github.com/kubernetes-csi/csi-lib-utils => github.com/kubernetes-csi/csi-lib-utils v0.11.0 - k8s.io/api => k8s.io/api v0.26.10 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.10 - k8s.io/apimachinery => k8s.io/apimachinery v0.26.10 - k8s.io/apiserver => k8s.io/apiserver v0.26.10 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.10 - k8s.io/client-go => k8s.io/client-go v0.26.10 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.10 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.10 - k8s.io/code-generator => k8s.io/code-generator v0.26.10 - k8s.io/component-base => k8s.io/component-base v0.26.10 - k8s.io/component-helpers => k8s.io/component-helpers v0.26.10 - k8s.io/controller-manager => k8s.io/controller-manager v0.26.10 - k8s.io/cri-api => k8s.io/cri-api v0.26.10 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.10 - k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.26.10 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.10 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.10 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.10 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.10 - k8s.io/kubectl => k8s.io/kubectl v0.26.10 - k8s.io/kubelet => k8s.io/kubelet v0.26.10 - k8s.io/kubernetes => k8s.io/kubernetes v1.26.10 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.10 - k8s.io/metrics => k8s.io/metrics v0.26.10 - k8s.io/mount-utils => k8s.io/mount-utils v0.24.6 - k8s.io/node-api => k8s.io/node-api v0.26.10 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.26.10 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.10 - k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.26.10 - k8s.io/sample-controller => k8s.io/sample-controller v0.26.10 - sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.14.5 + k8s.io/api => k8s.io/api v0.26.8 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.8 + k8s.io/apimachinery => k8s.io/apimachinery v0.26.8 + k8s.io/apiserver => k8s.io/apiserver v0.26.8 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.8 + k8s.io/client-go => k8s.io/client-go v0.26.8 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.8 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.8 + k8s.io/code-generator => k8s.io/code-generator v0.26.8 + k8s.io/component-base => k8s.io/component-base v0.26.8 + k8s.io/component-helpers => k8s.io/component-helpers v0.26.8 + k8s.io/controller-manager => k8s.io/controller-manager v0.26.8 + k8s.io/cri-api => k8s.io/cri-api v0.26.8 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.8 + k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.26.8 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.8 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.8 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.8 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.8 + k8s.io/kubectl => k8s.io/kubectl v0.26.8 + k8s.io/kubelet => k8s.io/kubelet v0.26.8 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.8 + k8s.io/metrics => k8s.io/metrics v0.26.8 + k8s.io/mount-utils => k8s.io/mount-utils v0.27.1 + 
k8s.io/node-api => k8s.io/node-api v0.26.8 + k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.26.8 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.8 + k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.26.8 + k8s.io/sample-controller => k8s.io/sample-controller v0.26.8 ) diff --git a/go.sum b/go.sum index 097c19ea06..3fe0726b2b 100644 --- a/go.sum +++ b/go.sum @@ -412,8 +412,8 @@ github.com/kubernetes-csi/csi-lib-utils v0.11.0 h1:FHWOBtAZBA/hVk7v/qaXgG9Sxv0/n github.com/kubernetes-csi/csi-lib-utils v0.11.0/go.mod h1:BmGZZB16L18+9+Lgg9YWwBKfNEHIDdgGfAyuW6p2NV0= github.com/kubernetes-csi/csi-proxy/client v1.0.1 h1:BPK9e5Fy0GcDRjDc9hqu7TnouSRujG6IvbH+PXSDOsY= github.com/kubernetes-csi/csi-proxy/client v1.0.1/go.mod h1:URLOkEbRhOwKVvGvug6HSKRTpLSFuQ/Gt3xahDag8qc= -github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA= -github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys= +github.com/kubernetes-csi/external-snapshotter/client/v6 v6.1.0 h1:yeuon3bOuOADwiWl2CyYrU4vbmYbAzGLCTscE1yLNHk= +github.com/kubernetes-csi/external-snapshotter/client/v6 v6.1.0/go.mod h1:eVY6gNtSrhsblGAqKFDG3CrkCLFAjsDvOpPpt+EaS6k= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= @@ -606,14 +606,14 @@ github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3C github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmware-tanzu/vm-operator-api v0.1.4-0.20211202183846-992b48c128ae h1:R7ukgIC/uN4vULAvwWJxuq2XLcUEJkR4psxdRNssqSI= github.com/vmware-tanzu/vm-operator-api v0.1.4-0.20211202183846-992b48c128ae/go.mod h1:mubK0QMyaA2TbeAmGsu2GVfiqDFppNUAUqoMPoKFgzM= -github.com/vmware/govmomi v0.30.6 h1:O3tjSwQBy0XwI5uK1/yVIfQ1LP9bAECEDUfifnyGs9U= -github.com/vmware/govmomi v0.30.6/go.mod h1:epgoslm97rLECMV4D+08ORzUBEU7boFSepKjt7AYVGg= +github.com/vmware/govmomi v0.32.0 h1:Rsdi/HAX5Ebf9Byp/FvBir4sfM7yP5DBUeRlbC6vLBo= +github.com/vmware/govmomi v0.32.0/go.mod h1:JA63Pg0SgQcSjk+LuPzjh3rJdcWBo/ZNCIwbb1qf2/0= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= -github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -634,7 +634,7 @@ 
go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelr go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0/go.mod h1:DQYkU9srMFqLUTVA/7/WlRHdnYDB7wyMMlle2ktMjfI= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= go.opentelemetry.io/contrib/propagators/b3 v1.10.0 h1:6AD2VV8edRdEYNaD8cNckpzgdMLU2kbV9OYyxt2kvCg= @@ -686,7 +686,6 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -724,7 +723,6 @@ golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -774,8 +772,7 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -874,16 +871,14 @@ golang.org/x/sys 
v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -896,14 +891,12 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1116,69 +1109,66 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.26.10 h1:skTnrDR0r8dg4MMLf6YZIzugxNM0BjFsWKPkNc5kOvk= -k8s.io/api v0.26.10/go.mod 
h1:ou/H3yviqrHtP/DSPVTfsc7qNfmU06OhajytJfYXkXw= -k8s.io/apiextensions-apiserver v0.26.10 h1:wAriTUc6l7gUqJKOxhmXnYo/VNJzk4oh4QLCUR4Uq+k= -k8s.io/apiextensions-apiserver v0.26.10/go.mod h1:N2qhlxkhJLSoC4f0M1/1lNG627b45SYqnOPEVFoQXw4= -k8s.io/apimachinery v0.26.10 h1:aE+J2KIbjctFqPp3Y0q4Wh2PD+l1p2g3Zp4UYjSvtGU= -k8s.io/apimachinery v0.26.10/go.mod h1:iT1ZP4JBP34wwM+ZQ8ByPEQ81u043iqAcsJYftX9amM= -k8s.io/apiserver v0.26.10 h1:gradpIHygzZN87yK+o6V3gpbCSF78HZ0hejLZQQwdDs= -k8s.io/apiserver v0.26.10/go.mod h1:TGrQKQWUfQcotK3P4TtoVZxXOWklFF36QZlA5wufLs4= -k8s.io/cli-runtime v0.26.10 h1:a5t8ejLCCjWBEny70uMDyPfOyOJH1qAxrrEo2a9fopU= -k8s.io/cli-runtime v0.26.10/go.mod h1:i1UCYrl+n32ej4N2n2eacOMv4T94vRL0/ooOLopN23Q= -k8s.io/client-go v0.26.10 h1:4mDzl+1IrfRxh4Ro0s65JRGJp14w77gSMUTjACYWVRo= -k8s.io/client-go v0.26.10/go.mod h1:sh74ig838gCckU4ElYclWb24lTesPdEDPnlyg5vcbkA= -k8s.io/cloud-provider v0.26.10 h1:KEKR5IN508u6qKTIp8hiQshdwjp2vAmUf1dq00YeqwE= -k8s.io/cloud-provider v0.26.10/go.mod h1:s8jaxZgFcipPVnGMxLzWbCG46BYK8ExpBaqMjtUswVg= -k8s.io/code-generator v0.26.10/go.mod h1:+IHzChHYqL6v5M5KVRglocWMzdSzH3I2jRXZK05yZ9I= -k8s.io/component-base v0.26.10 h1:vl3Gfe5aC09mNxfnQtTng7u3rnBVrShOK3MAkqEleb0= -k8s.io/component-base v0.26.10/go.mod h1:/IDdENUHG5uGxqcofZajovYXE9KSPzJ4yQbkYQt7oN0= -k8s.io/component-helpers v0.26.10 h1:KEwLNxzTE65R2kNz4UZ26h1G9O8xd6+iXVz7jkLgEYc= -k8s.io/component-helpers v0.26.10/go.mod h1:HYtL0UXL9zrYuuAmweYvHX/iQ0d0MURnvTOL3emC/r0= -k8s.io/cri-api v0.26.10 h1:BZo9LDNLH7FWKgnvA8RuEPwCYfB98uf2RvPnGZiaTI4= -k8s.io/cri-api v0.26.10/go.mod h1:1wfVwvQwwIrdc9+vf11oN+uAhpFO3A5X1CMLU5MShfk= -k8s.io/csi-translation-lib v0.26.10 h1:YWshcixVgN9kJPCf/EYkEZ/KQnojSEagdXNyBb8TJ+A= -k8s.io/csi-translation-lib v0.26.10/go.mod h1:qx+y88RGweEWgYpmU5HwnQA69NzguWAGWHlgY+L7rg0= -k8s.io/dynamic-resource-allocation v0.26.10 h1:w+m8eEq7Ududi1r7LTlXfzl1C/aXS/7sMLlFdpCd6v4= -k8s.io/dynamic-resource-allocation v0.26.10/go.mod h1:piJ6x7/p0cb+xJIuX/MnGO+bKKX1QOErxY+Z1PgerrU= +k8s.io/api v0.26.8 h1:k2OtFmQPWfDUyAuYAwQPftVygF/vz4BMGSKnd15iddM= +k8s.io/api v0.26.8/go.mod h1:QaflR7cmG3V9lIz0VLBM+ylndNN897OAUAoJDcgwiQw= +k8s.io/apiextensions-apiserver v0.26.8 h1:ESVQ22MH6YfcpflpZMIvkgnHs/EwOgKKSCkS9AfxJOY= +k8s.io/apiextensions-apiserver v0.26.8/go.mod h1:ySo6rPc9ulNtKoZczw7ljCAdZN3DbyxLNat8wuYk4r8= +k8s.io/apimachinery v0.26.8 h1:SzpGtRX3/j/Ylg8Eg65Iobpxi9Jz4vOvI0qcBZyPVrM= +k8s.io/apimachinery v0.26.8/go.mod h1:qYzLkrQ9lhrZRh0jNKo2cfvf/R1/kQONnSiyB7NUJU0= +k8s.io/apiserver v0.26.8 h1:N6y2rVkMo4q+ZJWjQOYYIPY/jlxqiNFsiAsrB6JjsoA= +k8s.io/apiserver v0.26.8/go.mod h1:rQ3thye841vuya4oxnvmPV6ZjlrJP3Ru7vEXRF/lAk8= +k8s.io/cli-runtime v0.26.8 h1:LFiS+z20j8gt9Iyo4EsbivzrDYPRbFFj8wmpwdhy7cQ= +k8s.io/cli-runtime v0.26.8/go.mod h1:j3YQ0OtQnqsQRsMWbmZrKqbOvN2OUu0K+dPffeKPVj0= +k8s.io/client-go v0.26.8 h1:pPuTYaVtLlg/7n6rqs3MsKLi4XgNaJ3rTMyS37Y5CKU= +k8s.io/client-go v0.26.8/go.mod h1:1sBQqKmdy9rWZYQnoedpc0gnRXG7kU3HrKZvBe2QbGM= +k8s.io/cloud-provider v0.26.8 h1:4Oittsb5SE1wraL14nwETNuXWNf3tQ6I/bh6P2zWCWU= +k8s.io/cloud-provider v0.26.8/go.mod h1:GKhJRm28XLNjEo9B2d1PP/IIhOjMS+f8PJTbhmqtpmA= +k8s.io/component-base v0.26.8 h1:j+W9y9id4CLW85+5GhRMgcYLaezw6bK+ZQ2eN3uZtJc= +k8s.io/component-base v0.26.8/go.mod h1:tOQmHjTJBLjzWLWqbxz7sVgX9XMMphEcy0tWhk+u2BI= +k8s.io/component-helpers v0.26.8 h1:y/gAdhXvJbY+lMxbShv51v3R8NhFlff7eFCGgGZfaoE= +k8s.io/component-helpers v0.26.8/go.mod h1:YvEk4fl8eROxoHfdQKyJ3TFrhg23i7juIy85beISmEA= +k8s.io/cri-api v0.26.8 h1:juRXMjW5IMFvtFi6qHS+q3cPdCvlWU1AQrwSISmiUF0= +k8s.io/cri-api v0.26.8/go.mod 
h1:xhhEy2sS6zO8oieIwGBorQLneLMLHXATFUv8+0tsFMo= +k8s.io/csi-translation-lib v0.26.8 h1:L6VDEnUvh4ydvbFEOUJYEB6kqP59YMpn4njBJERyHM0= +k8s.io/csi-translation-lib v0.26.8/go.mod h1:0/s9rOLiTZaSt/ci7jNjqnLmffmuk78N2mXY5iVEOQs= +k8s.io/dynamic-resource-allocation v0.26.8 h1:gQFgfRSGT+GgeiSR99gOUVR169RPmrKRjcDCCSkwVKo= +k8s.io/dynamic-resource-allocation v0.26.8/go.mod h1:sRrVsAvdxCY0xhGp2uTOeyhVyBgdH81ZHAjVLEzzI9E= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/kube-scheduler v0.26.10 h1:icjlZENawDPsbJcDe2qro4RxcSsGlAKmJ7dSn0LTxvU= -k8s.io/kube-scheduler v0.26.10/go.mod h1:2iZjOo9wp9clwLcdYj+nUFtw5uBn1Ec25BXwy5da0us= -k8s.io/kubectl v0.26.10 h1:UoHA2Apb/Ack+B3evJjokbQ1shq6WdAmVi9AtWiY1B8= -k8s.io/kubectl v0.26.10/go.mod h1:U8Zb+jkWVI3H/LSbCDHQ0d70uYmOJtNQk9V2fmg7tGw= -k8s.io/kubelet v0.26.10 h1:/ChL4fCohFNPEvZbpt6qFXMrwFgLw0dgRaseMQ1wehw= -k8s.io/kubelet v0.26.10/go.mod h1:CptPtpIILi3Z0Z2522hMBF+gnDW/rwWGTg3fteoK0Qk= -k8s.io/kubernetes v1.26.10 h1:0px6+62d5Z3pcRPYl3Fc00t3W7BtBjqkjcRarp597Lk= -k8s.io/kubernetes v1.26.10/go.mod h1:FJGPRZLL8WHUDq5XAPs4Ut4jCB0f08R7MKTRP8CGpvI= -k8s.io/legacy-cloud-providers v0.26.10 h1:FagS1MXyfunYHTr/6wvkMlSpEyU9tdpG2p1gxDdcKzU= -k8s.io/mount-utils v0.24.6 h1:HbA+5ZqWEqYN0SiExYQd4jJp44t8z77KCLy0ipG3umE= -k8s.io/mount-utils v0.24.6/go.mod h1:XrSqB3a2e8sq+aU+rlbcBtQ3EgcuDk5RP9ZsGxjoDrI= -k8s.io/pod-security-admission v0.26.10 h1:D2MF9JbMRu3pB7Onx26DHm6MHJRh3s6ZK0UKoRRD2to= -k8s.io/pod-security-admission v0.26.10/go.mod h1:AurbRHBkqh8GSj+nDgsY0NLefkiGCmZJbzMJXQZpte8= -k8s.io/sample-controller v0.26.10 h1:XTOMLTFPvyBGVtsxca8uvDa6Jtv1nj6VXjZFkBFpmdk= -k8s.io/sample-controller v0.26.10/go.mod h1:M+4is0MHZadFviLmh5c9ST1uL9KuzmtKXeKJbuP9sQE= +k8s.io/kube-scheduler v0.26.8 h1:XoIQKwhLTlBhH6/jNAiRG9X+ByoRTauJSnQYxbXM/u4= +k8s.io/kube-scheduler v0.26.8/go.mod h1:cb11un5V87utfgpgHNrD40z+ku2P+hk2CgSKcsSEq2E= +k8s.io/kubectl v0.26.8 h1:8252xsEUAlK1K0J1w+8pE8k/Xl4b4p1OC7S9Ib0AQxU= +k8s.io/kubectl v0.26.8/go.mod h1:zqblts62fYhUOeWKwNHr2KAh4Bf8TnTsbWKTXilELJQ= +k8s.io/kubelet v0.26.8 h1:jLIhS8grZdSIDqj/ieLShyA5TqCA1z4WTBFQzGtPzQ4= +k8s.io/kubelet v0.26.8/go.mod h1:Jg66XubcTV+s+Jn/vQSD0rvdk3z6oaM63EnbHwkmNRY= +k8s.io/kubernetes v1.26.8 h1:vC3oBFD2H8A1c7L0WFMWKQYN5xRJy93QOCoQWNe1CF8= +k8s.io/kubernetes v1.26.8/go.mod h1:EBE8dfGfk2sZ3yzZVQjr1wQ/k28/wwaajL/1+77Cjmg= +k8s.io/legacy-cloud-providers v0.26.8 h1:Py0NBzxvf2xyW1Tecg5Of9cnl74eid7rHn+FpvkEaMI= +k8s.io/mount-utils v0.27.1 h1:RSd0wslbIuwLRaGGNAGMZ3m9FLcvukxJ3FWlOm76W2A= +k8s.io/mount-utils v0.27.1/go.mod h1:vmcjYdi2Vg1VTWY7KkhvwJVY6WDHxb/QQhiQKkR8iNs= 
+k8s.io/pod-security-admission v0.26.8 h1:HnCZk8Gz83xFp25nIJkUPaYg0NV3sFCTAqGmJiJlq+4= +k8s.io/pod-security-admission v0.26.8/go.mod h1:gzThscfIEyA1dPlCEuNoxWjo4C4QsqDI54rBA6x31MM= +k8s.io/sample-controller v0.26.8 h1:CIf+qcrMydceeqMaR5hEq6DwVktRDv/0aPuwNlINxE8= +k8s.io/sample-controller v0.26.8/go.mod h1:MdTluwLYkXIbo95OSollInsKR1ppbtdi01Nme09nr8g= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= -k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc= +k8s.io/utils v0.0.0-20230308161112-d77c459e9343/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.37 h1:fAPTNEpzQMOLMGwOHNbUkR2xXTQwMJOZYNx+/mLlOh0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.37/go.mod h1:vfnxT4FXNT8eGvO+xi/DsyC/qHmdujqwrUa1WSspCsk= -sigs.k8s.io/controller-runtime v0.14.5 h1:6xaWFqzT5KuAQ9ufgUaj1G/+C4Y1GRkhrxl+BJ9i+5s= -sigs.k8s.io/controller-runtime v0.14.5/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= +sigs.k8s.io/controller-runtime v0.14.2 h1:P6IwDhbsRWsBClt/8/h8Zy36bCuGuW5Op7MHpFrN/60= +sigs.k8s.io/controller-runtime v0.14.2/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM= diff --git a/hack/release.sh b/hack/release.sh index 4a57241c53..d45498eeb5 100755 --- a/hack/release.sh +++ b/hack/release.sh @@ -56,8 +56,8 @@ BUILD_RELEASE_TYPE="${BUILD_RELEASE_TYPE:-}" # CUSTOM_REPO_FOR_GOLANG can be used to pass custom repository for golang builder image. # Please ensure it ends with a '/'. 
-# Example: CUSTOM_REPO_FOR_GOLANG=harbor-repo.vmware.com/dockerhub-proxy-cache/library/ -GOLANG_IMAGE=${CUSTOM_REPO_FOR_GOLANG:-}golang:1.19 +# Example: CUSTOM_REPO_FOR_GOLANG=/dockerhub-proxy-cache/library/ +GOLANG_IMAGE=${CUSTOM_REPO_FOR_GOLANG:-}golang:1.20 ARCH=amd64 OSVERSION=1809 diff --git a/hack/run-e2e-test.sh b/hack/run-e2e-test.sh index 74d3fe11a9..caaaebe855 100755 --- a/hack/run-e2e-test.sh +++ b/hack/run-e2e-test.sh @@ -68,6 +68,10 @@ then fi OPTS+=(-p) ginkgo -mod=mod "${OPTS[@]}" --focus="csi-block-vanilla-parallelized" tests/e2e +elif [ "$FOCUS" == "csi-block-vanilla-parallelized" ] +then + OPTS+=(-p) + ginkgo -mod=mod "${OPTS[@]}" --focus="csi-block-vanilla-parallelized" tests/e2e else ginkgo -mod=mod "${OPTS[@]}" --focus="$FOCUS" tests/e2e fi diff --git a/images/ci/Dockerfile b/images/ci/Dockerfile index 2a44b5e964..dff464355d 100644 --- a/images/ci/Dockerfile +++ b/images/ci/Dockerfile @@ -17,7 +17,7 @@ ################################################################################ # The golang image is used to create the project's module and build caches # and is also the image on which this image is based. -ARG GOLANG_IMAGE=golang:1.19 +ARG GOLANG_IMAGE=golang:1.20 ################################################################################ ## GO MOD CACHE STAGE ## diff --git a/images/ci/e2e/Dockerfile b/images/ci/e2e/Dockerfile new file mode 100644 index 0000000000..6b45b97921 --- /dev/null +++ b/images/ci/e2e/Dockerfile @@ -0,0 +1,32 @@ +# Copyright 2023 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# To build an image from this docker file follow the below steps +# cd to images/ci/e2e folder +# docker build -t --platform=Linux/x86_64 -f Dockerfile . +# docker tag / +# docker push / + +FROM --platform=linux/amd64 golang:1.19 + +RUN apt-get update && curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && \ + install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + +RUN echo "Downloading and installing govc..." && \ + wget -qO- https://github.com/vmware/govmomi/releases/download/v0.30.2/govc_Linux_x86_64.tar.gz | tar -C /usr/local/bin -xvzf - govc && \ + chmod +x /usr/local/bin/govc + +RUN echo "Updating and installing sshpass..." && \ + apt-get update -y && \ + apt-get install -y sshpass diff --git a/images/driver/Dockerfile b/images/driver/Dockerfile index 201d7957fa..5779ac1c77 100644 --- a/images/driver/Dockerfile +++ b/images/driver/Dockerfile @@ -16,7 +16,7 @@ ## BUILD ARGS ## ################################################################################ # This build arg allows the specification of a custom Golang image. -ARG GOLANG_IMAGE=golang:1.19 +ARG GOLANG_IMAGE=golang:1.20 # This build arg allows the specification of a custom base image. 
ARG BASE_IMAGE=gcr.io/cloud-provider-vsphere/extra/csi-driver-base:latest diff --git a/images/syncer/Dockerfile b/images/syncer/Dockerfile index 7e64be826f..6b05236070 100644 --- a/images/syncer/Dockerfile +++ b/images/syncer/Dockerfile @@ -14,7 +14,7 @@ ## BUILD ARGS ## ################################################################################ # This build arg allows the specification of a custom Golang image. -ARG GOLANG_IMAGE=golang:1.19 +ARG GOLANG_IMAGE=golang:1.20 # This build arg allows the specification of a custom base image. ARG BASE_IMAGE=gcr.io/cloud-provider-vsphere/extra/csi-driver-base:latest diff --git a/images/windows/driver/Dockerfile b/images/windows/driver/Dockerfile index 5edd043928..430b8bbf41 100644 --- a/images/windows/driver/Dockerfile +++ b/images/windows/driver/Dockerfile @@ -16,7 +16,7 @@ ## BUILD ARGS ## ################################################################################ # This build arg allows the specification of a custom Golang image. -ARG GOLANG_IMAGE=golang:1.19 +ARG GOLANG_IMAGE=golang:1.20 ARG OSVERSION ARG ARCH=amd64 diff --git a/manifests/guestcluster/1.22/pvcsi.yaml b/manifests/guestcluster/1.22/pvcsi.yaml index acb5c867d6..9c5f1bd43c 100644 --- a/manifests/guestcluster/1.22/pvcsi.yaml +++ b/manifests/guestcluster/1.22/pvcsi.yaml @@ -62,7 +62,7 @@ rules: verbs: ["get", "update", "watch", "list"] - apiGroups: [ "snapshot.storage.k8s.io" ] resources: [ "volumesnapshots" ] - verbs: [ "get", "list" ] + verbs: [ "get", "list", "patch" ] - apiGroups: [ "snapshot.storage.k8s.io" ] resources: [ "volumesnapshotclasses" ] verbs: [ "watch", "get", "list" ] diff --git a/manifests/guestcluster/1.23/pvcsi.yaml b/manifests/guestcluster/1.23/pvcsi.yaml index 309a2b6b0b..4b8fcbd2f1 100644 --- a/manifests/guestcluster/1.23/pvcsi.yaml +++ b/manifests/guestcluster/1.23/pvcsi.yaml @@ -62,7 +62,7 @@ rules: verbs: ["get", "update", "watch", "list"] - apiGroups: [ "snapshot.storage.k8s.io" ] resources: [ "volumesnapshots" ] - verbs: [ "get", "list" ] + verbs: [ "get", "list", "patch" ] - apiGroups: [ "snapshot.storage.k8s.io" ] resources: [ "volumesnapshotclasses" ] verbs: [ "watch", "get", "list" ] @@ -208,6 +208,8 @@ spec: value: /etc/cloud/pvcsi-config/cns-csi.conf - name: PROVISION_TIMEOUT_MINUTES value: "4" + - name: SNAPSHOT_TIMEOUT_MINUTES + value: "4" - name: ATTACHER_TIMEOUT_MINUTES value: "4" - name: RESIZE_TIMEOUT_MINUTES diff --git a/manifests/guestcluster/1.24/pvcsi.yaml b/manifests/guestcluster/1.24/pvcsi.yaml index 68f2d449d8..6516505eb2 100644 --- a/manifests/guestcluster/1.24/pvcsi.yaml +++ b/manifests/guestcluster/1.24/pvcsi.yaml @@ -62,7 +62,7 @@ rules: verbs: ["get", "update", "watch", "list"] - apiGroups: [ "snapshot.storage.k8s.io" ] resources: [ "volumesnapshots" ] - verbs: [ "get", "list" ] + verbs: [ "get", "list", "patch" ] - apiGroups: [ "snapshot.storage.k8s.io" ] resources: [ "volumesnapshotclasses" ] verbs: [ "watch", "get", "list" ] @@ -211,6 +211,8 @@ spec: value: /etc/cloud/pvcsi-config/cns-csi.conf - name: PROVISION_TIMEOUT_MINUTES value: "4" + - name: SNAPSHOT_TIMEOUT_MINUTES + value: "4" - name: ATTACHER_TIMEOUT_MINUTES value: "4" - name: RESIZE_TIMEOUT_MINUTES @@ -229,6 +231,10 @@ spec: value: {{ .PVCSINamespace }} - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT value: 3m + securityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 volumeMounts: - mountPath: /etc/cloud/pvcsi-provider name: pvcsi-provider-volume @@ -265,6 +271,10 @@ spec: value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - name: 
CSI_NAMESPACE value: {{ .PVCSINamespace }} + securityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 volumeMounts: - mountPath: /etc/cloud/pvcsi-provider name: pvcsi-provider-volume @@ -322,6 +332,24 @@ spec: volumeMounts: - mountPath: /csi name: socket-dir + - name: csi-snapshotter + image: vmware.io/csi-snapshotter/csi-snapshotter: + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + - "--leader-election-lease-duration=120s" + - "--leader-election-renew-deadline=60s" + - "--leader-election-retry-period=30s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir volumes: - name: pvcsi-provider-volume secret: @@ -493,7 +521,7 @@ data: "online-volume-extend": "true" "file-volume": "true" "csi-sv-feature-states-replication": "false" - "block-volume-snapshot": "false" + "block-volume-snapshot": "true" "tkgs-ha": "true" "cnsmgr-suspend-create-volume": "true" kind: ConfigMap diff --git a/manifests/guestcluster/1.25/pvcsi.yaml b/manifests/guestcluster/1.25/pvcsi.yaml index e0dd975276..e591f5c456 100644 --- a/manifests/guestcluster/1.25/pvcsi.yaml +++ b/manifests/guestcluster/1.25/pvcsi.yaml @@ -243,6 +243,10 @@ spec: value: {{ .PVCSINamespace }} - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT value: 3m + securityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 volumeMounts: - mountPath: /etc/cloud/pvcsi-provider name: pvcsi-provider-volume @@ -279,6 +283,10 @@ spec: value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - name: CSI_NAMESPACE value: {{ .PVCSINamespace }} + securityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 volumeMounts: - mountPath: /etc/cloud/pvcsi-provider name: pvcsi-provider-volume @@ -336,6 +344,24 @@ spec: volumeMounts: - mountPath: /csi name: socket-dir + - name: csi-snapshotter + image: vmware.io/csi-snapshotter/csi-snapshotter: + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + - "--leader-election-lease-duration=120s" + - "--leader-election-renew-deadline=60s" + - "--leader-election-retry-period=30s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir volumes: - name: pvcsi-provider-volume secret: @@ -505,7 +531,7 @@ data: "online-volume-extend": "true" "file-volume": "true" "csi-sv-feature-states-replication": "false" # Do not enable for guest cluster, Refer PR#2386 for details - "block-volume-snapshot": "false" + "block-volume-snapshot": "true" "tkgs-ha": "true" "cnsmgr-suspend-create-volume": "true" kind: ConfigMap diff --git a/manifests/guestcluster/1.26/pvcsi.yaml b/manifests/guestcluster/1.26/pvcsi.yaml index e0dd975276..e591f5c456 100644 --- a/manifests/guestcluster/1.26/pvcsi.yaml +++ b/manifests/guestcluster/1.26/pvcsi.yaml @@ -243,6 +243,10 @@ spec: value: {{ .PVCSINamespace }} - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT value: 3m + securityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 volumeMounts: - mountPath: /etc/cloud/pvcsi-provider name: pvcsi-provider-volume @@ -279,6 +283,10 @@ spec: value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - name: CSI_NAMESPACE value: {{ .PVCSINamespace }} + securityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 volumeMounts: - mountPath: /etc/cloud/pvcsi-provider name: pvcsi-provider-volume @@ -336,6 +344,24 @@ spec: volumeMounts: - 
mountPath: /csi name: socket-dir + - name: csi-snapshotter + image: vmware.io/csi-snapshotter/csi-snapshotter: + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + - "--leader-election-lease-duration=120s" + - "--leader-election-renew-deadline=60s" + - "--leader-election-retry-period=30s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir volumes: - name: pvcsi-provider-volume secret: @@ -505,7 +531,7 @@ data: "online-volume-extend": "true" "file-volume": "true" "csi-sv-feature-states-replication": "false" # Do not enable for guest cluster, Refer PR#2386 for details - "block-volume-snapshot": "false" + "block-volume-snapshot": "true" "tkgs-ha": "true" "cnsmgr-suspend-create-volume": "true" kind: ConfigMap diff --git a/manifests/guestcluster/1.27/pvcsi.yaml b/manifests/guestcluster/1.27/pvcsi.yaml index e0dd975276..e591f5c456 100644 --- a/manifests/guestcluster/1.27/pvcsi.yaml +++ b/manifests/guestcluster/1.27/pvcsi.yaml @@ -243,6 +243,10 @@ spec: value: {{ .PVCSINamespace }} - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT value: 3m + securityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 volumeMounts: - mountPath: /etc/cloud/pvcsi-provider name: pvcsi-provider-volume @@ -279,6 +283,10 @@ spec: value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION - name: CSI_NAMESPACE value: {{ .PVCSINamespace }} + securityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 volumeMounts: - mountPath: /etc/cloud/pvcsi-provider name: pvcsi-provider-volume @@ -336,6 +344,24 @@ spec: volumeMounts: - mountPath: /csi name: socket-dir + - name: csi-snapshotter + image: vmware.io/csi-snapshotter/csi-snapshotter: + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + - "--leader-election-lease-duration=120s" + - "--leader-election-renew-deadline=60s" + - "--leader-election-retry-period=30s" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir volumes: - name: pvcsi-provider-volume secret: @@ -505,7 +531,7 @@ data: "online-volume-extend": "true" "file-volume": "true" "csi-sv-feature-states-replication": "false" # Do not enable for guest cluster, Refer PR#2386 for details - "block-volume-snapshot": "false" + "block-volume-snapshot": "true" "tkgs-ha": "true" "cnsmgr-suspend-create-volume": "true" kind: ConfigMap diff --git a/manifests/supervisorcluster/1.21/cns-csi.yaml b/manifests/supervisorcluster/1.21/cns-csi.yaml index f682aa4319..c64e35fdfb 100644 --- a/manifests/supervisorcluster/1.21/cns-csi.yaml +++ b/manifests/supervisorcluster/1.21/cns-csi.yaml @@ -503,9 +503,14 @@ webhooks: rules: - apiGroups: [""] apiVersions: ["v1", "v1beta1"] - operations: ["CREATE", "UPDATE"] + operations: ["CREATE", "UPDATE", "DELETE"] resources: ["persistentvolumeclaims"] scope: "Namespaced" + - apiGroups: ["snapshot.storage.k8s.io"] + apiVersions: ["v1"] + operations: ["CREATE"] + resources: ["volumesnapshots"] + scope: "Namespaced" sideEffects: None admissionReviewVersions: ["v1"] failurePolicy: Fail @@ -518,6 +523,9 @@ rules: - apiGroups: [""] resources: ["persistentvolumes", "persistentvolumeclaims"] verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 diff --git 
a/manifests/supervisorcluster/1.22/cns-csi.yaml b/manifests/supervisorcluster/1.22/cns-csi.yaml index 1958cf6249..4ea49ee18b 100644 --- a/manifests/supervisorcluster/1.22/cns-csi.yaml +++ b/manifests/supervisorcluster/1.22/cns-csi.yaml @@ -80,7 +80,7 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [ "snapshot.storage.k8s.io" ] resources: [ "volumesnapshots" ] - verbs: [ "get", "list" ] + verbs: [ "get", "list", "patch" ] - apiGroups: [ "snapshot.storage.k8s.io" ] resources: [ "volumesnapshotclasses" ] verbs: [ "watch", "get", "list" ] @@ -512,9 +512,14 @@ webhooks: rules: - apiGroups: [""] apiVersions: ["v1", "v1beta1"] - operations: ["CREATE", "UPDATE"] + operations: ["CREATE", "UPDATE", "DELETE"] resources: ["persistentvolumeclaims"] scope: "Namespaced" + - apiGroups: ["snapshot.storage.k8s.io"] + apiVersions: ["v1"] + operations: ["CREATE"] + resources: ["volumesnapshots"] + scope: "Namespaced" sideEffects: None admissionReviewVersions: ["v1"] failurePolicy: Fail @@ -527,6 +532,9 @@ rules: - apiGroups: [""] resources: ["persistentvolumes", "persistentvolumeclaims"] verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 diff --git a/manifests/supervisorcluster/1.23/cns-csi.yaml b/manifests/supervisorcluster/1.23/cns-csi.yaml index 1ea3ce9d0c..3689c52c50 100644 --- a/manifests/supervisorcluster/1.23/cns-csi.yaml +++ b/manifests/supervisorcluster/1.23/cns-csi.yaml @@ -80,7 +80,7 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [ "snapshot.storage.k8s.io" ] resources: [ "volumesnapshots" ] - verbs: [ "get", "list" ] + verbs: [ "get", "list", "patch" ] - apiGroups: [ "snapshot.storage.k8s.io" ] resources: [ "volumesnapshotclasses" ] verbs: [ "watch", "get", "list" ] @@ -213,7 +213,7 @@ spec: priorityClassName: system-node-critical containers: - name: csi-provisioner - image: localhost:5000/vmware/csi-provisioner/csi-provisioner:v3.3.0_vmware.1 + image: localhost:5000/vmware/csi-provisioner/csi-provisioner:v3.4.0_vmware.1 args: - "--v=4" - "--timeout=300s" @@ -248,7 +248,7 @@ spec: - name: socket-dir mountPath: /csi - name: csi-attacher - image: localhost:5000/vmware.io/csi-attacher:v4.0.0_vmware.1 + image: localhost:5000/vmware.io/csi-attacher:v4.3.0_vmware.1 args: - "--v=4" - "--timeout=300s" @@ -272,7 +272,7 @@ spec: - name: socket-dir mountPath: /csi - name: csi-resizer - image: localhost:5000/vmware/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer:v1.6.0_vmware.1 + image: localhost:5000/vmware/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer:v1.8.0_vmware.1 imagePullPolicy: IfNotPresent args: - --v=4 @@ -346,7 +346,7 @@ spec: - mountPath: /etc/vmware/wcp/tls/ name: host-vmca - name: liveness-probe - image: localhost:5000/vmware.io/csi-livenessprobe:v2.7.0_vmware.1 + image: localhost:5000/vmware.io/csi-livenessprobe:v2.10.0_vmware.1 args: - "--csi-address=/csi/csi.sock" volumeMounts: @@ -395,6 +395,23 @@ spec: readOnly: true - mountPath: /etc/vmware/wcp/tls/ name: host-vmca + - name: csi-snapshotter + image: localhost:5000/vmware.io/csi-snapshotter:v6.1.0_vmware.2 + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + - "--extra-create-metadata" + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: "IfNotPresent" + volumeMounts: + - mountPath: /csi + name: socket-dir volumes: - name: 
vsphere-config-volume secret: @@ -426,12 +443,12 @@ data: "fake-attach": "true" "async-query-volume": "true" "improved-csi-idempotency": "true" - "block-volume-snapshot": "false" + "block-volume-snapshot": "true" "sibling-replica-bound-pvc-check": "true" "tkgs-ha": "true" - "list-volumes": "false" + "list-volumes": "true" "cnsmgr-suspend-create-volume": "true" - "listview-tasks": "false" + "listview-tasks": "true" kind: ConfigMap metadata: name: csi-feature-states @@ -516,9 +533,14 @@ webhooks: rules: - apiGroups: [""] apiVersions: ["v1", "v1beta1"] - operations: ["CREATE", "UPDATE"] + operations: ["CREATE", "UPDATE", "DELETE"] resources: ["persistentvolumeclaims"] scope: "Namespaced" + - apiGroups: ["snapshot.storage.k8s.io"] + apiVersions: ["v1"] + operations: ["CREATE"] + resources: ["volumesnapshots"] + scope: "Namespaced" sideEffects: None admissionReviewVersions: ["v1"] failurePolicy: Fail @@ -531,6 +553,9 @@ rules: - apiGroups: [""] resources: ["persistentvolumes", "persistentvolumeclaims"] verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 diff --git a/manifests/supervisorcluster/1.23/kustomization.yaml b/manifests/supervisorcluster/1.23/kustomization.yaml new file mode 100644 index 0000000000..fb9558639f --- /dev/null +++ b/manifests/supervisorcluster/1.23/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - cns-csi.yaml diff --git a/manifests/supervisorcluster/1.24/cns-csi.yaml b/manifests/supervisorcluster/1.24/cns-csi.yaml index 4dc581f98b..4d0f8c6821 100644 --- a/manifests/supervisorcluster/1.24/cns-csi.yaml +++ b/manifests/supervisorcluster/1.24/cns-csi.yaml @@ -80,7 +80,7 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [ "snapshot.storage.k8s.io" ] resources: [ "volumesnapshots" ] - verbs: [ "get", "list" ] + verbs: [ "get", "list", "patch" ] - apiGroups: [ "snapshot.storage.k8s.io" ] resources: [ "volumesnapshotclasses" ] verbs: [ "watch", "get", "list" ] @@ -216,7 +216,7 @@ spec: priorityClassName: system-node-critical containers: - name: csi-provisioner - image: localhost:5000/vmware/csi-provisioner/csi-provisioner:v3.3.0_vmware.1 + image: localhost:5000/vmware/csi-provisioner/csi-provisioner:v3.4.0_vmware.1 args: - "--v=4" - "--timeout=300s" @@ -251,7 +251,7 @@ spec: - name: socket-dir mountPath: /csi - name: csi-attacher - image: localhost:5000/vmware.io/csi-attacher:v4.0.0_vmware.1 + image: localhost:5000/vmware.io/csi-attacher:v4.3.0_vmware.1 args: - "--v=4" - "--timeout=300s" @@ -275,7 +275,7 @@ spec: - name: socket-dir mountPath: /csi - name: csi-resizer - image: localhost:5000/vmware/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer:v1.6.0_vmware.1 + image: localhost:5000/vmware/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer:v1.8.0_vmware.1 imagePullPolicy: IfNotPresent args: - --v=4 @@ -349,7 +349,7 @@ spec: - mountPath: /etc/vmware/wcp/tls/ name: host-vmca - name: liveness-probe - image: localhost:5000/vmware.io/csi-livenessprobe:v2.7.0_vmware.1 + image: localhost:5000/vmware.io/csi-livenessprobe:v2.10.0_vmware.1 args: - "--csi-address=/csi/csi.sock" volumeMounts: @@ -398,6 +398,23 @@ spec: readOnly: true - mountPath: /etc/vmware/wcp/tls/ name: host-vmca + - name: csi-snapshotter + image: localhost:5000/vmware.io/csi-snapshotter:v6.1.0_vmware.2 + args: + - "--v=4" + - "--timeout=300s" + - 
"--csi-address=$(ADDRESS)" + - "--leader-election" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + - "--extra-create-metadata" + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: "IfNotPresent" + volumeMounts: + - mountPath: /csi + name: socket-dir volumes: - name: vsphere-config-volume secret: @@ -429,12 +446,12 @@ data: "fake-attach": "true" "async-query-volume": "true" "improved-csi-idempotency": "true" - "block-volume-snapshot": "false" + "block-volume-snapshot": "true" "sibling-replica-bound-pvc-check": "true" "tkgs-ha": "true" - "list-volumes": "false" + "list-volumes": "true" "cnsmgr-suspend-create-volume": "true" - "listview-tasks": "false" + "listview-tasks": "true" kind: ConfigMap metadata: name: csi-feature-states @@ -519,9 +536,14 @@ webhooks: rules: - apiGroups: [""] apiVersions: ["v1", "v1beta1"] - operations: ["CREATE", "UPDATE"] + operations: ["CREATE", "UPDATE", "DELETE"] resources: ["persistentvolumeclaims"] scope: "Namespaced" + - apiGroups: ["snapshot.storage.k8s.io"] + apiVersions: ["v1"] + operations: ["CREATE"] + resources: ["volumesnapshots"] + scope: "Namespaced" sideEffects: None admissionReviewVersions: ["v1"] failurePolicy: Fail @@ -534,6 +556,9 @@ rules: - apiGroups: [""] resources: ["persistentvolumes", "persistentvolumeclaims"] verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 diff --git a/manifests/supervisorcluster/1.24/kustomization.yaml b/manifests/supervisorcluster/1.24/kustomization.yaml new file mode 100644 index 0000000000..fb9558639f --- /dev/null +++ b/manifests/supervisorcluster/1.24/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - cns-csi.yaml diff --git a/manifests/supervisorcluster/1.25/cns-csi.yaml b/manifests/supervisorcluster/1.25/cns-csi.yaml index fe0250dd63..e38a5cc0d4 100644 --- a/manifests/supervisorcluster/1.25/cns-csi.yaml +++ b/manifests/supervisorcluster/1.25/cns-csi.yaml @@ -80,7 +80,7 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [ "snapshot.storage.k8s.io" ] resources: [ "volumesnapshots" ] - verbs: [ "get", "list" ] + verbs: [ "get", "list", "patch" ] - apiGroups: [ "snapshot.storage.k8s.io" ] resources: [ "volumesnapshotclasses" ] verbs: [ "watch", "get", "list" ] @@ -213,7 +213,7 @@ spec: priorityClassName: system-node-critical containers: - name: csi-provisioner - image: localhost:5000/vmware/csi-provisioner/csi-provisioner:v3.3.0_vmware.1 + image: localhost:5000/vmware/csi-provisioner/csi-provisioner:v3.4.0_vmware.1 args: - "--v=4" - "--timeout=300s" @@ -248,7 +248,7 @@ spec: - name: socket-dir mountPath: /csi - name: csi-attacher - image: localhost:5000/vmware.io/csi-attacher:v4.0.0_vmware.1 + image: localhost:5000/vmware.io/csi-attacher:v4.3.0_vmware.1 args: - "--v=4" - "--timeout=300s" @@ -272,7 +272,7 @@ spec: - name: socket-dir mountPath: /csi - name: csi-resizer - image: localhost:5000/vmware/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer:v1.6.0_vmware.1 + image: localhost:5000/vmware/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer:v1.8.0_vmware.1 imagePullPolicy: IfNotPresent args: - --v=4 @@ -346,7 +346,7 @@ spec: - mountPath: /etc/vmware/wcp/tls/ name: host-vmca - name: liveness-probe - image: localhost:5000/vmware.io/csi-livenessprobe:v2.7.0_vmware.1 + image: localhost:5000/vmware.io/csi-livenessprobe:v2.10.0_vmware.1 args: - 
"--csi-address=/csi/csi.sock" volumeMounts: @@ -395,6 +395,23 @@ spec: readOnly: true - mountPath: /etc/vmware/wcp/tls/ name: host-vmca + - name: csi-snapshotter + image: localhost:5000/vmware.io/csi-snapshotter:v6.1.0_vmware.2 + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + - "--extra-create-metadata" + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: "IfNotPresent" + volumeMounts: + - mountPath: /csi + name: socket-dir volumes: - name: vsphere-config-volume secret: @@ -426,12 +443,12 @@ data: "fake-attach": "true" "async-query-volume": "true" "improved-csi-idempotency": "true" - "block-volume-snapshot": "false" + "block-volume-snapshot": "true" "sibling-replica-bound-pvc-check": "true" "tkgs-ha": "true" - "list-volumes": "false" + "list-volumes": "true" "cnsmgr-suspend-create-volume": "true" - "listview-tasks": "false" + "listview-tasks": "true" kind: ConfigMap metadata: name: csi-feature-states @@ -516,9 +533,14 @@ webhooks: rules: - apiGroups: [""] apiVersions: ["v1", "v1beta1"] - operations: ["CREATE", "UPDATE"] + operations: ["CREATE", "UPDATE", "DELETE"] resources: ["persistentvolumeclaims"] scope: "Namespaced" + - apiGroups: ["snapshot.storage.k8s.io"] + apiVersions: ["v1"] + operations: ["CREATE"] + resources: ["volumesnapshots"] + scope: "Namespaced" sideEffects: None admissionReviewVersions: ["v1"] failurePolicy: Fail @@ -531,6 +553,9 @@ rules: - apiGroups: [""] resources: ["persistentvolumes", "persistentvolumeclaims"] verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 diff --git a/manifests/supervisorcluster/1.25/kustomization.yaml b/manifests/supervisorcluster/1.25/kustomization.yaml new file mode 100644 index 0000000000..fb9558639f --- /dev/null +++ b/manifests/supervisorcluster/1.25/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - cns-csi.yaml diff --git a/manifests/supervisorcluster/1.26/cns-csi.yaml b/manifests/supervisorcluster/1.26/cns-csi.yaml new file mode 100644 index 0000000000..e38a5cc0d4 --- /dev/null +++ b/manifests/supervisorcluster/1.26/cns-csi.yaml @@ -0,0 +1,678 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-controller-role +rules: + - apiGroups: [""] + resources: ["nodes", "pods", "configmaps", "resourcequotas", "namespaces", "services"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "create", "delete", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + 
resources: ["volumeattachments/status"] + verbs: ["patch"] + - apiGroups: ["cns.vmware.com"] + resources: ["cnsnodevmattachments", "cnsvolumemetadatas", "cnsfileaccessconfigs"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["cns.vmware.com"] + resources: ["cnscsisvfeaturestates"] + verbs: ["create", "get", "list", "update", "watch"] + - apiGroups: ["cns.vmware.com"] + resources: ["cnsfilevolumeclients"] + verbs: ["get", "update", "create", "delete"] + - apiGroups: ["cns.vmware.com"] + resources: ["cnsregistervolumes"] + verbs: ["get", "list", "watch", "update", "delete"] + - apiGroups: ["cns.vmware.com"] + resources: ["triggercsifullsyncs"] + verbs: ["create", "get", "update", "watch", "list"] + - apiGroups: ["cns.vmware.com"] + resources: ["storagepools"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "create", "update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: ["vmoperator.vmware.com"] + resources: ["virtualmachines"] + verbs: ["get", "list"] + - apiGroups: ["vmware.com"] + resources: ["virtualnetworks"] + verbs: ["get"] + - apiGroups: ["netoperator.vmware.com"] + resources: ["networkinterfaces"] + verbs: ["get"] + - apiGroups: ["cns.vmware.com"] + resources: ["cnsvolumeoperationrequests"] + verbs: ["create", "get", "list", "update", "delete"] + - apiGroups: ["apps"] + resources: ["statefulsets"] + verbs: ["list"] + - apiGroups: ["topology.tanzu.vmware.com"] + resources: ["availabilityzones"] + verbs: ["get", "list", "watch"] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshots" ] + verbs: [ "get", "list", "patch" ] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshotclasses" ] + verbs: [ "watch", "get", "list" ] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshotcontents" ] + verbs: [ "create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshotcontents/status" ] + verbs: [ "update", "patch" ] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-controller-binding +subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: vmware-system-csi +roleRef: + kind: ClusterRole + name: vsphere-csi-controller-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: csiRole + namespace: vmware-system-csi +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: wcp-privileged-psp +subjects: + # For the vmware-system-csi nodes. 
+ - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts:vmware-system-csi +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-admin-csi-role +rules: + - apiGroups: ["cns.vmware.com"] + resources: ["cnsregistervolumes"] + verbs: ["get", "list", "create", "delete", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "update", "delete"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "update", "delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: wcp:administrators:cluster-edit-csirole +subjects: + - kind: Group + name: sso:Administrators@ + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: vsphere-admin-csi-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + namespace: vmware-system-csi + name: vsphere-csi-secret-reader +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: vsphere-csi-provisioner-secret-binding + namespace: vmware-system-csi +subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: vmware-system-csi +roleRef: + kind: Role + name: vsphere-csi-secret-reader + apiGroup: rbac.authorization.k8s.io +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi +spec: + replicas: 3 + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 0 + selector: + matchLabels: + app: vsphere-csi-controller + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - vsphere-csi-controller + topologyKey: "kubernetes.io/hostname" + serviceAccount: vsphere-csi-controller + nodeSelector: + node-role.kubernetes.io/control-plane: '' + tolerations: + - operator: "Exists" + key: "node-role.kubernetes.io/control-plane" + effect: "NoSchedule" + hostNetwork: true + priorityClassName: system-node-critical + containers: + - name: csi-provisioner + image: localhost:5000/vmware/csi-provisioner/csi-provisioner:v3.4.0_vmware.1 + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--feature-gates=Topology=true" + - "--strict-topology" + - "--leader-election" + - "--enable-hostlocal-placement=true" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + - "--default-fstype=ext4" + - "--use-service-for-placement-engine=false" + - "--tkgs-ha=true" + - "--leader-election-lease-duration=120s" + - "--leader-election-renew-deadline=60s" + - "--leader-election-retry-period=30s" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: KUBERNETES_SERVICE_HOST + value: "127.0.0.1" + - name: KUBERNETES_SERVICE_PORT + value: "6443" + - name: VSPHERE_CLOUD_OPERATOR_SERVICE_PORT + value: "29000" + - name: VSPHERE_CLOUD_OPERATOR_SERVICE_NAME # service name to be used by csi-provisioner to connect to placement engine + value: vmware-system-psp-operator-k8s-cloud-operator-service + - name: VSPHERE_CLOUD_OPERATOR_SERVICE_NAMESPACE # namespace for service name to be used by csi-provisioner to connect to placement engine + value: vmware-system-appplatform-operator-system + imagePullPolicy: "IfNotPresent" + volumeMounts: + - 
name: socket-dir + mountPath: /csi + - name: csi-attacher + image: localhost:5000/vmware.io/csi-attacher:v4.3.0_vmware.1 + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + - "--leader-election-lease-duration=120s" + - "--leader-election-renew-deadline=60s" + - "--leader-election-retry-period=30s" + - "--worker-threads=25" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: KUBERNETES_SERVICE_HOST + value: "127.0.0.1" + - name: KUBERNETES_SERVICE_PORT + value: "6443" + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: csi-resizer + image: localhost:5000/vmware/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer:v1.8.0_vmware.1 + imagePullPolicy: IfNotPresent + args: + - --v=4 + - --timeout=300s + - --handle-volume-inuse-error=false # Set this to true if used in vSphere 7.0U1 + - --csi-address=$(ADDRESS) + - --leader-election + - --kube-api-qps=100 + - --kube-api-burst=100 + - "--leader-election-lease-duration=120s" + - "--leader-election-renew-deadline=60s" + - "--leader-election-retry-period=30s" + env: + - name: ADDRESS + value: /csi/csi.sock + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /csi + name: socket-dir + - name: vsphere-csi-controller + image: localhost:5000/vmware/vsphere-csi: + ports: + - containerPort: 2112 + name: prometheus + protocol: TCP + - name: healthz + containerPort: 9808 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 180 + failureThreshold: 3 + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: CLUSTER_FLAVOR + value: "WORKLOAD" + - name: X_CSI_MODE + value: "controller" + - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT + value: 3m + - name: KUBERNETES_SERVICE_HOST + value: "127.0.0.1" + - name: KUBERNETES_SERVICE_PORT + value: "6443" + - name: POD_LISTENER_SERVICE_PORT + value: "29000" + - name: VSPHERE_CSI_CONFIG + value: "/etc/vmware/wcp/vsphere-cloud-provider.conf" # here vsphere-cloud-provider.conf is the name of the file used for creating secret using "--from-file" flag + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "50" + - name: INCLUSTER_CLIENT_BURST + value: "50" + - name: GODEBUG + value: x509sha1=1 + imagePullPolicy: "IfNotPresent" + volumeMounts: + - mountPath: /etc/vmware/wcp + name: vsphere-config-volume + readOnly: true + - mountPath: /csi + name: socket-dir + - mountPath: /etc/vmware/wcp/tls/ + name: host-vmca + - name: liveness-probe + image: localhost:5000/vmware.io/csi-livenessprobe:v2.10.0_vmware.1 + args: + - "--csi-address=/csi/csi.sock" + volumeMounts: + - mountPath: /csi + name: socket-dir + - name: vsphere-syncer + image: localhost:5000/vmware/syncer: + args: + - "--leader-election" + - "--leader-election-lease-duration=120s" + - "--leader-election-renew-deadline=60s" + - "--leader-election-retry-period=30s" + env: + - name: CLUSTER_FLAVOR + value: "WORKLOAD" + - name: KUBERNETES_SERVICE_HOST + value: "127.0.0.1" + - name: KUBERNETES_SERVICE_PORT + value: "6443" + - name: FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: VOLUME_HEALTH_INTERVAL_MINUTES + value: "5" + - name: POD_POLL_INTERVAL_SECONDS + value: "2" + - name: POD_LISTENER_SERVICE_PORT + value: "29000" + - name: VSPHERE_CSI_CONFIG + value: 
"/etc/vmware/wcp/vsphere-cloud-provider.conf" # here vsphere-cloud-provider.conf is the name of the file used for creating secret using "--from-file" flag + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "50" + - name: INCLUSTER_CLIENT_BURST + value: "50" + - name: GODEBUG + value: x509sha1=1 + imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP + volumeMounts: + - mountPath: /etc/vmware/wcp + name: vsphere-config-volume + readOnly: true + - mountPath: /etc/vmware/wcp/tls/ + name: host-vmca + - name: csi-snapshotter + image: localhost:5000/vmware.io/csi-snapshotter:v6.1.0_vmware.2 + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + - "--extra-create-metadata" + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: "IfNotPresent" + volumeMounts: + - mountPath: /csi + name: socket-dir + volumes: + - name: vsphere-config-volume + secret: + secretName: vsphere-config-secret + - name: socket-dir + emptyDir: {} + - name: host-vmca + hostPath: + path: /etc/vmware/wcp/tls/ + type: Directory +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: csi.vsphere.vmware.com +spec: + attachRequired: true + podInfoOnMount: false +--- +apiVersion: v1 +data: + "volume-extend": "true" + "volume-health": "true" + "online-volume-extend": "true" + "file-volume": "true" + "csi-auth-check": "true" + "trigger-csi-fullsync": "false" + "csi-sv-feature-states-replication": "true" + "fake-attach": "true" + "async-query-volume": "true" + "improved-csi-idempotency": "true" + "block-volume-snapshot": "true" + "sibling-replica-bound-pvc-check": "true" + "tkgs-ha": "true" + "list-volumes": "true" + "cnsmgr-suspend-create-volume": "true" + "listview-tasks": "true" +kind: ConfigMap +metadata: + name: csi-feature-states + namespace: vmware-system-csi +--- +apiVersion: v1 +kind: Service +metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi + labels: + app: vsphere-csi-controller +spec: + ports: + - name: ctlr + port: 2112 + targetPort: 2112 + protocol: TCP + - name: syncer + port: 2113 + targetPort: 2113 + protocol: TCP + selector: + app: vsphere-csi-controller + type: LoadBalancer +--- +apiVersion: v1 +kind: Service +metadata: + name: vmware-system-csi-webhook-service + namespace: vmware-system-csi + labels: + app: vsphere-csi-webhook +spec: + ports: + - port: 443 + targetPort: 9883 + selector: + app: vsphere-csi-webhook +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + app: vsphere-csi-webhook + name: vmware-system-csi-serving-cert + namespace: vmware-system-csi +spec: + dnsNames: + - vmware-system-csi-webhook-service.vmware-system-csi.svc + - vmware-system-csi-webhook-service.vmware-system-csi.svc.cluster.local + issuerRef: + kind: Issuer + name: vmware-system-csi-selfsigned-issuer + secretName: vmware-system-csi-webhook-service-cert +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + labels: + app: vsphere-csi-webhook + name: vmware-system-csi-selfsigned-issuer + namespace: vmware-system-csi +spec: + selfSigned: {} +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: vmware-system-csi-validating-webhook-configuration + labels: + app: vsphere-csi-webhook + annotations: + cert-manager.io/inject-ca-from: vmware-system-csi/vmware-system-csi-serving-cert +webhooks: + - name: 
validation.csi.vsphere.vmware.com + clientConfig: + service: + name: vmware-system-csi-webhook-service + namespace: vmware-system-csi + path: "/validate" + rules: + - apiGroups: [""] + apiVersions: ["v1", "v1beta1"] + operations: ["CREATE", "UPDATE", "DELETE"] + resources: ["persistentvolumeclaims"] + scope: "Namespaced" + - apiGroups: ["snapshot.storage.k8s.io"] + apiVersions: ["v1"] + operations: ["CREATE"] + resources: ["volumesnapshots"] + scope: "Namespaced" + sideEffects: None + admissionReviewVersions: ["v1"] + failurePolicy: Fail +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-webhook-cluster-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes", "persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-webhook-cluster-role-binding +subjects: + - kind: ServiceAccount + name: default + namespace: vmware-system-csi +roleRef: + kind: ClusterRole + name: vsphere-csi-webhook-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-webhook-role + namespace: vmware-system-csi +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-webhook-role-binding + namespace: vmware-system-csi +subjects: + - kind: ServiceAccount + name: default + namespace: vmware-system-csi +roleRef: + kind: Role + name: vsphere-csi-webhook-role + apiGroup: rbac.authorization.k8s.io +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: vsphere-csi-webhook + namespace: vmware-system-csi + labels: + app: vsphere-csi-webhook +spec: + replicas: 3 + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 0 + selector: + matchLabels: + app: vsphere-csi-webhook + template: + metadata: + labels: + app: vsphere-csi-webhook + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vsphere-csi-webhook + topologyKey: kubernetes.io/hostname + hostNetwork: true + nodeSelector: + node-role.kubernetes.io/control-plane: "" + terminationGracePeriodSeconds: 10 + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - effect: NoExecute + key: node.alpha.kubernetes.io/notReady + operator: Exists + - effect: NoExecute + key: node.alpha.kubernetes.io/unreachable + operator: Exists + containers: + - name: vsphere-webhook + image: localhost:5000/vmware/syncer: + args: + - "--operation-mode=WEBHOOK_SERVER" + - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" + - "--fss-namespace=$(CSI_NAMESPACE)" + imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 9883 + name: webhook-server + protocol: TCP + env: + - name: CNSCSI_WEBHOOK_SERVICE_CONTAINER_PORT + value: "9883" + - name: CLUSTER_FLAVOR + value: "WORKLOAD" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "50" + - name: INCLUSTER_CLIENT_BURST + value: "50" + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: webhook-certs + readOnly: 
true + volumes: + - name: webhook-certs + secret: + defaultMode: 420 + secretName: vmware-system-csi-webhook-service-cert diff --git a/manifests/supervisorcluster/1.26/kustomization.yaml b/manifests/supervisorcluster/1.26/kustomization.yaml new file mode 100644 index 0000000000..fb9558639f --- /dev/null +++ b/manifests/supervisorcluster/1.26/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - cns-csi.yaml diff --git a/manifests/supervisorcluster/1.27/cns-csi.yaml b/manifests/supervisorcluster/1.27/cns-csi.yaml new file mode 100644 index 0000000000..e38a5cc0d4 --- /dev/null +++ b/manifests/supervisorcluster/1.27/cns-csi.yaml @@ -0,0 +1,678 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-controller-role +rules: + - apiGroups: [""] + resources: ["nodes", "pods", "configmaps", "resourcequotas", "namespaces", "services"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "create", "delete", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] + - apiGroups: ["cns.vmware.com"] + resources: ["cnsnodevmattachments", "cnsvolumemetadatas", "cnsfileaccessconfigs"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["cns.vmware.com"] + resources: ["cnscsisvfeaturestates"] + verbs: ["create", "get", "list", "update", "watch"] + - apiGroups: ["cns.vmware.com"] + resources: ["cnsfilevolumeclients"] + verbs: ["get", "update", "create", "delete"] + - apiGroups: ["cns.vmware.com"] + resources: ["cnsregistervolumes"] + verbs: ["get", "list", "watch", "update", "delete"] + - apiGroups: ["cns.vmware.com"] + resources: ["triggercsifullsyncs"] + verbs: ["create", "get", "update", "watch", "list"] + - apiGroups: ["cns.vmware.com"] + resources: ["storagepools"] + verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "create", "update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: ["vmoperator.vmware.com"] + resources: ["virtualmachines"] + verbs: ["get", "list"] + - apiGroups: ["vmware.com"] + resources: ["virtualnetworks"] + verbs: ["get"] + - apiGroups: ["netoperator.vmware.com"] + resources: ["networkinterfaces"] + verbs: ["get"] + - apiGroups: ["cns.vmware.com"] + resources: ["cnsvolumeoperationrequests"] + verbs: ["create", "get", "list", "update", "delete"] + - apiGroups: ["apps"] + resources: ["statefulsets"] + verbs: ["list"] + - apiGroups: ["topology.tanzu.vmware.com"] + resources: 
["availabilityzones"] + verbs: ["get", "list", "watch"] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshots" ] + verbs: [ "get", "list", "patch" ] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshotclasses" ] + verbs: [ "watch", "get", "list" ] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshotcontents" ] + verbs: [ "create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshotcontents/status" ] + verbs: [ "update", "patch" ] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-controller-binding +subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: vmware-system-csi +roleRef: + kind: ClusterRole + name: vsphere-csi-controller-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: csiRole + namespace: vmware-system-csi +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: wcp-privileged-psp +subjects: + # For the vmware-system-csi nodes. + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts:vmware-system-csi +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-admin-csi-role +rules: + - apiGroups: ["cns.vmware.com"] + resources: ["cnsregistervolumes"] + verbs: ["get", "list", "create", "delete", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "update", "delete"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "update", "delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: wcp:administrators:cluster-edit-csirole +subjects: + - kind: Group + name: sso:Administrators@ + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: vsphere-admin-csi-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + namespace: vmware-system-csi + name: vsphere-csi-secret-reader +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: vsphere-csi-provisioner-secret-binding + namespace: vmware-system-csi +subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: vmware-system-csi +roleRef: + kind: Role + name: vsphere-csi-secret-reader + apiGroup: rbac.authorization.k8s.io +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi +spec: + replicas: 3 + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 0 + selector: + matchLabels: + app: vsphere-csi-controller + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - vsphere-csi-controller + topologyKey: "kubernetes.io/hostname" + serviceAccount: vsphere-csi-controller + nodeSelector: + node-role.kubernetes.io/control-plane: '' + tolerations: + - operator: "Exists" + key: "node-role.kubernetes.io/control-plane" + effect: "NoSchedule" + hostNetwork: true + priorityClassName: system-node-critical + containers: + - name: csi-provisioner + image: 
localhost:5000/vmware/csi-provisioner/csi-provisioner:v3.4.0_vmware.1 + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--feature-gates=Topology=true" + - "--strict-topology" + - "--leader-election" + - "--enable-hostlocal-placement=true" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + - "--default-fstype=ext4" + - "--use-service-for-placement-engine=false" + - "--tkgs-ha=true" + - "--leader-election-lease-duration=120s" + - "--leader-election-renew-deadline=60s" + - "--leader-election-retry-period=30s" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: KUBERNETES_SERVICE_HOST + value: "127.0.0.1" + - name: KUBERNETES_SERVICE_PORT + value: "6443" + - name: VSPHERE_CLOUD_OPERATOR_SERVICE_PORT + value: "29000" + - name: VSPHERE_CLOUD_OPERATOR_SERVICE_NAME # service name to be used by csi-provisioner to connect to placement engine + value: vmware-system-psp-operator-k8s-cloud-operator-service + - name: VSPHERE_CLOUD_OPERATOR_SERVICE_NAMESPACE # namespace for service name to be used by csi-provisioner to connect to placement engine + value: vmware-system-appplatform-operator-system + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: csi-attacher + image: localhost:5000/vmware.io/csi-attacher:v4.3.0_vmware.1 + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + - "--leader-election-lease-duration=120s" + - "--leader-election-renew-deadline=60s" + - "--leader-election-retry-period=30s" + - "--worker-threads=25" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: KUBERNETES_SERVICE_HOST + value: "127.0.0.1" + - name: KUBERNETES_SERVICE_PORT + value: "6443" + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: csi-resizer + image: localhost:5000/vmware/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer:v1.8.0_vmware.1 + imagePullPolicy: IfNotPresent + args: + - --v=4 + - --timeout=300s + - --handle-volume-inuse-error=false # Set this to true if used in vSphere 7.0U1 + - --csi-address=$(ADDRESS) + - --leader-election + - --kube-api-qps=100 + - --kube-api-burst=100 + - "--leader-election-lease-duration=120s" + - "--leader-election-renew-deadline=60s" + - "--leader-election-retry-period=30s" + env: + - name: ADDRESS + value: /csi/csi.sock + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /csi + name: socket-dir + - name: vsphere-csi-controller + image: localhost:5000/vmware/vsphere-csi: + ports: + - containerPort: 2112 + name: prometheus + protocol: TCP + - name: healthz + containerPort: 9808 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 180 + failureThreshold: 3 + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: CLUSTER_FLAVOR + value: "WORKLOAD" + - name: X_CSI_MODE + value: "controller" + - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT + value: 3m + - name: KUBERNETES_SERVICE_HOST + value: "127.0.0.1" + - name: KUBERNETES_SERVICE_PORT + value: "6443" + - name: POD_LISTENER_SERVICE_PORT + value: "29000" + - name: VSPHERE_CSI_CONFIG + value: "/etc/vmware/wcp/vsphere-cloud-provider.conf" # here vsphere-cloud-provider.conf is the name of the file used for creating secret using "--from-file" flag + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + - 
name: INCLUSTER_CLIENT_QPS + value: "50" + - name: INCLUSTER_CLIENT_BURST + value: "50" + - name: GODEBUG + value: x509sha1=1 + imagePullPolicy: "IfNotPresent" + volumeMounts: + - mountPath: /etc/vmware/wcp + name: vsphere-config-volume + readOnly: true + - mountPath: /csi + name: socket-dir + - mountPath: /etc/vmware/wcp/tls/ + name: host-vmca + - name: liveness-probe + image: localhost:5000/vmware.io/csi-livenessprobe:v2.10.0_vmware.1 + args: + - "--csi-address=/csi/csi.sock" + volumeMounts: + - mountPath: /csi + name: socket-dir + - name: vsphere-syncer + image: localhost:5000/vmware/syncer: + args: + - "--leader-election" + - "--leader-election-lease-duration=120s" + - "--leader-election-renew-deadline=60s" + - "--leader-election-retry-period=30s" + env: + - name: CLUSTER_FLAVOR + value: "WORKLOAD" + - name: KUBERNETES_SERVICE_HOST + value: "127.0.0.1" + - name: KUBERNETES_SERVICE_PORT + value: "6443" + - name: FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: VOLUME_HEALTH_INTERVAL_MINUTES + value: "5" + - name: POD_POLL_INTERVAL_SECONDS + value: "2" + - name: POD_LISTENER_SERVICE_PORT + value: "29000" + - name: VSPHERE_CSI_CONFIG + value: "/etc/vmware/wcp/vsphere-cloud-provider.conf" # here vsphere-cloud-provider.conf is the name of the file used for creating secret using "--from-file" flag + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "50" + - name: INCLUSTER_CLIENT_BURST + value: "50" + - name: GODEBUG + value: x509sha1=1 + imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP + volumeMounts: + - mountPath: /etc/vmware/wcp + name: vsphere-config-volume + readOnly: true + - mountPath: /etc/vmware/wcp/tls/ + name: host-vmca + - name: csi-snapshotter + image: localhost:5000/vmware.io/csi-snapshotter:v6.1.0_vmware.2 + args: + - "--v=4" + - "--timeout=300s" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--kube-api-qps=100" + - "--kube-api-burst=100" + - "--extra-create-metadata" + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: "IfNotPresent" + volumeMounts: + - mountPath: /csi + name: socket-dir + volumes: + - name: vsphere-config-volume + secret: + secretName: vsphere-config-secret + - name: socket-dir + emptyDir: {} + - name: host-vmca + hostPath: + path: /etc/vmware/wcp/tls/ + type: Directory +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: csi.vsphere.vmware.com +spec: + attachRequired: true + podInfoOnMount: false +--- +apiVersion: v1 +data: + "volume-extend": "true" + "volume-health": "true" + "online-volume-extend": "true" + "file-volume": "true" + "csi-auth-check": "true" + "trigger-csi-fullsync": "false" + "csi-sv-feature-states-replication": "true" + "fake-attach": "true" + "async-query-volume": "true" + "improved-csi-idempotency": "true" + "block-volume-snapshot": "true" + "sibling-replica-bound-pvc-check": "true" + "tkgs-ha": "true" + "list-volumes": "true" + "cnsmgr-suspend-create-volume": "true" + "listview-tasks": "true" +kind: ConfigMap +metadata: + name: csi-feature-states + namespace: vmware-system-csi +--- +apiVersion: v1 +kind: Service +metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi + labels: + app: vsphere-csi-controller +spec: + ports: + - name: ctlr + port: 2112 + targetPort: 2112 + protocol: TCP + - name: syncer + port: 2113 + targetPort: 2113 + protocol: TCP + selector: + app: vsphere-csi-controller + type: LoadBalancer +--- +apiVersion: v1 +kind: Service +metadata: 
+ name: vmware-system-csi-webhook-service + namespace: vmware-system-csi + labels: + app: vsphere-csi-webhook +spec: + ports: + - port: 443 + targetPort: 9883 + selector: + app: vsphere-csi-webhook +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + app: vsphere-csi-webhook + name: vmware-system-csi-serving-cert + namespace: vmware-system-csi +spec: + dnsNames: + - vmware-system-csi-webhook-service.vmware-system-csi.svc + - vmware-system-csi-webhook-service.vmware-system-csi.svc.cluster.local + issuerRef: + kind: Issuer + name: vmware-system-csi-selfsigned-issuer + secretName: vmware-system-csi-webhook-service-cert +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + labels: + app: vsphere-csi-webhook + name: vmware-system-csi-selfsigned-issuer + namespace: vmware-system-csi +spec: + selfSigned: {} +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: vmware-system-csi-validating-webhook-configuration + labels: + app: vsphere-csi-webhook + annotations: + cert-manager.io/inject-ca-from: vmware-system-csi/vmware-system-csi-serving-cert +webhooks: + - name: validation.csi.vsphere.vmware.com + clientConfig: + service: + name: vmware-system-csi-webhook-service + namespace: vmware-system-csi + path: "/validate" + rules: + - apiGroups: [""] + apiVersions: ["v1", "v1beta1"] + operations: ["CREATE", "UPDATE", "DELETE"] + resources: ["persistentvolumeclaims"] + scope: "Namespaced" + - apiGroups: ["snapshot.storage.k8s.io"] + apiVersions: ["v1"] + operations: ["CREATE"] + resources: ["volumesnapshots"] + scope: "Namespaced" + sideEffects: None + admissionReviewVersions: ["v1"] + failurePolicy: Fail +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-webhook-cluster-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes", "persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-webhook-cluster-role-binding +subjects: + - kind: ServiceAccount + name: default + namespace: vmware-system-csi +roleRef: + kind: ClusterRole + name: vsphere-csi-webhook-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-webhook-role + namespace: vmware-system-csi +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vsphere-csi-webhook-role-binding + namespace: vmware-system-csi +subjects: + - kind: ServiceAccount + name: default + namespace: vmware-system-csi +roleRef: + kind: Role + name: vsphere-csi-webhook-role + apiGroup: rbac.authorization.k8s.io +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: vsphere-csi-webhook + namespace: vmware-system-csi + labels: + app: vsphere-csi-webhook +spec: + replicas: 3 + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 0 + selector: + matchLabels: + app: vsphere-csi-webhook + template: + metadata: + labels: + app: vsphere-csi-webhook + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vsphere-csi-webhook + topologyKey: kubernetes.io/hostname + hostNetwork: true + 
nodeSelector: + node-role.kubernetes.io/control-plane: "" + terminationGracePeriodSeconds: 10 + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - effect: NoExecute + key: node.alpha.kubernetes.io/notReady + operator: Exists + - effect: NoExecute + key: node.alpha.kubernetes.io/unreachable + operator: Exists + containers: + - name: vsphere-webhook + image: localhost:5000/vmware/syncer: + args: + - "--operation-mode=WEBHOOK_SERVER" + - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" + - "--fss-namespace=$(CSI_NAMESPACE)" + imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 9883 + name: webhook-server + protocol: TCP + env: + - name: CNSCSI_WEBHOOK_SERVICE_CONTAINER_PORT + value: "9883" + - name: CLUSTER_FLAVOR + value: "WORKLOAD" + - name: LOGGER_LEVEL + value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "50" + - name: INCLUSTER_CLIENT_BURST + value: "50" + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: webhook-certs + readOnly: true + volumes: + - name: webhook-certs + secret: + defaultMode: 420 + secretName: vmware-system-csi-webhook-service-cert diff --git a/manifests/supervisorcluster/1.27/kustomization.yaml b/manifests/supervisorcluster/1.27/kustomization.yaml new file mode 100644 index 0000000000..fb9558639f --- /dev/null +++ b/manifests/supervisorcluster/1.27/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - cns-csi.yaml diff --git a/manifests/vanilla/csi-snapshot-validatingwebhook.yaml b/manifests/vanilla/csi-snapshot-validatingwebhook.yaml index 3ec1dd68ab..c9949a84e4 100644 --- a/manifests/vanilla/csi-snapshot-validatingwebhook.yaml +++ b/manifests/vanilla/csi-snapshot-validatingwebhook.yaml @@ -84,7 +84,7 @@ spec: serviceAccountName: snapshot-webhook containers: - name: snapshot-validation - image: k8s.gcr.io/sig-storage/snapshot-validation-webhook:v6.2.1 # change the image if you wish to use your own custom validation server image + image: registry.k8s.io/sig-storage/snapshot-validation-webhook:v6.2.2 # change the image if you wish to use your own custom validation server image imagePullPolicy: IfNotPresent args: ['--tls-cert-file=/run/secrets/tls/tls.crt', '--tls-private-key-file=/run/secrets/tls/tls.key'] ports: diff --git a/manifests/vanilla/deploy-csi-snapshot-components.sh b/manifests/vanilla/deploy-csi-snapshot-components.sh index 7a9e8ae24b..4c7ff70667 100755 --- a/manifests/vanilla/deploy-csi-snapshot-components.sh +++ b/manifests/vanilla/deploy-csi-snapshot-components.sh @@ -59,7 +59,7 @@ else exit 1 fi -qualified_version="v6.2.1" +qualified_version="v6.2.2" volumesnapshotclasses_crd="volumesnapshotclasses.snapshot.storage.k8s.io" volumesnapshotcontents_crd="volumesnapshotcontents.snapshot.storage.k8s.io" volumesnapshots_crd="volumesnapshots.snapshot.storage.k8s.io" @@ -200,7 +200,7 @@ EOF openssl req -nodes -new -x509 -keyout "${tmpdir}"/ca.key -out "${tmpdir}"/ca.crt -subj "/CN=vSphere CSI Admission Controller Webhook CA" openssl genrsa -out "${tmpdir}"/webhook-server-tls.key 2048 openssl req -new -key "${tmpdir}"/webhook-server-tls.key -subj "/CN=${service}.${namespace}.svc" -config "${tmpdir}"/server.conf \ - | openssl x509 -req -CA "${tmpdir}"/ca.crt -CAkey "${tmpdir}"/ca.key -CAcreateserial -out "${tmpdir}"/webhook-server-tls.crt -extensions v3_req -extfile 
"${tmpdir}"/server.conf + | openssl x509 -req -CA "${tmpdir}"/ca.crt -CAkey "${tmpdir}"/ca.key -days 180 -CAcreateserial -out "${tmpdir}"/webhook-server-tls.crt -extensions v3_req -extfile "${tmpdir}"/server.conf cat <"${tmpdir}"/webhook.config [WebHookConfig] port = "8443" @@ -237,7 +237,7 @@ spec: spec: containers: - name: csi-snapshotter - image: 'k8s.gcr.io/sig-storage/csi-snapshotter:${qualified_version}' + image: 'registry.k8s.io/sig-storage/csi-snapshotter:${qualified_version}' args: - '--v=4' - '--kube-api-qps=100' @@ -265,7 +265,7 @@ EOF check_snapshotter_sidecar(){ local found="false" local container_images - local csi_snapshotter_image="k8s.gcr.io/sig-storage/csi-snapshotter" + local csi_snapshotter_image="registry.k8s.io/sig-storage/csi-snapshotter" container_images=$(kubectl -n vmware-system-csi get deployment vsphere-csi-controller -o jsonpath='{.spec.template.spec.containers[*].image}') IFS=' ' read -r -a container_images_arr <<< "$container_images" diff --git a/manifests/vanilla/deploy-vsphere-csi-validation-webhook.sh b/manifests/vanilla/deploy-vsphere-csi-validation-webhook.sh index 5831654ab5..88ad82ccad 100755 --- a/manifests/vanilla/deploy-vsphere-csi-validation-webhook.sh +++ b/manifests/vanilla/deploy-vsphere-csi-validation-webhook.sh @@ -81,7 +81,7 @@ EOF openssl req -nodes -new -x509 -keyout "${tmpdir}"/ca.key -out "${tmpdir}"/ca.crt -subj "/CN=vSphere CSI Admission Controller Webhook CA" openssl genrsa -out "${tmpdir}"/webhook-server-tls.key 2048 openssl req -new -key "${tmpdir}"/webhook-server-tls.key -subj "/CN=${service}.${namespace}.svc" -config "${tmpdir}"/server.conf \ - | openssl x509 -req -CA "${tmpdir}"/ca.crt -CAkey "${tmpdir}"/ca.key -CAcreateserial -out "${tmpdir}"/webhook-server-tls.crt -extensions v3_req -extfile "${tmpdir}"/server.conf + | openssl x509 -req -CA "${tmpdir}"/ca.crt -CAkey "${tmpdir}"/ca.key -days 180 -CAcreateserial -out "${tmpdir}"/webhook-server-tls.crt -extensions v3_req -extfile "${tmpdir}"/server.conf cat <"${tmpdir}"/webhook.config [WebHookConfig] diff --git a/manifests/vanilla/validatingwebhook.yaml b/manifests/vanilla/validatingwebhook.yaml index a597750e6f..28fbef5d6e 100644 --- a/manifests/vanilla/validatingwebhook.yaml +++ b/manifests/vanilla/validatingwebhook.yaml @@ -134,7 +134,7 @@ spec: dnsPolicy: "Default" containers: - name: vsphere-webhook - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v3.0.2 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v3.1.2 args: - "--operation-mode=WEBHOOK_SERVER" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" @@ -149,6 +149,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + securityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 volumeMounts: - mountPath: /run/secrets/tls name: webhook-certs diff --git a/manifests/vanilla/vsphere-csi-driver.yaml b/manifests/vanilla/vsphere-csi-driver.yaml index 291b05d27d..c1f6096304 100644 --- a/manifests/vanilla/vsphere-csi-driver.yaml +++ b/manifests/vanilla/vsphere-csi-driver.yaml @@ -155,7 +155,6 @@ data: "async-query-volume": "true" "block-volume-snapshot": "true" "csi-windows-support": "true" - "use-csinode-id": "true" "list-volumes": "true" "pv-to-backingdiskobjectid-mapping": "false" "cnsmgr-suspend-create-volume": "true" @@ -163,7 +162,7 @@ data: "max-pvscsi-targets-per-vm": "true" "multi-vcenter-csi-topology": "true" "csi-internal-generated-cluster-id": "true" - "listview-tasks": "false" + "listview-tasks": "true" kind: ConfigMap metadata: name: 
internal-feature-states.csi.vsphere.vmware.com @@ -244,7 +243,7 @@ spec: dnsPolicy: "Default" containers: - name: csi-attacher - image: k8s.gcr.io/sig-storage/csi-attacher:v4.2.0 + image: registry.k8s.io/sig-storage/csi-attacher:v4.3.0 args: - "--v=4" - "--timeout=300s" @@ -262,7 +261,7 @@ spec: - mountPath: /csi name: socket-dir - name: csi-resizer - image: k8s.gcr.io/sig-storage/csi-resizer:v1.7.0 + image: registry.k8s.io/sig-storage/csi-resizer:v1.8.0 args: - "--v=4" - "--timeout=300s" @@ -281,7 +280,7 @@ spec: - mountPath: /csi name: socket-dir - name: vsphere-csi-controller - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.0.2 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.1.2 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -307,6 +306,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + securityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 volumeMounts: - mountPath: /etc/cloud name: vsphere-config-volume @@ -329,7 +332,7 @@ spec: periodSeconds: 180 failureThreshold: 3 - name: liveness-probe - image: k8s.gcr.io/sig-storage/livenessprobe:v2.9.0 + image: registry.k8s.io/sig-storage/livenessprobe:v2.10.0 args: - "--v=4" - "--csi-address=/csi/csi.sock" @@ -337,12 +340,12 @@ spec: - name: socket-dir mountPath: /csi - name: vsphere-syncer - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v3.0.2 + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v3.1.2 args: - "--leader-election" - - "--leader-election-lease-duration=120s" - - "--leader-election-renew-deadline=60s" - - "--leader-election-retry-period=30s" + - "--leader-election-lease-duration=30s" + - "--leader-election-renew-deadline=20s" + - "--leader-election-retry-period=10s" - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" imagePullPolicy: "Always" @@ -367,12 +370,16 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + securityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 volumeMounts: - mountPath: /etc/cloud name: vsphere-config-volume readOnly: true - name: csi-provisioner - image: k8s.gcr.io/sig-storage/csi-provisioner:v3.4.0 + image: registry.k8s.io/sig-storage/csi-provisioner:v3.5.0 args: - "--v=4" - "--timeout=300s" @@ -394,7 +401,7 @@ spec: - mountPath: /csi name: socket-dir - name: csi-snapshotter - image: k8s.gcr.io/sig-storage/csi-snapshotter:v6.2.1 + image: registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2 args: - "--v=4" - "--kube-api-qps=100" @@ -445,7 +452,7 @@ spec: dnsPolicy: "ClusterFirstWithHostNet" containers: - name: node-driver-registrar - image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.7.0 + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0 args: - "--v=5" - "--csi-address=$(ADDRESS)" @@ -468,7 +475,7 @@ spec: - --mode=kubelet-registration-probe initialDelaySeconds: 3 - name: vsphere-csi-node - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.0.2 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.1.2 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -530,7 +537,7 @@ spec: periodSeconds: 5 failureThreshold: 3 - name: liveness-probe - image: k8s.gcr.io/sig-storage/livenessprobe:v2.9.0 + image: registry.k8s.io/sig-storage/livenessprobe:v2.10.0 args: - "--v=4" - "--csi-address=/csi/csi.sock" @@ -592,7 +599,7 @@ spec: serviceAccountName: vsphere-csi-node containers: - name: node-driver-registrar - 
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.7.0 + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0 args: - "--v=5" - "--csi-address=$(ADDRESS)" @@ -615,7 +622,7 @@ spec: - --mode=kubelet-registration-probe initialDelaySeconds: 3 - name: vsphere-csi-node - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.0.2 + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.1.2 args: - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" - "--fss-namespace=$(CSI_NAMESPACE)" @@ -672,7 +679,7 @@ spec: periodSeconds: 5 failureThreshold: 3 - name: liveness-probe - image: k8s.gcr.io/sig-storage/livenessprobe:v2.9.0 + image: registry.k8s.io/sig-storage/livenessprobe:v2.10.0 args: - "--v=4" - "--csi-address=/csi/csi.sock" diff --git a/pipeline/deploy-staging.sh b/pipeline/deploy-staging.sh index d4c6404e9b..851d8431f0 100755 --- a/pipeline/deploy-staging.sh +++ b/pipeline/deploy-staging.sh @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + set +x if [[ -z "${VSPHERE_CSI_CONTROLLER_IMAGE}" ]] @@ -21,10 +35,30 @@ then fi git clone "$CNS_CSI_STAGING_REPO" || exit 1 + +# Update the CNS-CSI manifest files to capture any changes. +cp -R manifests/supervisorcluster/* staging-cd/ || exit 1 cd staging-cd || exit 1 -# Patch the yaml file with the driver and syncer images from build job. -yq -i '(.spec.template.spec.containers[0].image = env(VSPHERE_CSI_CONTROLLER_IMAGE)) | (.spec.template.spec.containers[1].image = env(VSPHERE_SYNCER_IMAGE))' staging/patch.yaml || exit 1 +# Patch the CSI controller patch yaml file with the driver and syncer images from build job. +yq -i '(.spec.template.spec.containers[0].image = env(VSPHERE_CSI_CONTROLLER_IMAGE)) | (.spec.template.spec.containers[1].image = env(VSPHERE_SYNCER_IMAGE))' staging/csi-controller-patch.yaml || exit 1 + +# Patch the CSI webhook patch yaml file with the syncer images from build job. +yq -i '(.spec.template.spec.containers[0].image = env(VSPHERE_SYNCER_IMAGE))' staging/csi-webhook-patch.yaml || exit 1 + +# The kubeconfig has restricted read-only access to only vmware-system-csi namespace in the Supervisor. +# It can only monitor the CSI deployments. +export KUBECONFIG=$CNS_STAGING_SV_KUBECONFIG +export K8S_MAJOR_VERSION +export K8S_MINOR_VERSION + +K8S_MAJOR_VERSION=$(kubectl version -o json | jq .serverVersion.major | tr -d '"') +K8S_MINOR_VERSION=$(kubectl version -o json | jq .serverVersion.minor | tr -d '"') + +echo "Supervisor version: $K8S_MAJOR_VERSION.$K8S_MINOR_VERSION" + +# Replace the kubernetes version in kustomization.yaml +yq -i '.bases = ["../" + env(K8S_MAJOR_VERSION) + "." + env(K8S_MINOR_VERSION)]' staging/kustomization.yaml || exit 1 # If there are any changes, then commit the code changes and push it to the repo. if git diff | grep diff; @@ -34,12 +68,32 @@ then git add . 
|| exit 1 git config user.email "svc.bot-cns@vmware.com" || exit 1 git config user.name "svc.bot-cns" || exit 1 - git commit -m "Pipeline updated staging/patch.yaml with images $VSPHERE_CSI_CONTROLLER_IMAGE and $VSPHERE_SYNCER_IMAGE" || exit 1 + git commit -m "Pipeline updated manifest files with images $VSPHERE_CSI_CONTROLLER_IMAGE and $VSPHERE_SYNCER_IMAGE" || exit 1 git push origin main || exit 1 else echo "No code changes pushed to the staging repo." fi -# TODO: Add code to wait for the CD infra to update the CSI in the staging environment. +while true +do + tmp_file=/tmp/$$ + kubectl -n vmware-system-csi get deployment vsphere-csi-controller -o=jsonpath="{'\n'}{range .spec.template.spec.containers[*]}{.image}{'\n'}{end}" | tee $tmp_file + if grep "$VSPHERE_CSI_CONTROLLER_IMAGE" $tmp_file; + then + echo "CSI deployment is patched." + break + fi + echo "CSI deployment not yet patched. Sleeping for 1 second..." + sleep 1 +done + +echo "Sleeping for 1 min for WCP to reconcile the CSI patch.." +sleep 60 +echo "Wait for 2 mins for the CSI deployment to be ready." +if ! kubectl wait deployment -n vmware-system-csi vsphere-csi-controller --for=jsonpath="{.status.readyReplicas}"=3 --timeout=120s; +then + echo "CSI deployment is not ready within 120s." + exit 1 +fi echo "Completed deploying CSI images to the staging environment." diff --git a/pipeline/deploy.sh b/pipeline/deploy.sh index 2c90418a00..e124409fba 100755 --- a/pipeline/deploy.sh +++ b/pipeline/deploy.sh @@ -1,60 +1,123 @@ #!/bin/bash +# Copyright 2023 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + set +x if [[ -z "${VSPHERE_CSI_CONTROLLER_IMAGE}" ]] then - echo "Env variable unset: VSPHERE_CSI_CONTROLLER_IMAGE" + echo "Env variable unset: VSPHERE_CSI_CONTROLLER_IMAGE." exit 1 fi if [[ -z "${VSPHERE_SYNCER_IMAGE}" ]] then - echo "Env variable unset: VSPHERE_SYNCER_IMAGE" + echo "Env variable unset: VSPHERE_SYNCER_IMAGE." exit 1 fi # Borrow a testbed from CSI Testbed Pool Svc. -if ! testbed=$(curl -X 'GET' "${CNS_TESTBEDPOOL_SVC_URL}"); +if ! testbed=$(curl -X 'PUT' "$TESTBED_POOL_SERVICE_ENDPOINT/v1/pool/$TESTBED_POOL_ID/borrowTestbed" -H 'accept: application/json' -u "$CNS_MANAGER_USERNAME:$CNS_MANAGER_PASSWORD"); then + echo "Unable to borrow a testbed" + exit 1 +else + # borrowTestbed API succeed + echo "Got a testbed from testbed pool service" +fi + +# Get TESTBED_ID +testbedId=$(echo "$testbed" | jq '.id' | tr -d '"') +if [ -z "${testbedId}" ] then - echo "Unable to borrow a testbed!" + echo "testbedId is empty" exit 1 fi -echo "TestbedInfo: $testbed" +# Store the testbed ID. +echo "TESTBED_ID=$testbedId" >> build.env + +# Extract VC IP from borrow testbed API Response. if ! vcIp=$(echo "$testbed" | jq '.vcIp'|tr -d '"'); then - echo "Error getting the vcIp!" + echo "Error getting the vcIp." + exit 1 +fi + +# Extract External Gateway VM IP from borrow testbed API Response. +if ! 
externalVMGatewayIp=$(echo "$testbed" | jq '.externalVMGatewayIp'|tr -d '"'); +then + echo "Error getting the externalVMGatewayIp." exit 1 fi -echo "VC_IP=$vcIp" >> build.env -if ! svAdminCreds=$(echo "$testbed" | jq '.svAdminCreds'|tr -d '"'|base64 -d); +# Extract VC root password from borrow testbed API Response. +if ! vcRootPassword=$(echo "$testbed" | jq '.vcRootPassword'|tr -d '"'); then - echo "Error getting the svAdminCreds!" + echo "Error getting the vcRootPassword." exit 1 fi -echo "svAdminCreds = $svAdminCreds" -SV_KUBECONFIG=/tmp/$$ -echo "$testbed" | jq '.svAdminCreds'|tr -d '"'|base64 -d > $SV_KUBECONFIG -export KUBECONFIG=$SV_KUBECONFIG -if ! kustomize build pipeline/dev | envsubst | kubectl apply -f -; +# Extract VC username from borrow testbed API Response. +if ! vimUsername=$(echo "$testbed" | jq '.vimUsername'|tr -d '"'); then - echo "Error patching the CSI images in the testbed!" + echo "Error getting the vimUsername." exit 1 fi +# Extract VC Admin password from borrow testbed API Response. +if ! vimPassword=$(echo "$testbed" | jq '.vimPassword'|tr -d '"'); +then + echo "Error getting the vimPassword." + exit 1 +fi + +# Print all the values into Console. +echo "VSPHERE_CSI_CONTROLLER_IMAGE = $VSPHERE_CSI_CONTROLLER_IMAGE" +echo "VSPHERE_SYNCER_IMAGE = $VSPHERE_SYNCER_IMAGE" + +# Store all the values into Artifacts. +{ echo "TESTBED_ID=$testbedId"; echo "vcIp=$vcIp"; echo "vcRootPassword=$vcRootPassword"; echo "vimPassword=$vimPassword"; echo "vimUsername=$vimUsername"; echo "externalVMGatewayIp=$externalVMGatewayIp";} >> ./env.json + +SV_KUBECONFIG=/tmp/$$ + +echo "$testbed" | jq '.kubeConfig'|tr -d '"'|base64 -d > $SV_KUBECONFIG + +export KUBECONFIG=$SV_KUBECONFIG + +echo "sv_kubeconfig_content=$(cat $SV_KUBECONFIG)" > ./sv_kubeconfig_content.yaml + +# Pod status on testbed before patching the CSI Images +kubectl get pods -n vmware-system-csi + +kubectl set env deployment/vsphere-csi-controller -n vmware-system-csi --containers=vsphere-syncer FULL_SYNC_INTERVAL_MINUTES=2 VOLUME_HEALTH_INTERVAL_MINUTES=2 +kubectl set image deployment/vsphere-csi-controller -n vmware-system-csi vsphere-syncer="$VSPHERE_SYNCER_IMAGE" vsphere-csi-controller="$VSPHERE_CSI_CONTROLLER_IMAGE" +kubectl set image deployment/vsphere-csi-webhook -n vmware-system-csi vsphere-webhook="$VSPHERE_SYNCER_IMAGE" + # Sleep for 60 seconds so that k8s can act on the applied changes. echo "Sleeping for 60 seconds..." sleep 60 # Wait for 2 mins for the CSI deployment to be ready. -if ! kubectl wait deployment -n vmware-system-csi vsphere-csi-controller --for=jsonpath="{.status.readyReplicas}"=3 --timeout=120s; +if ! kubectl rollout status deployment/vsphere-csi-controller -n vmware-system-csi --timeout=120s; then - echo "CSI deployment is not ready within 120s." - exit 1 + echo "CSI deployment is not ready within 120s." + exit 1 fi +# Pod status on testbed after patching the CSI Images +kubectl get pods -n vmware-system-csi + echo "Successfully patched the CSI images." 
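[Editor's note] For context on the deploy.sh flow above: after `kubectl set image` patches the controller and webhook deployments, a quick spot-check can confirm the new images landed and the rollout finished. The sketch below is illustrative only and not part of deploy.sh; it assumes KUBECONFIG already points at the borrowed testbed's Supervisor (as exported earlier in the script) and uses the namespace, deployment, and `app` label names from the manifests in this change.

```bash
#!/usr/bin/env bash
# Illustrative sanity check, not part of the pipeline.
set -euo pipefail

NAMESPACE=vmware-system-csi
DEPLOYMENT=vsphere-csi-controller

# Print each container name and the image currently set on the controller deployment.
kubectl -n "$NAMESPACE" get deployment "$DEPLOYMENT" \
  -o jsonpath='{range .spec.template.spec.containers[*]}{.name}{"\t"}{.image}{"\n"}{end}'

# Wait for the patched pods to roll out, then list them.
kubectl -n "$NAMESPACE" rollout status "deployment/$DEPLOYMENT" --timeout=120s
kubectl -n "$NAMESPACE" get pods -l app="$DEPLOYMENT" -o wide
```

The same check applies to the vsphere-csi-webhook deployment, whose vsphere-webhook container is patched to the syncer image in the script above.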
diff --git a/pipeline/dev/patch.yaml b/pipeline/dev/patch.yaml index 0b4cfe0ec6..55c18fbe87 100644 --- a/pipeline/dev/patch.yaml +++ b/pipeline/dev/patch.yaml @@ -11,35 +11,24 @@ spec: image: ${VSPHERE_CSI_CONTROLLER_IMAGE} - name: vsphere-syncer image: ${VSPHERE_SYNCER_IMAGE} - - name: csi-provisioner - image: localhost:5000/vmware/csi-provisioner/csi-provisioner:v3.1.0_vmware.2 - - name: csi-attacher - image: localhost:5000/vmware.io/csi-attacher:v3.4.0_vmware.1 - - name: csi-resizer - image: localhost:5000/vmware/kubernetes-csi_external-resizer/kubernetes-csi_external-resizer:v1.4.0_vmware.1 - - name: liveness-probe - image: localhost:5000/vmware.io/csi-livenessprobe:v2.6.0_vmware.1 --- + +kind: Deployment +apiVersion: apps/v1 +metadata: + name: vsphere-csi-webhook + namespace: vmware-system-csi +spec: + template: + spec: + containers: + - name: vsphere-webhook + image: ${VSPHERE_SYNCER_IMAGE} --- apiVersion: v1 data: - "volume-extend": "true" - "volume-health": "true" - "online-volume-extend": "true" - "file-volume": "true" - "csi-auth-check": "true" - "trigger-csi-fullsync": "false" - "csi-sv-feature-states-replication": "true" - "fake-attach": "true" - "async-query-volume": "true" - "improved-csi-idempotency": "true" - "block-volume-snapshot": "false" - "sibling-replica-bound-pvc-check": "true" # Set tkgs-ha to false, else csi-controller and csi-syncer containers do not start. "tkgs-ha": "false" - "list-volumes": "false" - "cnsmgr-suspend-create-volume": "true" - "listview-tasks": "false" kind: ConfigMap metadata: name: csi-feature-states diff --git a/pipeline/e2e-tests-staging.sh b/pipeline/e2e-tests-staging.sh index ce75ecd789..daa6e61e83 100755 --- a/pipeline/e2e-tests-staging.sh +++ b/pipeline/e2e-tests-staging.sh @@ -1,8 +1,47 @@ #!/bin/bash -set -x +# Copyright 2023 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -echo "Running e2e tests in the staging environment..." -# TODO: Implement the script to run the e2e tests. +set +x -echo "Completed running e2e tests." 
+export E2E_TEST_CONF_FILE=$CI_BUILDS_DIR/e2eTest.conf + +tee "$E2E_TEST_CONF_FILE" >/dev/null < sv_kubeconfig_content.yaml +kubeconfigPath="$(pwd)/sv_kubeconfig_content.yaml" + +echo "$vcIp" +echo "$vcRootPassword" > vc_pwd + +export GOVC_INSECURE=1 +export GOVC_URL="https://$vimUsername:$vimPassword@$vcIp" + +DATACENTER=$(govc datacenter.info | grep -i path | awk '{print $2}') +export DATACENTER=$DATACENTER + +COMPUTE_CLUSTER_NAME=$(govc namespace.cluster.ls | awk -F'/' '{print $5}') +export COMPUTE_CLUSTER_NAME=$COMPUTE_CLUSTER_NAME + +export E2E_TEST_CONF_FILE=$CI_BUILDS_DIR/e2eTest.conf + +echo "$GOVC_URL" +echo "$DATACENTER" +echo "$COMPUTE_CLUSTER_NAME" +echo "$E2E_TEST_CONF_FILE" + +tee "$E2E_TEST_CONF_FILE" >/dev/null < /dev/null diff --git a/pkg/apis/migration/migration.go b/pkg/apis/migration/migration.go index f5b48629b1..b1532e8a02 100644 --- a/pkg/apis/migration/migration.go +++ b/pkg/apis/migration/migration.go @@ -35,6 +35,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/cache" "sigs.k8s.io/controller-runtime/pkg/client" + migrationconfig "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/migration/config" migrationv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/migration/v1alpha1" @@ -67,9 +68,13 @@ type VolumeMigrationService interface { GetVolumeID(ctx context.Context, volumeSpec *VolumeSpec, registerIfNotFound bool) (string, error) // GetVolumePath returns VolumePath for a given VolumeID. - // Returns an error if not able to retrieve VolumePath. + // It will also create a CnsVSphereVolumeMigration CR if it's not present. GetVolumePath(ctx context.Context, volumeID string) (string, error) + // GetVolumePathFromMigrationServiceCache checks the in-memory cache for a volumeID + // a cache hit means that the volume is a migrated in-tree volume + GetVolumePathFromMigrationServiceCache(ctx context.Context, volumeID string) (string, error) + // DeleteVolumeInfo helps delete mapping of volumePath to VolumeID for // specified volumeID. DeleteVolumeInfo(ctx context.Context, volumeID string) error @@ -187,7 +192,10 @@ func GetVolumeMigrationService(ctx context.Context, volumeManager *cnsvolume.Man volumeMigrationObject.Spec.VolumePath, volumeMigrationObject.Spec.VolumeID) }, } - informer.Informer().AddEventHandler(handlers) + _, err = informer.Informer().AddEventHandler(handlers) + if err != nil { + return + } stopCh := make(chan struct{}) informer.Informer().Run(stopCh) }() @@ -362,6 +370,26 @@ func (volumeMigration *volumeMigration) GetVolumePath(ctx context.Context, volum return fileBackingInfo.FilePath, nil } +// GetVolumePathFromMigrationServiceCache checks the in-memory cache for a volumeID +// a cache hit means that the volume is a migrated in-tree volume +func (volumeMigration *volumeMigration) GetVolumePathFromMigrationServiceCache(ctx context.Context, + volumeID string) (string, error) { + log := logger.GetLogger(ctx) + var volumePath string + volumeMigration.volumePathToVolumeID.Range(func(key, value interface{}) bool { + if value.(string) == volumeID { + volumePath = key.(string) + log.Infof("Found VolumePath %v for VolumeID: %q in the cache", volumePath, volumeID) + return false + } + return true + }) + if volumePath != "" { + return volumePath, nil + } + return "", common.ErrNotFound +} + // saveVolumeInfo helps create CR for given cnsVSphereVolumeMigration. 
This func // also update local cache with supplied cnsVSphereVolumeMigration, after // successful creation of CR @@ -585,13 +613,41 @@ func (volumeMigration *volumeMigration) cleanupStaleCRDInstances() { cnsVolumesMap[vol.VolumeId.Id] = true } log.Debugf("cnsVolumesMap: %v:", cnsVolumesMap) + k8sclient, err := k8s.NewClient(ctx) + if err != nil { + log.Errorf("failed to get k8sclient with error: %v", err) + continue + } + // The runCleanupRoutine is only triggered from Syncer container, + // hence we are checking for PV objects in the k8s in the below code snippet. + pvList, err := k8sclient.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) + if err != nil { + log.Errorf("failed to list PersistentVolumes with error %v.", err) + continue + } for _, volumeMigrationResource := range volumeMigrationResourceList.Items { if _, existsInCNSVolumesMap := cnsVolumesMap[volumeMigrationResource.Name]; !existsInCNSVolumesMap { log.Debugf("Volume with id %s is not found in CNS", volumeMigrationResource.Name) - err = volumeMigrationInstance.DeleteVolumeInfo(ctx, volumeMigrationResource.Name) - if err != nil { - log.Warnf("failed to delete volume mapping CR for %s with error %+v", volumeMigrationResource.Name, err) - continue + // Check if a PV exists for the given volumePath in CnsVSphereVolumeMigration CR + pvFound := false + for _, pv := range pvList.Items { + if pv.Spec.VsphereVolume != nil && + pv.Spec.VsphereVolume.VolumePath == volumeMigrationResource.Spec.VolumePath { + pvFound = true + log.Infof("PV %s with VolumePath %s is found in k8s, but CNS does not have an entry."+ + "Skipping the %s CR deletion.", + pv.Name, pv.Spec.VsphereVolume.VolumePath, volumeMigrationResource.Name) + break + } + } + // Delete the CnsVSphereVolumeMigration CR only when there is no corresponding PV in k8s + if !pvFound { + log.Debugf("Deleting CnsVSphereVolumeMigration CR: %s", volumeMigrationResource.Name) + err = volumeMigrationInstance.DeleteVolumeInfo(ctx, volumeMigrationResource.Name) + if err != nil { + log.Warnf("failed to delete volume mapping CR for %s with error %+v", volumeMigrationResource.Name, err) + continue + } } } } diff --git a/pkg/common/cns-lib/node/manager.go b/pkg/common/cns-lib/node/manager.go index 71e1673087..fe5af29492 100644 --- a/pkg/common/cns-lib/node/manager.go +++ b/pkg/common/cns-lib/node/manager.go @@ -40,10 +40,6 @@ var ( type Manager interface { // SetKubernetesClient sets kubernetes client for node manager. SetKubernetesClient(client clientset.Interface) - // SetUseNodeUuid sets whether the node manager should use - // K8s CSINode API object or the K8s Node API object to retrieve - // the node UUID. - SetUseNodeUuid(useNodeUuid bool) // RegisterNode registers a node given its UUID, name. RegisterNode(ctx context.Context, nodeUUID string, nodeName string) error // DiscoverNode discovers a registered node given its UUID. This method @@ -56,13 +52,16 @@ type Manager interface { // given its UUID. If datacenter is present, GetNode will search within this // datacenter given its UUID. If not, it will search in all registered // datacenters. - GetNode(ctx context.Context, nodeUUID string, dc *vsphere.Datacenter) (*vsphere.VirtualMachine, error) - // GetNodeByName refreshes and returns the VirtualMachine for a registered + GetNodeVMAndUpdateCache(ctx context.Context, nodeUUID string, dc *vsphere.Datacenter) (*vsphere.VirtualMachine, error) + // GetNodeVMByUuid returns the VirtualMachine for a registered node + // given its UUID. 
+ GetNodeVMByUuid(ctx context.Context, nodeUUID string) (*vsphere.VirtualMachine, error) + // GetNodeVMByNameAndUpdateCache refreshes and returns the VirtualMachine for a registered // node given its name. - GetNodeByName(ctx context.Context, nodeName string) (*vsphere.VirtualMachine, error) - // GetNodeByNameOrUUID refreshes and returns VirtualMachine for a registered node + GetNodeVMByNameAndUpdateCache(ctx context.Context, nodeName string) (*vsphere.VirtualMachine, error) + // GetNodeVMByNameOrUUID refreshes and returns VirtualMachine for a registered node // using either its name or UUID. - GetNodeByNameOrUUID(ctx context.Context, nodeName string) (*vsphere.VirtualMachine, error) + GetNodeVMByNameOrUUID(ctx context.Context, nodeNameOrUuid string) (*vsphere.VirtualMachine, error) // GetNodeNameByUUID fetches the name of the node given the VM UUID. GetNodeNameByUUID(ctx context.Context, nodeUUID string) (string, error) // GetAllNodes refreshes and returns VirtualMachine for all registered @@ -108,9 +107,6 @@ type defaultManager struct { nodeNameToUUID sync.Map // k8s client. k8sClient clientset.Interface - // useNodeUuid uses K8s CSINode API instead of - // K8s Node to retrieve the node UUID. - useNodeUuid bool } // SetKubernetesClient sets specified kubernetes client to defaultManager.k8sClient @@ -118,13 +114,6 @@ func (m *defaultManager) SetKubernetesClient(client clientset.Interface) { m.k8sClient = client } -// SetUseNodeUuid sets whether the node manager should use -// K8s CSINode API object or the K8s Node API object to retrieve -// node UUID. -func (m *defaultManager) SetUseNodeUuid(useNodeUuid bool) { - m.useNodeUuid = useNodeUuid -} - // RegisterNode registers a node with node manager using its UUID, name. func (m *defaultManager) RegisterNode(ctx context.Context, nodeUUID string, nodeName string) error { log := logger.GetLogger(ctx) @@ -155,27 +144,27 @@ func (m *defaultManager) DiscoverNode(ctx context.Context, nodeUUID string) erro return nil } -// GetNodeByName refreshes and returns the VirtualMachine for a registered node +// GetNodeVMByNameAndUpdateCache refreshes and returns the VirtualMachine for a registered node // given its name. -func (m *defaultManager) GetNodeByName(ctx context.Context, nodeName string) (*vsphere.VirtualMachine, error) { +func (m *defaultManager) GetNodeVMByNameAndUpdateCache(ctx context.Context, + nodeName string) (*vsphere.VirtualMachine, error) { log := logger.GetLogger(ctx) nodeUUID, found := m.nodeNameToUUID.Load(nodeName) if found && nodeUUID != nil && nodeUUID.(string) != "" { - return m.GetNode(ctx, nodeUUID.(string), nil) + return m.GetNodeVMAndUpdateCache(ctx, nodeUUID.(string), nil) } log.Infof("Empty nodeUUID observed in cache for the node: %q", nodeName) - k8snodeUUID, err := k8s.GetNodeUUID(ctx, m.k8sClient, nodeName, - m.useNodeUuid) + k8snodeUUID, err := k8s.GetNodeUUID(ctx, m.k8sClient, nodeName) if err != nil { log.Errorf("failed to get node UUID from node: %q. 
Err: %v", nodeName, err) return nil, err } m.nodeNameToUUID.Store(nodeName, k8snodeUUID) - return m.GetNode(ctx, k8snodeUUID, nil) + return m.GetNodeVMAndUpdateCache(ctx, k8snodeUUID, nil) } -func (m *defaultManager) GetNodeByNameOrUUID( +func (m *defaultManager) GetNodeVMByNameOrUUID( ctx context.Context, nodeNameOrUUID string) (*vsphere.VirtualMachine, error) { log := logger.GetLogger(ctx) nodeUUID, found := m.nodeNameToUUID.Load(nodeNameOrUUID) @@ -184,16 +173,15 @@ func (m *defaultManager) GetNodeByNameOrUUID( return nil, ErrNodeNotFound } if nodeUUID != nil && nodeUUID.(string) != "" { - return m.GetNode(ctx, nodeUUID.(string), nil) + return m.GetNodeVMAndUpdateCache(ctx, nodeUUID.(string), nil) } log.Infof("Empty nodeUUID observed in cache for the node: %q", nodeNameOrUUID) - k8snodeUUID, err := k8s.GetNodeUUID(ctx, m.k8sClient, nodeNameOrUUID, m.useNodeUuid) + k8snodeUUID, err := k8s.GetNodeUUID(ctx, m.k8sClient, nodeNameOrUUID) if err != nil { log.Errorf("failed to get node UUID from node: %q. Err: %v", nodeNameOrUUID, err) return nil, err } - m.nodeNameToUUID.Store(nodeNameOrUUID, k8snodeUUID) - return m.GetNode(ctx, k8snodeUUID, nil) + return m.GetNodeVMAndUpdateCache(ctx, k8snodeUUID, nil) } // GetNodeNameByUUID fetches the name of the node given the VM UUID. @@ -226,7 +214,7 @@ func (m *defaultManager) GetK8sNode(ctx context.Context, nodename string) (*v1.N // GetNode refreshes and returns the VirtualMachine for a registered node // given its UUID. -func (m *defaultManager) GetNode(ctx context.Context, +func (m *defaultManager) GetNodeVMAndUpdateCache(ctx context.Context, nodeUUID string, dc *vsphere.Datacenter) (*vsphere.VirtualMachine, error) { log := logger.GetLogger(ctx) vmInf, discovered := m.nodeVMs.Load(nodeUUID) @@ -266,6 +254,27 @@ func (m *defaultManager) GetNode(ctx context.Context, return vm, nil } +// GetNodeVMByUuid returns the VirtualMachine for a registered node +// given its UUID. This is called by ControllerPublishVolume and +// ControllerUnpublishVolume to perform attach and detach operations. +func (m *defaultManager) GetNodeVMByUuid(ctx context.Context, + nodeUUID string) (*vsphere.VirtualMachine, error) { + log := logger.GetLogger(ctx) + vmInf, discovered := m.nodeVMs.Load(nodeUUID) + if !discovered { + log.Infof("Node VM not found with nodeUUID %s", nodeUUID) + vm, err := vsphere.GetVirtualMachineByUUID(ctx, nodeUUID, false) + if err != nil { + log.Errorf("Couldn't find VM instance with nodeUUID %s, failed to discover with err: %v", nodeUUID, err) + return nil, err + } + log.Infof("Node was successfully found with nodeUUID %s in vm %v", nodeUUID, vm) + return vm, nil + } + vm := vmInf.(*vsphere.VirtualMachine) + return vm, nil +} + // GetAllNodes refreshes and returns VirtualMachine for all registered nodes. func (m *defaultManager) GetAllNodes(ctx context.Context) ([]*vsphere.VirtualMachine, error) { log := logger.GetLogger(ctx) @@ -276,8 +285,7 @@ func (m *defaultManager) GetAllNodes(ctx context.Context) ([]*vsphere.VirtualMac m.nodeNameToUUID.Range(func(nodeName, nodeUUID interface{}) bool { if nodeName != nil && nodeUUID != nil && nodeUUID.(string) == "" { log.Infof("Empty node UUID observed for the node: %q", nodeName) - k8snodeUUID, err := k8s.GetNodeUUID(ctx, m.k8sClient, - nodeName.(string), m.useNodeUuid) + k8snodeUUID, err := k8s.GetNodeUUID(ctx, m.k8sClient, nodeName.(string)) if err != nil { log.Errorf("failed to get node UUID from node: %q. 
Err: %v", nodeName, err) return true @@ -343,8 +351,7 @@ func (m *defaultManager) GetAllNodesByVC(ctx context.Context, vcHost string) ([] m.nodeNameToUUID.Range(func(nodeName, nodeUUID interface{}) bool { if nodeName != nil && nodeUUID != nil && nodeUUID.(string) == "" { log.Infof("Empty node UUID observed for the node: %q", nodeName) - k8snodeUUID, err := k8s.GetNodeUUID(ctx, m.k8sClient, - nodeName.(string), m.useNodeUuid) + k8snodeUUID, err := k8s.GetNodeUUID(ctx, m.k8sClient, nodeName.(string)) if err != nil { log.Errorf("failed to get node UUID from node: %q. Err: %v", nodeName, err) return true diff --git a/pkg/common/cns-lib/node/manager_test.go b/pkg/common/cns-lib/node/manager_test.go deleted file mode 100644 index e7692f62ae..0000000000 --- a/pkg/common/cns-lib/node/manager_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package node - -import ( - "context" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" -) - -func TestDefaultManager_GetNodeByName(t *testing.T) { - nodeName := "foobar.dev.lan" - m := defaultManager{ - nodeVMs: sync.Map{}, - nodeNameToUUID: sync.Map{}, - k8sClient: nil, - useNodeUuid: false, - } - - k8sClient := k8sClientWithNodes(nodeName) - m.SetKubernetesClient(k8sClient) - - vm, _ := m.GetNodeByName(context.TODO(), nodeName) - if vm != nil { - t.Errorf("Unexpected vm found:%v", vm) - } - - nodeUUID, ok := m.nodeNameToUUID.Load(nodeName) - if !ok { - t.Errorf("node name should be loaded into nodeUUID map") - } - assert.Equal(t, "foobar", nodeUUID) -} - -func k8sClientWithNodes(nodeName string) clientset.Interface { - node := &v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - }, - Spec: v1.NodeSpec{ - ProviderID: "vsphere://foobar", - }, - } - client := fake.NewSimpleClientset(node) - return client -} diff --git a/pkg/common/cns-lib/node/nodes.go b/pkg/common/cns-lib/node/nodes.go index 06989e6401..031e17c561 100644 --- a/pkg/common/cns-lib/node/nodes.go +++ b/pkg/common/cns-lib/node/nodes.go @@ -20,7 +20,6 @@ import ( "context" "fmt" - v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" cnsvsphere "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/vsphere" @@ -35,11 +34,8 @@ type Nodes struct { } // Initialize helps initialize node manager and node informer manager. -// If useNodeUuid is set, an informer on K8s CSINode is created. -// if not, an informer on K8s Node API object is created. 
-func (nodes *Nodes) Initialize(ctx context.Context, useNodeUuid bool) error { +func (nodes *Nodes) Initialize(ctx context.Context) error { nodes.cnsNodeManager = GetManager(ctx) - nodes.cnsNodeManager.SetUseNodeUuid(useNodeUuid) k8sclient, err := k8s.NewClient(ctx) if err != nil { log := logger.GetLogger(ctx) @@ -48,68 +44,12 @@ func (nodes *Nodes) Initialize(ctx context.Context, useNodeUuid bool) error { } nodes.cnsNodeManager.SetKubernetesClient(k8sclient) nodes.informMgr = k8s.NewInformer(ctx, k8sclient, true) - if useNodeUuid { - nodes.informMgr.AddCSINodeListener(nodes.csiNodeAdd, - nodes.csiNodeUpdate, nodes.csiNodeDelete) - } else { - nodes.informMgr.AddNodeListener(nodes.nodeAdd, - nodes.nodeUpdate, nodes.nodeDelete) - } + nodes.informMgr.AddCSINodeListener(nodes.csiNodeAdd, + nodes.csiNodeUpdate, nodes.csiNodeDelete) nodes.informMgr.Listen() return nil } -func (nodes *Nodes) nodeAdd(obj interface{}) { - ctx, log := logger.GetNewContextWithLogger() - node, ok := obj.(*v1.Node) - if node == nil || !ok { - log.Warnf("nodeAdd: unrecognized object %+v", obj) - return - } - err := nodes.cnsNodeManager.RegisterNode(ctx, - cnsvsphere.GetUUIDFromProviderID(node.Spec.ProviderID), node.Name) - if err != nil { - log.Warnf("failed to register node:%q. err=%v", node.Name, err) - } -} - -func (nodes *Nodes) nodeUpdate(oldObj interface{}, newObj interface{}) { - ctx, log := logger.GetNewContextWithLogger() - newNode, ok := newObj.(*v1.Node) - if !ok { - log.Warnf("nodeUpdate: unrecognized object newObj %[1]T%+[1]v", newObj) - return - } - oldNode, ok := oldObj.(*v1.Node) - if !ok { - log.Warnf("nodeUpdate: unrecognized object oldObj %[1]T%+[1]v", oldObj) - return - } - if oldNode.Spec.ProviderID != newNode.Spec.ProviderID { - log.Infof("nodeUpdate: Observed ProviderID change from %q to %q for the node: %q", - oldNode.Spec.ProviderID, newNode.Spec.ProviderID, newNode.Name) - - err := nodes.cnsNodeManager.RegisterNode(ctx, - cnsvsphere.GetUUIDFromProviderID(newNode.Spec.ProviderID), newNode.Name) - if err != nil { - log.Warnf("nodeUpdate: Failed to register node:%q. err=%v", newNode.Name, err) - } - } -} - -func (nodes *Nodes) nodeDelete(obj interface{}) { - ctx, log := logger.GetNewContextWithLogger() - node, ok := obj.(*v1.Node) - if node == nil || !ok { - log.Warnf("nodeDelete: unrecognized object %+v", obj) - return - } - err := nodes.cnsNodeManager.UnregisterNode(ctx, node.Name) - if err != nil { - log.Warnf("failed to unregister node:%q. err=%v", node.Name, err) - } -} - func (nodes *Nodes) csiNodeAdd(obj interface{}) { ctx, log := logger.GetNewContextWithLogger() csiNode, ok := obj.(*storagev1.CSINode) @@ -182,19 +122,19 @@ func (nodes *Nodes) csiNodeDelete(obj interface{}) { } } -// GetNodeByName returns VirtualMachine object for given nodeName. +// GetNodeVMByNameAndUpdateCache returns VirtualMachine object for given nodeName. // This is called by ControllerPublishVolume and ControllerUnpublishVolume // to perform attach and detach operations. -func (nodes *Nodes) GetNodeByName(ctx context.Context, nodeName string) ( +func (nodes *Nodes) GetNodeVMByNameAndUpdateCache(ctx context.Context, nodeName string) ( *cnsvsphere.VirtualMachine, error) { - return nodes.cnsNodeManager.GetNodeByName(ctx, nodeName) + return nodes.cnsNodeManager.GetNodeVMByNameAndUpdateCache(ctx, nodeName) } -// GetNodeByNameOrUUID returns VirtualMachine object for given nodeName +// GetNodeVMByNameOrUUID returns VirtualMachine object for given nodeName // This function can be called either using nodeName or nodeUID. 
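With the Node-object listeners removed above, node registration always derives the node UUID from the CSINode object instead of parsing Node.Spec.ProviderID. A hedged sketch of that extraction; the driver name constant and the assumption that the registered NodeID carries the VM UUID are illustrative, not a copy of csiNodeAdd:

```go
package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
)

// Assumed driver name; the real handler presumably matches the CSI driver
// registered by this plugin on each node.
const vSphereCSIDriverName = "csi.vsphere.vmware.com"

// nodeUUIDFromCSINode returns the NodeID reported by the vSphere CSI driver
// on a CSINode object, or "" when the driver has not registered on that node.
func nodeUUIDFromCSINode(csiNode *storagev1.CSINode) string {
	for _, d := range csiNode.Spec.Drivers {
		if d.Name == vSphereCSIDriverName {
			return d.NodeID
		}
	}
	return ""
}

func main() {
	n := &storagev1.CSINode{
		Spec: storagev1.CSINodeSpec{
			Drivers: []storagev1.CSINodeDriver{
				{Name: vSphereCSIDriverName, NodeID: "4237a1b2-hypothetical-uuid"},
			},
		},
	}
	fmt.Println(nodeUUIDFromCSINode(n))
}
```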
-func (nodes *Nodes) GetNodeByNameOrUUID( +func (nodes *Nodes) GetNodeVMByNameOrUUID( ctx context.Context, nodeNameOrUUID string) (*cnsvsphere.VirtualMachine, error) { - return nodes.cnsNodeManager.GetNodeByNameOrUUID(ctx, nodeNameOrUUID) + return nodes.cnsNodeManager.GetNodeVMByNameOrUUID(ctx, nodeNameOrUUID) } // GetNodeNameByUUID fetches the name of the node given the VM UUID. @@ -203,11 +143,11 @@ func (nodes *Nodes) GetNodeNameByUUID(ctx context.Context, nodeUUID string) ( return nodes.cnsNodeManager.GetNodeNameByUUID(ctx, nodeUUID) } -// GetNodeByUuid returns VirtualMachine object for given nodeUuid. +// GetNodeVMByUuid returns VirtualMachine object for given nodeUuid. // This is called by ControllerPublishVolume and ControllerUnpublishVolume // to perform attach and detach operations. -func (nodes *Nodes) GetNodeByUuid(ctx context.Context, nodeUuid string) (*cnsvsphere.VirtualMachine, error) { - return nodes.cnsNodeManager.GetNode(ctx, nodeUuid, nil) +func (nodes *Nodes) GetNodeVMByUuid(ctx context.Context, nodeUuid string) (*cnsvsphere.VirtualMachine, error) { + return nodes.cnsNodeManager.GetNodeVMByUuid(ctx, nodeUuid) } // GetAllNodes returns VirtualMachine objects for all registered nodes in cluster. diff --git a/pkg/common/cns-lib/volume/listview.go b/pkg/common/cns-lib/volume/listview.go index 3e1e3e1fce..1da95db8a1 100644 --- a/pkg/common/cns-lib/volume/listview.go +++ b/pkg/common/cns-lib/volume/listview.go @@ -12,6 +12,7 @@ import ( "github.com/vmware/govmomi/vim25/types" cnsvsphere "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/vsphere" + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/config" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger" ) @@ -87,21 +88,16 @@ func (l *ListViewImpl) createListView(ctx context.Context, tasks []types.Managed return err } l.listView = listView - log.Infof("created listView object %+v for virtualCenter: %+v", l.listView.Reference(), l.virtualCenter) + log.Infof("created listView object %+v for virtualCenter: %+v", + l.listView.Reference(), l.virtualCenter.Config.Host) return nil } // SetVirtualCenter is a setter method for vc. use case: ReloadConfiguration -func (l *ListViewImpl) SetVirtualCenter(ctx context.Context, virtualCenter *cnsvsphere.VirtualCenter) error { +func (l *ListViewImpl) SetVirtualCenter(ctx context.Context, virtualCenter *cnsvsphere.VirtualCenter) { log := logger.GetLogger(ctx) l.virtualCenter = virtualCenter - client, err := virtualCenter.NewClient(ctx) - if err != nil { - return logger.LogNewErrorf(log, "failed to create a govmomiClient for listView. error: %+v", err) - } - client.Timeout = noTimeout - l.govmomiClient = client - return nil + log.Debugf("New virtualCenter object stored for use by ListView") } func getListViewWaitFilter(listView *view.ListView) *property.WaitFilter { @@ -122,11 +118,6 @@ func getListViewWaitFilter(listView *view.ListView) *property.WaitFilter { func (l *ListViewImpl) AddTask(ctx context.Context, taskMoRef types.ManagedObjectReference, ch chan TaskResult) error { log := logger.GetLogger(ctx) log.Infof("AddTask called for %+v", taskMoRef) - err := l.listView.Add(l.ctx, []types.ManagedObjectReference{taskMoRef}) - if err != nil { - return logger.LogNewErrorf(log, "failed to add task to ListView. 
error: %+v", err) - } - log.Infof("task %+v added to listView", taskMoRef) l.taskMap.Upsert(taskMoRef, TaskDetails{ Reference: taskMoRef, @@ -134,6 +125,13 @@ func (l *ListViewImpl) AddTask(ctx context.Context, taskMoRef types.ManagedObjec ResultCh: ch, }) log.Debugf("task %+v added to map", taskMoRef) + + err := l.listView.Add(l.ctx, []types.ManagedObjectReference{taskMoRef}) + if err != nil { + l.taskMap.Delete(taskMoRef) + return logger.LogNewErrorf(log, "failed to add task to ListView. error: %+v", err) + } + log.Infof("task %+v added to listView", taskMoRef) return nil } @@ -166,8 +164,18 @@ func (l *ListViewImpl) isClientValid() error { } else if userSession != nil { return nil } + + err := cnsvsphere.ReadVCConfigs(l.ctx, l.virtualCenter) + if err != nil { + return logger.LogNewErrorf(log, "failed to read VC config. err: %v", err) + } // If session has expired, create a new instance. - client, err := l.virtualCenter.NewClient(l.ctx) + useragent, err := config.GetSessionUserAgent(l.ctx) + if err != nil { + return logger.LogNewErrorf(log, "failed to get useragent for vCenter session. error: %+v", err) + } + useragent = useragent + "-listview" + client, err := l.virtualCenter.NewClient(l.ctx, useragent) if err != nil { return logger.LogNewErrorf(log, "failed to create a govmomi client for listView. error: %+v", err) } @@ -229,7 +237,8 @@ func (l *ListViewImpl) listenToTaskUpdates() { // we want to immediately return a fault for all the pending tasks in the map // note: this is not a task error but an error from the vc if err != nil { - log.Errorf("WaitForUpdates returned err: %v for vc: %+v", err, l.virtualCenter) + log.Errorf("WaitForUpdates returned err: %v for vc: %+v", err, + l.virtualCenter.Config.Host) recreateView = true l.reportErrorOnAllPendingTasks(err) } @@ -319,3 +328,15 @@ func (l *ListViewImpl) MarkTaskForDeletion(ctx context.Context, taskMoRef types. log.Infof("%v marked for deletion", taskMoRef) return nil } + +// LogoutSession is a setter method to logout vcenter session created +func (l *ListViewImpl) LogoutSession(ctx context.Context) error { + log := logger.GetLogger(ctx) + err := l.govmomiClient.Logout(l.ctx) + if err != nil { + log.Errorf("Error while logout vCenter session (list-view) for host %s, Error: %+v", l.virtualCenter.Config.Host, err) + return err + } + log.Infof("Logged out list-view vCenter session for host %s", l.virtualCenter.Config.Host) + return nil +} diff --git a/pkg/common/cns-lib/volume/listview_if.go b/pkg/common/cns-lib/volume/listview_if.go index 036d95ef48..9f0bbfaab0 100644 --- a/pkg/common/cns-lib/volume/listview_if.go +++ b/pkg/common/cns-lib/volume/listview_if.go @@ -17,7 +17,9 @@ type ListViewIf interface { RemoveTask(ctx context.Context, taskMoRef types.ManagedObjectReference) error // SetVirtualCenter is a setter method for the reference to the global vcenter object. 
// use case: ReloadConfiguration - SetVirtualCenter(ctx context.Context, virtualCenter *cnsvsphere.VirtualCenter) error + SetVirtualCenter(ctx context.Context, virtualCenter *cnsvsphere.VirtualCenter) + // LogoutSession logout the vCenter Session + LogoutSession(ctx context.Context) error // MarkTaskForDeletion marks a given task MoRef for deletion by a cleanup goroutine // use case: failure to remove task due to a vc issue MarkTaskForDeletion(ctx context.Context, taskMoRef types.ManagedObjectReference) error diff --git a/pkg/common/cns-lib/volume/listview_test.go b/pkg/common/cns-lib/volume/listview_test.go index 0d4b9c45fc..bfce9c2f39 100644 --- a/pkg/common/cns-lib/volume/listview_test.go +++ b/pkg/common/cns-lib/volume/listview_test.go @@ -18,9 +18,10 @@ import ( "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger" ) +const testVCHost = "testVCHost" + func TestAddRemoveListView(t *testing.T) { ctx := logger.NewContextWithLogger(context.Background()) - model := simulator.VPX() defer model.Remove() if err := model.Create(); err != nil { @@ -37,6 +38,9 @@ func TestAddRemoveListView(t *testing.T) { t.Fatal(err) } + config := vsphere.VirtualCenterConfig{Host: testVCHost} + virtualCenter.Config = &config + listViewImpl, err := NewListViewImpl(ctx, virtualCenter, virtualCenter.Client) assert.NoError(t, err) listViewImpl.shouldStopListening = true @@ -83,6 +87,9 @@ func TestMarkForDeletion(t *testing.T) { t.Fatal(err) } + config := vsphere.VirtualCenterConfig{Host: testVCHost} + virtualCenter.Config = &config + listViewImpl, err := NewListViewImpl(ctx, virtualCenter, virtualCenter.Client) assert.NoError(t, err) listViewImpl.shouldStopListening = true diff --git a/pkg/common/cns-lib/volume/manager.go b/pkg/common/cns-lib/volume/manager.go index 4efcd17105..c26cdfa1aa 100644 --- a/pkg/common/cns-lib/volume/manager.go +++ b/pkg/common/cns-lib/volume/manager.go @@ -35,6 +35,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" cnsvsphere "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/vsphere" + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/config" csifault "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/fault" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/prometheus" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger" @@ -54,6 +55,11 @@ const ( // used only for listView noTimeout = 0 * time.Minute + // VolumeOperationTimeoutInSeconds specifies the default CSI operation timeout in seconds + VolumeOperationTimeoutInSeconds = 300 + + listviewAdditionError = "failed to add task to list view" + // defaultOpsExpirationTimeInHours is expiration time for create volume operations. // TODO: This timeout will be configurable in future releases defaultOpsExpirationTimeInHours = 1 @@ -132,6 +138,8 @@ type Manager interface { task *object.Task, volNameFromInputSpec string, clusterID string) (*CnsVolumeInfo, string, error) // GetOperationStore returns the VolumeOperationRequest interface GetOperationStore() cnsvolumeoperationrequest.VolumeOperationRequest + // LogoutListViewVCSession logout current vCenter session for list-view + LogoutListViewVCSession(ctx context.Context) error } // CnsVolumeInfo hold information related to volume created by CNS. 
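The reordered AddTask above registers the task in the internal map before calling ListView.Add and rolls the map entry back if the add fails, so a task can never be present in the listView without a channel waiting for its result. A minimal sketch of that insert-then-rollback pattern with stand-in types, not the driver's real TaskDetails map:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

type taskMap struct{ m sync.Map }

func (t *taskMap) Upsert(key string, ch chan string) { t.m.Store(key, ch) }
func (t *taskMap) Delete(key string)                 { t.m.Delete(key) }

// addTask mirrors the ordering in ListViewImpl.AddTask: record the result
// channel first, then register with the (possibly failing) external view,
// and undo the map entry on failure so no orphan entries are left behind.
func addTask(tasks *taskMap, moRef string, ch chan string, addToView func(string) error) error {
	tasks.Upsert(moRef, ch)
	if err := addToView(moRef); err != nil {
		tasks.Delete(moRef)
		return fmt.Errorf("failed to add task to ListView. error: %w", err)
	}
	return nil
}

func main() {
	tasks := &taskMap{}
	err := addTask(tasks, "task-42", make(chan string), func(string) error {
		return errors.New("vc unreachable")
	})
	fmt.Println(err) // the map entry was rolled back before returning
}
```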
@@ -305,10 +313,7 @@ func (m *defaultManager) ResetManager(ctx context.Context, vcenter *cnsvsphere.V log.Infof("Re-initializing defaultManager.virtualCenter") managerInstance.virtualCenter = vcenter if m.tasksListViewEnabled { - err := m.listViewIf.SetVirtualCenter(ctx, managerInstance.virtualCenter) - if err != nil { - return logger.LogNewErrorf(log, "failed to set virtual center to listView instance. err: %v", err) - } + m.listViewIf.SetVirtualCenter(ctx, managerInstance.virtualCenter) } if m.virtualCenter.Client != nil { m.virtualCenter.Client.Timeout = time.Duration(vcenter.Config.VCClientTimeout) * time.Minute @@ -386,6 +391,7 @@ func (m *defaultManager) MonitorCreateVolumeTask(ctx context.Context, return nil, ExtractFaultTypeFromErr(ctx, err), err } + // WaitForResult can fail for many reasons, including: // - CNS restarted and marked "InProgress" tasks as "Failed". // - Any failures from CNS. @@ -494,9 +500,9 @@ func (m *defaultManager) createVolumeWithImprovedIdempotency(ctx context.Context }, }, "", nil } + // Validate if previous operation is pending. - if volumeOperationDetails.OperationDetails.TaskStatus == taskInvocationStatusInProgress && - volumeOperationDetails.OperationDetails.TaskID != "" { + if IsTaskPending(volumeOperationDetails) { log.Infof("Volume with name %s has CreateVolume task %s pending on CNS.", volNameFromInputSpec, volumeOperationDetails.OperationDetails.TaskID) @@ -563,6 +569,21 @@ func (m *defaultManager) createVolumeWithImprovedIdempotency(ctx context.Context return resp, faultType, err } +// IsTaskPending returns true in two cases - +// 1. if the task status was in progress +// 2. if the status was an error but the error was for adding the task to the listview +// (as we don't know the status of the task on CNS) +func IsTaskPending(volumeOperationDetails *cnsvolumeoperationrequest.VolumeOperationRequestDetails) bool { + if volumeOperationDetails.OperationDetails.TaskStatus == taskInvocationStatusInProgress && + volumeOperationDetails.OperationDetails.TaskID != "" { + return true + } else if volumeOperationDetails.OperationDetails.TaskStatus == taskInvocationStatusError && + strings.Contains(volumeOperationDetails.OperationDetails.Error, listviewAdditionError) { + return true + } + return false +} + func (m *defaultManager) waitOnTask(csiOpContext context.Context, taskMoRef vim25types.ManagedObjectReference) (*vim25types.TaskInfo, error) { log := logger.GetLogger(csiOpContext) @@ -575,7 +596,7 @@ func (m *defaultManager) waitOnTask(csiOpContext context.Context, ch := make(chan TaskResult) err := m.listViewIf.AddTask(csiOpContext, taskMoRef, ch) if err != nil { - return nil, logger.LogNewErrorf(log, "failed to add task to list view. err: %v", err) + return nil, logger.LogNewErrorf(log, "%s. 
err: %v", listviewAdditionError, err) } // deferring removal of task after response from CNS @@ -590,7 +611,6 @@ func (m *defaultManager) waitOnTask(csiOpContext context.Context, } } }() - return waitForResultOrTimeout(csiOpContext, taskMoRef, ch) } @@ -614,6 +634,7 @@ func waitForResultOrTimeout(csiOpContext context.Context, taskMoRef vim25types.M func (m *defaultManager) initListView() error { ctx := logger.NewContextWithLogger(context.Background()) + log := logger.GetLogger(ctx) log.Debugf("Initializing new listView object for vc: %+v", m.virtualCenter) if m.virtualCenter.Client == nil { @@ -624,7 +645,18 @@ func (m *defaultManager) initListView() error { } } - govmomiClient, err := m.virtualCenter.NewClient(ctx) + err := cnsvsphere.ReadVCConfigs(ctx, m.virtualCenter) + if err != nil { + return logger.LogNewErrorf(log, "failed to read VC config. err: %v", err) + } + + useragent, err := config.GetSessionUserAgent(ctx) + if err != nil { + return logger.LogNewErrorf(log, "failed to get useragent for vCenter session. error: %+v", err) + } + useragent = useragent + "-listview" + + govmomiClient, err := m.virtualCenter.NewClient(ctx, useragent) if err != nil { return logger.LogNewErrorf(log, "failed to create a separate govmomi client for listView. error: %+v", err) } @@ -740,6 +772,8 @@ func (m *defaultManager) createVolume(ctx context.Context, spec *cnstypes.CnsVol // CreateVolume creates a new volume given its spec. func (m *defaultManager) CreateVolume(ctx context.Context, spec *cnstypes.CnsVolumeCreateSpec) (*CnsVolumeInfo, string, error) { + ctx, cancelFunc := ensureOperationContextHasATimeout(ctx) + defer cancelFunc() internalCreateVolume := func() (*CnsVolumeInfo, string, error) { log := logger.GetLogger(ctx) var faultType string @@ -776,9 +810,23 @@ func (m *defaultManager) CreateVolume(ctx context.Context, spec *cnstypes.CnsVol return resp, faultType, err } +// ensureOperationContextHasATimeout checks if the passed context has a timeout associated with it. +// If there is no timeout set, we set it to 300 seconds. This is the same as set by sidecars. +// If a timeout is already set, we don't change it. +func ensureOperationContextHasATimeout(ctx context.Context) (context.Context, context.CancelFunc) { + _, ok := ctx.Deadline() + if !ok { + // no timeout is set, so we need to set it + return context.WithTimeout(ctx, VolumeOperationTimeoutInSeconds*time.Second) + } + return context.WithCancel(ctx) +} + // AttachVolume attaches a volume to a virtual machine given the spec. func (m *defaultManager) AttachVolume(ctx context.Context, vm *cnsvsphere.VirtualMachine, volumeID string, checkNVMeController bool) (string, string, error) { + ctx, cancelFunc := ensureOperationContextHasATimeout(ctx) + defer cancelFunc() internalAttachVolume := func() (string, string, error) { log := logger.GetLogger(ctx) var faultType string @@ -885,6 +933,8 @@ func (m *defaultManager) AttachVolume(ctx context.Context, // DetachVolume detaches a volume from the virtual machine given the spec. func (m *defaultManager) DetachVolume(ctx context.Context, vm *cnsvsphere.VirtualMachine, volumeID string) (string, error) { + ctx, cancelFunc := ensureOperationContextHasATimeout(ctx) + defer cancelFunc() internalDetachVolume := func() (string, error) { log := logger.GetLogger(ctx) var faultType string @@ -1025,6 +1075,8 @@ func (m *defaultManager) DetachVolume(ctx context.Context, vm *cnsvsphere.Virtua // DeleteVolume deletes a volume given its spec. 
func (m *defaultManager) DeleteVolume(ctx context.Context, volumeID string, deleteDisk bool) (string, error) { + ctx, cancelFunc := ensureOperationContextHasATimeout(ctx) + defer cancelFunc() internalDeleteVolume := func() (string, error) { log := logger.GetLogger(ctx) var faultType string @@ -1115,6 +1167,12 @@ func (m *defaultManager) deleteVolume(ctx context.Context, volumeID string, dele volumeOperationRes := taskResult.GetCnsVolumeOperationResult() if volumeOperationRes.Fault != nil { faultType = ExtractFaultTypeFromVolumeResponseResult(ctx, volumeOperationRes) + // If volume is not found on host, but is present in CNS DB, we will get vim.fault.NotFound fault. + // Send back success as the volume is already deleted. + if IsNotFoundFault(ctx, faultType) { + log.Infof("DeleteVolume: VolumeID %q, not found, thus returning success", volumeID) + return "", nil + } return faultType, logger.LogNewErrorf(log, "failed to delete volume: %q, fault: %q, opID: %q", volumeID, spew.Sdump(volumeOperationRes.Fault), taskInfo.ActivationId) } @@ -1157,8 +1215,7 @@ func (m *defaultManager) deleteVolumeWithImprovedIdempotency(ctx context.Context return "", nil } // Validate if previous operation is pending. - if volumeOperationDetails.OperationDetails.TaskStatus == taskInvocationStatusInProgress && - volumeOperationDetails.OperationDetails.TaskID != "" { + if IsTaskPending(volumeOperationDetails) { taskMoRef := vim25types.ManagedObjectReference{ Type: "Task", Value: volumeOperationDetails.OperationDetails.TaskID, @@ -1275,6 +1332,14 @@ func (m *defaultManager) deleteVolumeWithImprovedIdempotency(ctx context.Context volumeOperationRes := taskResult.GetCnsVolumeOperationResult() if volumeOperationRes.Fault != nil { faultType = ExtractFaultTypeFromVolumeResponseResult(ctx, volumeOperationRes) + + // If volume is not found on host, but is present in CNS DB, we will get vim.fault.NotFound fault. + // In such a case, send back success as the volume is already deleted. + if IsNotFoundFault(ctx, faultType) { + log.Infof("DeleteVolume: VolumeID %q, not found, thus returning success", volumeID) + return "", nil + } + msg := fmt.Sprintf("failed to delete volume: %q, fault: %q, opID: %q", volumeID, spew.Sdump(volumeOperationRes.Fault), taskInfo.ActivationId) volumeOperationDetails = createRequestDetails(instanceName, "", "", 0, @@ -1293,6 +1358,8 @@ func (m *defaultManager) deleteVolumeWithImprovedIdempotency(ctx context.Context // UpdateVolumeMetadata updates a volume given its spec. func (m *defaultManager) UpdateVolumeMetadata(ctx context.Context, spec *cnstypes.CnsVolumeMetadataUpdateSpec) error { + ctx, cancelFunc := ensureOperationContextHasATimeout(ctx) + defer cancelFunc() internalUpdateVolumeMetadata := func() error { log := logger.GetLogger(ctx) err := validateManager(ctx, m) @@ -1382,6 +1449,8 @@ func (m *defaultManager) UpdateVolumeMetadata(ctx context.Context, spec *cnstype // ExpandVolume expands a volume given its spec. 
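Both delete paths above now treat a vim.fault.NotFound returned by CNS as an already-completed delete, which keeps DeleteVolume idempotent when the backing disk is gone but a CNS DB entry lingered. A compact sketch of that decision working on fault strings only, with no CNS calls:

```go
package main

import "fmt"

// isNotFoundFault mirrors IsNotFoundFault: a plain string comparison against
// the vim.fault.NotFound fault type extracted from the CNS task result.
func isNotFoundFault(faultType string) bool {
	return faultType == "vim.fault.NotFound"
}

// classifyDeleteResult returns nil (success) for NotFound faults and an error
// for everything else, matching the behaviour added to the delete paths above.
func classifyDeleteResult(volumeID, faultType string) error {
	if faultType == "" || isNotFoundFault(faultType) {
		return nil // volume already gone: report success to the caller
	}
	return fmt.Errorf("failed to delete volume %q, fault: %q", volumeID, faultType)
}

func main() {
	fmt.Println(classifyDeleteResult("vol-1", "vim.fault.NotFound")) // <nil>
	fmt.Println(classifyDeleteResult("vol-1", "vim.fault.InvalidState"))
}
```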
func (m *defaultManager) ExpandVolume(ctx context.Context, volumeID string, size int64) (string, error) { + ctx, cancelFunc := ensureOperationContextHasATimeout(ctx) + defer cancelFunc() internalExpandVolume := func() (string, error) { log := logger.GetLogger(ctx) var faultType string @@ -1513,8 +1582,7 @@ func (m *defaultManager) expandVolumeWithImprovedIdempotency(ctx context.Context log.Infof("Volume with ID %s already expanded to size %v", volumeID, size) return "", nil } - if volumeOperationDetails.OperationDetails.TaskStatus == taskInvocationStatusInProgress && - volumeOperationDetails.OperationDetails.TaskID != "" { + if IsTaskPending(volumeOperationDetails) { log.Infof("Volume with ID %s has ExtendVolume task %s pending on CNS.", volumeID, volumeOperationDetails.OperationDetails.TaskID) @@ -1671,6 +1739,8 @@ func (m *defaultManager) expandVolumeWithImprovedIdempotency(ctx context.Context // QueryVolume returns volumes matching the given filter. func (m *defaultManager) QueryVolume(ctx context.Context, queryFilter cnstypes.CnsQueryFilter) (*cnstypes.CnsQueryResult, error) { + ctx, cancelFunc := ensureOperationContextHasATimeout(ctx) + defer cancelFunc() internalQueryVolume := func() (*cnstypes.CnsQueryResult, error) { log := logger.GetLogger(ctx) err := validateManager(ctx, m) @@ -1707,6 +1777,8 @@ func (m *defaultManager) QueryVolume(ctx context.Context, // QueryAllVolume returns all volumes matching the given filter and selection. func (m *defaultManager) QueryAllVolume(ctx context.Context, queryFilter cnstypes.CnsQueryFilter, querySelection cnstypes.CnsQuerySelection) (*cnstypes.CnsQueryResult, error) { + ctx, cancelFunc := ensureOperationContextHasATimeout(ctx) + defer cancelFunc() internalQueryAllVolume := func() (*cnstypes.CnsQueryResult, error) { log := logger.GetLogger(ctx) err := validateManager(ctx, m) @@ -1744,6 +1816,8 @@ func (m *defaultManager) QueryAllVolume(ctx context.Context, queryFilter cnstype // which CnsQueryVolumeInfoResult is extracted. func (m *defaultManager) QueryVolumeInfo(ctx context.Context, volumeIDList []cnstypes.CnsVolumeId) (*cnstypes.CnsQueryVolumeInfoResult, error) { + ctx, cancelFunc := ensureOperationContextHasATimeout(ctx) + defer cancelFunc() internalQueryVolumeInfo := func() (*cnstypes.CnsQueryVolumeInfoResult, error) { log := logger.GetLogger(ctx) err := validateManager(ctx, m) @@ -1811,6 +1885,8 @@ func (m *defaultManager) QueryVolumeInfo(ctx context.Context, func (m *defaultManager) RelocateVolume(ctx context.Context, relocateSpecList ...cnstypes.BaseCnsVolumeRelocateSpec) (*object.Task, error) { + ctx, cancelFunc := ensureOperationContextHasATimeout(ctx) + defer cancelFunc() internalRelocateVolume := func() (*object.Task, error) { log := logger.GetLogger(ctx) err := validateManager(ctx, m) @@ -1846,6 +1922,8 @@ func (m *defaultManager) RelocateVolume(ctx context.Context, // ConfigureVolumeACLs configures net permissions for a given CnsVolumeACLConfigureSpec. func (m *defaultManager) ConfigureVolumeACLs(ctx context.Context, spec cnstypes.CnsVolumeACLConfigureSpec) error { + ctx, cancelFunc := ensureOperationContextHasATimeout(ctx) + defer cancelFunc() internalConfigureVolumeACLs := func() error { log := logger.GetLogger(ctx) err := validateManager(ctx, m) @@ -1982,6 +2060,8 @@ func (m *defaultManager) RetrieveVStorageObject(ctx context.Context, // parameters are not specified. 
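IsTaskPending, used wherever the improved-idempotency paths check for a previous attempt, deliberately counts an attempt that failed only at the "add to list view" step as still pending, because the task may have succeeded on CNS even though the driver never saw its result. A small sketch of both branches, mirroring the helper with local stand-in types:

```go
package main

import (
	"fmt"
	"strings"
)

const listviewAdditionError = "failed to add task to list view"

type opDetails struct {
	TaskID     string
	TaskStatus string // "InProgress", "Error", "Success"
	Error      string
}

// isTaskPending mirrors IsTaskPending: a task is pending if it was recorded as
// in progress, or if it errored only because the listview registration failed
// (in which case its real outcome on CNS is unknown and must be re-checked).
func isTaskPending(d opDetails) bool {
	if d.TaskStatus == "InProgress" && d.TaskID != "" {
		return true
	}
	return d.TaskStatus == "Error" && strings.Contains(d.Error, listviewAdditionError)
}

func main() {
	cases := []opDetails{
		{TaskID: "task-1", TaskStatus: "InProgress"},
		{TaskID: "task-2", TaskStatus: "Error", Error: listviewAdditionError + ". err: timeout"},
		{TaskID: "task-3", TaskStatus: "Error", Error: "CNS fault"},
	}
	for _, c := range cases {
		fmt.Printf("%s pending=%v\n", c.TaskID, isTaskPending(c))
	}
}
```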
func (m *defaultManager) QueryVolumeAsync(ctx context.Context, queryFilter cnstypes.CnsQueryFilter, querySelection *cnstypes.CnsQuerySelection) (*cnstypes.CnsQueryResult, error) { + ctx, cancelFunc := ensureOperationContextHasATimeout(ctx) + defer cancelFunc() log := logger.GetLogger(ctx) err := validateManager(ctx, m) if err != nil { @@ -2046,6 +2126,8 @@ func (m *defaultManager) QueryVolumeAsync(ctx context.Context, queryFilter cnsty func (m *defaultManager) QuerySnapshots(ctx context.Context, snapshotQueryFilter cnstypes.CnsSnapshotQueryFilter) ( *cnstypes.CnsSnapshotQueryResult, error) { + ctx, cancelFunc := ensureOperationContextHasATimeout(ctx) + defer cancelFunc() internalQuerySnapshots := func() (*cnstypes.CnsSnapshotQueryResult, error) { log := logger.GetLogger(ctx) err := validateManager(ctx, m) @@ -2134,8 +2216,7 @@ func (m *defaultManager) createSnapshotWithImprovedIdempotencyCheck(ctx context. }, nil } // Validate if previous operation is pending. - if volumeOperationDetails.OperationDetails.TaskStatus == taskInvocationStatusInProgress && - volumeOperationDetails.OperationDetails.TaskID != "" { + if IsTaskPending(volumeOperationDetails) { log.Infof("Snapshot with name %s has CreateSnapshot task %s pending on CNS.", instanceName, volumeOperationDetails.OperationDetails.TaskID) @@ -2320,6 +2401,8 @@ func (m *defaultManager) createSnapshotWithImprovedIdempotencyCheck(ctx context. // which is generated by the CSI snapshotter sidecar. func (m *defaultManager) CreateSnapshot( ctx context.Context, volumeID string, snapshotName string) (*CnsSnapshotInfo, error) { + ctx, cancelFunc := ensureOperationContextHasATimeout(ctx) + defer cancelFunc() internalCreateSnapshot := func() (*CnsSnapshotInfo, error) { log := logger.GetLogger(ctx) err := validateManager(ctx, m) @@ -2379,8 +2462,7 @@ func (m *defaultManager) deleteSnapshotWithImprovedIdempotencyCheck( return nil } // Validate if previous operation is pending. 
- if volumeOperationDetails.OperationDetails.TaskStatus == taskInvocationStatusInProgress && - volumeOperationDetails.OperationDetails.TaskID != "" { + if IsTaskPending(volumeOperationDetails) { taskMoRef := vim25types.ManagedObjectReference{ Type: "Task", Value: volumeOperationDetails.OperationDetails.TaskID, @@ -2544,6 +2626,8 @@ func (m *defaultManager) deleteSnapshotWithImprovedIdempotencyCheck( } func (m *defaultManager) DeleteSnapshot(ctx context.Context, volumeID string, snapshotID string) error { + ctx, cancelFunc := ensureOperationContextHasATimeout(ctx) + defer cancelFunc() internalDeleteSnapshot := func() error { log := logger.GetLogger(ctx) err := validateManager(ctx, m) @@ -2596,3 +2680,23 @@ func (m *defaultManager) ProtectVolumeFromVMDeletion(ctx context.Context, volume log.Infof("Successfully set keepAfterDeleteVm control flag for volumeID: %q", volumeID) return nil } + +func (m *defaultManager) LogoutListViewVCSession(ctx context.Context) error { + log := logger.GetLogger(ctx) + if m.listViewIf != nil { + log.Info("Logging out list view vCenter session") + return m.listViewIf.LogoutSession(ctx) + } + return nil +} + +// GetAllManagerInstances returns all Manager instances +func GetAllManagerInstances(ctx context.Context) map[string]*defaultManager { + newManagerInstanceMap := make(map[string]*defaultManager) + if len(managerInstanceMap) != 0 { + newManagerInstanceMap = managerInstanceMap + } else if managerInstance != nil { + newManagerInstanceMap[managerInstance.virtualCenter.Config.Host] = managerInstance + } + return newManagerInstanceMap +} diff --git a/pkg/common/cns-lib/volume/util.go b/pkg/common/cns-lib/volume/util.go index db0e3bd121..8fc0bdf7bc 100644 --- a/pkg/common/cns-lib/volume/util.go +++ b/pkg/common/cns-lib/volume/util.go @@ -37,10 +37,6 @@ import ( "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger" ) -const ( - vimFaultPrefix = "vim.fault." -) - func validateManager(ctx context.Context, m *defaultManager) error { log := logger.GetLogger(ctx) if m.virtualCenter == nil { @@ -61,6 +57,9 @@ func IsDiskAttached(ctx context.Context, vm *cnsvsphere.VirtualMachine, volumeID log.Errorf("failed to get devices from vm: %s", vm.InventoryPath) return "", err } + if len(vmDevices) == 0 { + return "", logger.LogNewErrorf(log, "virtual devices list is empty for the vm: %s", vm.InventoryPath) + } // Build a map of NVME Controller key : NVME controller name. // This is needed to check if disk in contention is attached to a NVME // controller. The virtual disk devices do not contain the controller type @@ -350,7 +349,7 @@ func ExtractFaultTypeFromErr(ctx context.Context, err error) string { faultType = reflect.TypeOf(soapFault.VimFault()).String() log.Infof("Extract vimfault type: +%v. 
SoapFault Info: +%v from err +%v", faultType, soapFault, err) slice := strings.Split(faultType, ".") - vimFaultType := vimFaultPrefix + slice[1] + vimFaultType := csifault.VimFaultPrefix + slice[1] return vimFaultType } log.Infof("err %+v is not a SoapFault\n", err) @@ -374,7 +373,7 @@ func ExtractFaultTypeFromVolumeResponseResult(ctx context.Context, log.Infof("Extract vimfault type: %+v vimFault: %+v Fault: %+v from resp: %+v", faultType, fault.Fault, fault, resp) slice := strings.Split(faultType, ".") - vimFaultType := vimFaultPrefix + slice[1] + vimFaultType := csifault.VimFaultPrefix + slice[1] return vimFaultType } else { faultType = reflect.TypeOf(fault).String() @@ -550,3 +549,11 @@ func queryCreatedSnapshotByName(ctx context.Context, m *defaultManager, volumeID } return nil, false } + +// IsNotFoundFault returns true if a given faultType value is vim.fault.NotFound +func IsNotFoundFault(ctx context.Context, faultType string) bool { + log := logger.GetLogger(ctx) + log.Infof("Checking fault type: %q is vim.fault.NotFound", faultType) + return faultType == "vim.fault.NotFound" + +} diff --git a/pkg/common/cns-lib/vsphere/cluster_compute_resource.go b/pkg/common/cns-lib/vsphere/cluster_compute_resource.go new file mode 100644 index 0000000000..c2d8244af5 --- /dev/null +++ b/pkg/common/cns-lib/vsphere/cluster_compute_resource.go @@ -0,0 +1,50 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vsphere + +import ( + "context" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/mo" + + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger" +) + +// ClusterComputeResource holds details of a cluster instance. +type ClusterComputeResource struct { + // ClusterComputeResource represents a vSphere cluster. + *object.ClusterComputeResource + // VirtualCenterHost denotes the virtual center host address. + VirtualCenterHost string +} + +// GetHosts fetches the hosts under the ClusterComputeResource. 
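The fault helpers above reduce a concrete vim fault type reported in a SOAP fault (for example types.NotFound) to the string form "vim.fault.NotFound" by splitting the Go type name and re-prefixing it with VimFaultPrefix, and IsNotFoundFault then compares against that string. A string-only sketch of the mapping; the govmomi and reflection plumbing is omitted and the input is assumed to have the "pkg.Type" shape:

```go
package main

import (
	"fmt"
	"strings"
)

const vimFaultPrefix = "vim.fault."

// vimFaultTypeFromGoType sketches the extraction used by ExtractFaultTypeFromErr:
// a reflected type name such as "types.NotFound" is split on "." and the part
// after the package qualifier is re-prefixed with vim.fault.
func vimFaultTypeFromGoType(goTypeName string) string {
	parts := strings.Split(goTypeName, ".")
	if len(parts) < 2 {
		return goTypeName
	}
	return vimFaultPrefix + parts[1]
}

func isNotFoundFault(faultType string) bool { return faultType == "vim.fault.NotFound" }

func main() {
	ft := vimFaultTypeFromGoType("types.NotFound")
	fmt.Println(ft, isNotFoundFault(ft)) // vim.fault.NotFound true
	fmt.Println(vimFaultTypeFromGoType("types.InvalidHostState"))
}
```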
+func (ccr *ClusterComputeResource) GetHosts(ctx context.Context) ([]*HostSystem, error) { + log := logger.GetLogger(ctx) + cluster := mo.ClusterComputeResource{} + err := ccr.Properties(ctx, ccr.Reference(), []string{"host"}, &cluster) + if err != nil { + return nil, logger.LogNewErrorf(log, + "failed to retrieve host property for cluster %+v", ccr.Reference()) + } + var hostList []*HostSystem + for _, host := range cluster.Host { + hostList = append(hostList, &HostSystem{HostSystem: object.NewHostSystem(ccr.Client(), host)}) + } + return hostList, nil +} diff --git a/pkg/common/cns-lib/vsphere/hostsystem.go b/pkg/common/cns-lib/vsphere/hostsystem.go index 1547e02af2..f8670050e4 100644 --- a/pkg/common/cns-lib/vsphere/hostsystem.go +++ b/pkg/common/cns-lib/vsphere/hostsystem.go @@ -19,6 +19,7 @@ package vsphere import ( "context" "encoding/json" + "errors" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger" @@ -29,6 +30,8 @@ import ( "github.com/vmware/govmomi/vim25/types" ) +var ErrNoSharedDSFound = errors.New("no shared datastores found among given hosts") + // HostSystem holds details of a host instance. type HostSystem struct { // HostSystem represents the host system. @@ -40,8 +43,7 @@ type HostSystem struct { func (host *HostSystem) GetAllAccessibleDatastores(ctx context.Context) ([]*DatastoreInfo, error) { log := logger.GetLogger(ctx) var hostSystemMo mo.HostSystem - s := object.NewSearchIndex(host.Client()) - err := s.Properties(ctx, host.Reference(), []string{"datastore"}, &hostSystemMo) + err := host.Properties(ctx, host.Reference(), []string{"datastore"}, &hostSystemMo) if err != nil { log.Errorf("failed to retrieve datastores for host %v with err: %v", host, err) return nil, err @@ -182,3 +184,38 @@ func (host *HostSystem) GetHostVsanCapacity(ctx context.Context) (*VsanHostCapac } return &out, nil } + +// GetSharedDatastoresForHosts returns shared datastores accessible to hosts mentioned in the input parameter. +func GetSharedDatastoresForHosts(ctx context.Context, hosts []*HostSystem) ([]*DatastoreInfo, error) { + log := logger.GetLogger(ctx) + var sharedDatastores []*DatastoreInfo + + for _, host := range hosts { + accessibleDatastores, err := host.GetAllAccessibleDatastores(ctx) + if err != nil { + return nil, logger.LogNewErrorf(log, "failed to fetch datastores from host %+v. Error: %+v", + host, err) + } + if len(sharedDatastores) == 0 { + sharedDatastores = accessibleDatastores + } else { + var sharedAccessibleDatastores []*DatastoreInfo + for _, sharedDs := range sharedDatastores { + // Check if sharedDatastores is found in accessibleDatastores. + for _, accessibleDs := range accessibleDatastores { + // Intersection is performed based on the datastoreUrl as this + // uniquely identifies the datastore. + if accessibleDs.Info.Url == sharedDs.Info.Url { + sharedAccessibleDatastores = append(sharedAccessibleDatastores, sharedDs) + break + } + } + } + sharedDatastores = sharedAccessibleDatastores + } + if len(sharedDatastores) == 0 { + return nil, ErrNoSharedDSFound + } + } + return sharedDatastores, nil +} diff --git a/pkg/common/cns-lib/vsphere/utils.go b/pkg/common/cns-lib/vsphere/utils.go index fd39b38d6b..f59461b5d0 100644 --- a/pkg/common/cns-lib/vsphere/utils.go +++ b/pkg/common/cns-lib/vsphere/utils.go @@ -382,10 +382,12 @@ func signer(ctx context.Context, client *vim25.Client, username string, password // GetTagManager returns tagManager connected to given VirtualCenter. 
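GetSharedDatastoresForHosts computes the running intersection of each host's accessible datastores, keyed by datastore URL, and returns ErrNoSharedDSFound as soon as that intersection becomes empty. A compile-only usage sketch combining it with ClusterComputeResource.GetHosts; the ClusterComputeResource value is assumed to be already constructed (obtaining one needs a live vCenter connection), and the error handling is illustrative:

```go
package storageutil

import (
	"context"
	"errors"
	"fmt"

	cnsvsphere "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/vsphere"
)

// sharedDatastoreURLsForCluster lists the hosts in a cluster and returns the
// URLs of the datastores that every host in that cluster can access.
func sharedDatastoreURLsForCluster(ctx context.Context,
	ccr *cnsvsphere.ClusterComputeResource) ([]string, error) {
	hosts, err := ccr.GetHosts(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to list hosts: %w", err)
	}
	shared, err := cnsvsphere.GetSharedDatastoresForHosts(ctx, hosts)
	if errors.Is(err, cnsvsphere.ErrNoSharedDSFound) {
		return nil, nil // no datastore is visible to all hosts in this cluster
	} else if err != nil {
		return nil, err
	}
	urls := make([]string, 0, len(shared))
	for _, ds := range shared {
		urls = append(urls, ds.Info.Url) // intersection is keyed on the datastore URL
	}
	return urls, nil
}
```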
func GetTagManager(ctx context.Context, vc *VirtualCenter) (*tags.Manager, error) { + log := logger.GetLogger(ctx) // Validate input. if vc == nil || vc.Client == nil || vc.Client.Client == nil { return nil, fmt.Errorf("vCenter not initialized") } + restClient := rest.NewClient(vc.Client.Client) signer, err := signer(ctx, vc.Client.Client, vc.Config.Username, vc.Config.Password) if err != nil { @@ -404,6 +406,7 @@ func GetTagManager(ctx context.Context, vc *VirtualCenter) (*tags.Manager, error if tagManager == nil { return nil, fmt.Errorf("failed to create a tagManager") } + log.Infof("New tag manager with useragent '%s'", tagManager.UserAgent) return tagManager, nil } diff --git a/pkg/common/cns-lib/vsphere/virtualcenter.go b/pkg/common/cns-lib/vsphere/virtualcenter.go index 6f5d5ae801..be0fd71445 100644 --- a/pkg/common/cns-lib/vsphere/virtualcenter.go +++ b/pkg/common/cns-lib/vsphere/virtualcenter.go @@ -28,23 +28,23 @@ import ( "sync" "time" - "github.com/vmware/govmomi/cns" - "github.com/vmware/govmomi/property" - "github.com/vmware/govmomi/vsan" - "github.com/vmware/govmomi/vslm" - "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/config" - "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger" - "github.com/vmware/govmomi" + "github.com/vmware/govmomi/cns" "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/pbm" + "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/session" "github.com/vmware/govmomi/sts" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" + "github.com/vmware/govmomi/vsan" + "github.com/vmware/govmomi/vslm" + + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/config" + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger" ) const ( @@ -141,7 +141,7 @@ type VirtualCenterConfig struct { } // NewClient creates a new govmomi Client instance. -func (vc *VirtualCenter) NewClient(ctx context.Context) (*govmomi.Client, error) { +func (vc *VirtualCenter) NewClient(ctx context.Context, useragent string) (*govmomi.Client, error) { log := logger.GetLogger(ctx) if vc.Config.Scheme == "" { vc.Config.Scheme = DefaultScheme @@ -177,7 +177,7 @@ func (vc *VirtualCenter) NewClient(ctx context.Context) (*govmomi.Client, error) log.Errorf("Failed to set vimClient service version to vsan. err: %v", err) return nil, err } - vimClient.UserAgent = "k8s-csi-useragent" + vimClient.UserAgent = useragent client := &govmomi.Client{ Client: vimClient, SessionManager: session.NewManager(vimClient), @@ -185,6 +185,7 @@ func (vc *VirtualCenter) NewClient(ctx context.Context) (*govmomi.Client, error) err = vc.login(ctx, client) if err != nil { + log.Errorf("failed to login to vc. err: %v", err) return nil, err } @@ -279,9 +280,20 @@ func (vc *VirtualCenter) connect(ctx context.Context, requestNewSession bool) er // If client was never initialized, initialize one. var err error + useragent, err := config.GetSessionUserAgent(ctx) + if err != nil { + log.Errorf("failed to get useragent for vCenter session. 
error: %+v", err) + return err + } if vc.Client == nil { + if vc.Config.ReloadVCConfigForNewClient { + err = ReadVCConfigs(ctx, vc) + if err != nil { + return err + } + } log.Infof("VirtualCenter.connect() creating new client") - if vc.Client, err = vc.NewClient(ctx); err != nil { + if vc.Client, err = vc.NewClient(ctx, useragent); err != nil { log.Errorf("failed to create govmomi client with err: %v", err) if !vc.Config.Insecure { log.Errorf("failed to connect to vCenter using CA file: %q", vc.Config.CAFile) @@ -307,30 +319,12 @@ func (vc *VirtualCenter) connect(ctx context.Context, requestNewSession bool) er // If session has expired, create a new instance. log.Infof("Creating a new client session as the existing one isn't valid or not authenticated") if vc.Config.ReloadVCConfigForNewClient { - log.Info("Reloading latest VC config from vSphere Config Secret") - cfg, err := config.GetConfig(ctx) - if err != nil { - return logger.LogNewErrorf(log, "failed to read config. Error: %+v", err) - } - var foundVCConfig bool - newVcenterConfigs, err := GetVirtualCenterConfigs(ctx, cfg) + err = ReadVCConfigs(ctx, vc) if err != nil { - return logger.LogNewErrorf(log, "failed to get VirtualCenterConfigs. err=%v", err) - } - for _, newvcconfig := range newVcenterConfigs { - if newvcconfig.Host == vc.Config.Host { - newvcconfig.ReloadVCConfigForNewClient = true - vc.Config = newvcconfig - log.Infof("Successfully set latest VC config for vcenter: %q", vc.Config.Host) - foundVCConfig = true - break - } - } - if !foundVCConfig { - return logger.LogNewErrorf(log, "failed to get vCenter config for Host: %q", vc.Config.Host) + return err } } - if vc.Client, err = vc.NewClient(ctx); err != nil { + if vc.Client, err = vc.NewClient(ctx, useragent); err != nil { log.Errorf("failed to create govmomi client with err: %v", err) if !vc.Config.Insecure { log.Errorf("failed to connect to vCenter using CA file: %q", vc.Config.CAFile) @@ -370,6 +364,37 @@ func (vc *VirtualCenter) connect(ctx context.Context, requestNewSession bool) er return nil } +// ReadVCConfigs will ensure we are always reading the latest config +// before attempting to create a new govmomi client. +// It works in case of both vanilla (including multi-vc) and wcp +func ReadVCConfigs(ctx context.Context, vc *VirtualCenter) error { + log := logger.GetLogger(ctx) + log.Infof("Reloading latest VC config from vSphere Config Secret for vcenter: %q", vc.Config.Host) + cfg, err := config.GetConfig(ctx) + if err != nil { + return logger.LogNewErrorf(log, "failed to read config. Error: %+v", err) + } + var foundVCConfig bool + newVcenterConfigs, err := GetVirtualCenterConfigs(ctx, cfg) + if err != nil { + return logger.LogNewErrorf(log, "failed to get VirtualCenterConfigs. err=%v", err) + } + for _, newvcconfig := range newVcenterConfigs { + if newvcconfig.Host == vc.Config.Host { + newvcconfig.ReloadVCConfigForNewClient = true + vc.Config = newvcconfig + log.Infof("Successfully set latest VC config for vcenter: %q", vc.Config.Host) + foundVCConfig = true + break + } + } + if !foundVCConfig { + return logger.LogNewErrorf(log, "failed to get vCenter config for Host: %q", vc.Config.Host) + } + + return nil +} + // ListDatacenters returns all Datacenters. 
func (vc *VirtualCenter) ListDatacenters(ctx context.Context) ( []*Datacenter, error) { diff --git a/pkg/common/config/config.go b/pkg/common/config/config.go index dd008cc000..942354ba24 100644 --- a/pkg/common/config/config.go +++ b/pkg/common/config/config.go @@ -22,6 +22,7 @@ import ( "fmt" "io" "os" + "regexp" "strconv" "strings" @@ -107,6 +108,11 @@ var ( // ErrUsernameMissing is returned when the provided username is empty. ErrUsernameMissing = errors.New("username is missing") + // ErrInvalidUsername is returned when vCenter username provided in vSphere config + // secret is invalid. e.g. If username is not a fully qualified domain name, then + // it will be considered as invalid username. + ErrInvalidUsername = errors.New("username is invalid, make sure it is a fully qualified domain username") + // ErrPasswordMissing is returned when the provided password is empty. ErrPasswordMissing = errors.New("password is missing") @@ -322,6 +328,18 @@ func FromEnv(ctx context.Context, cfg *Config) error { return nil } +// Check if username is valid or not. If username is not a fully qualified domain name, then +// we consider it as an invalid username. +func isValidvCenterUsernameWithDomain(username string) bool { + // Regular expression to validate vCenter server username. + // Allowed username is in the format "userName@domainName" or "domainName\\userName". + // If domain name is not provided in username, then functions like HasUserPrivilegeOnEntities + // doesn't return any entity for given user and eventually volume creation fails. + regex := `^(?:[a-zA-Z0-9.-]+\\[a-zA-Z0-9._-]+|[a-zA-Z0-9._-]+@[a-zA-Z0-9.-]+)$` + match, _ := regexp.MatchString(regex, username) + return match +} + func validateConfig(ctx context.Context, cfg *Config) error { log := logger.GetLogger(ctx) // Fix default global values. @@ -369,6 +387,15 @@ func validateConfig(ctx context.Context, cfg *Config) error { return ErrUsernameMissing } } + + // vCenter server username provided in vSphere config secret should contain domain name, + // CSI driver will crash if username doesn't contain domain name. + if !isValidvCenterUsernameWithDomain(vcConfig.User) { + log.Errorf("username %v specified in vSphere config secret is invalid, "+ + "make sure that username is a fully qualified domain name.", vcConfig.User) + return ErrInvalidUsername + } + if vcConfig.Password == "" { vcConfig.Password = cfg.Global.Password if vcConfig.Password == "" { @@ -401,7 +428,7 @@ func validateConfig(ctx context.Context, cfg *Config) error { } if cfg.NetPermissions == nil { // If no net permissions are given, assume default. - log.Info("No Net Permissions given in Config. Using default permissions.") + log.Debug("No Net Permissions given in Config. Using default permissions.") if clusterFlavor == cnstypes.CnsClusterFlavorVanilla { cfg.NetPermissions = map[string]*NetPermissionConfig{"#": GetDefaultNetPermission()} } @@ -735,3 +762,31 @@ func GetConfigPath(ctx context.Context) string { } return cfgPath } + +// GetSessionUserAgent returns clusterwise unique useragent +func GetSessionUserAgent(ctx context.Context) (string, error) { + log := logger.GetLogger(ctx) + clusterFlavor, err := GetClusterFlavor(ctx) + if err != nil { + log.Errorf("failed retrieving cluster flavor. Error: %+v", err) + return "", err + } + cfg, err := GetConfig(ctx) + if err != nil { + log.Errorf("failed to read config. 
Error: %+v", err) + return "", err + } + useragent := "k8s-csi-useragent" + if clusterFlavor == cnstypes.CnsClusterFlavorVanilla { + if cfg.Global.ClusterID != "" { + useragent = useragent + "-" + cfg.Global.ClusterID + } else { + useragent = useragent + "-" + GeneratedVanillaClusterID + } + } else if clusterFlavor == cnstypes.CnsClusterFlavorWorkload { + if cfg.Global.SupervisorID != "" { + useragent = useragent + "-" + cfg.Global.SupervisorID + } + } + return useragent, nil +} diff --git a/pkg/common/config/config_test.go b/pkg/common/config/config_test.go index df8de3e787..23e23c77e2 100644 --- a/pkg/common/config/config_test.go +++ b/pkg/common/config/config_test.go @@ -35,7 +35,7 @@ func init() { defer cancel() idealVCConfig = map[string]*VirtualCenterConfig{ "1.1.1.1": { - User: "Admin", + User: "Administrator@vsphere.local", Password: "Password", VCenterPort: "443", Datacenters: "dc1", @@ -159,6 +159,66 @@ func TestValidateConfigWithInvalidClusterId(t *testing.T) { } } +func TestValidateConfigWithInvalidUsername(t *testing.T) { + vcConfigInvalidUsername := map[string]*VirtualCenterConfig{ + "1.1.1.1": { + User: "Administrator", + Password: "Password", + VCenterPort: "443", + Datacenters: "dc1", + InsecureFlag: true, + }, + } + cfg := &Config{ + VirtualCenter: vcConfigInvalidUsername, + } + + err := validateConfig(ctx, cfg) + if err == nil { + t.Errorf("Expected error due to invalid username. Config given - %+v", *cfg) + } +} + +func TestValidateConfigWithValidUsername1(t *testing.T) { + vcConfigValidUsername := map[string]*VirtualCenterConfig{ + "1.1.1.1": { + User: "Administrator@vsphere.local", + Password: "Password", + VCenterPort: "443", + Datacenters: "dc1", + InsecureFlag: true, + }, + } + cfg := &Config{ + VirtualCenter: vcConfigValidUsername, + } + + err := validateConfig(ctx, cfg) + if err != nil { + t.Errorf("Unexpected error, as valid username is specified. Config given - %+v", *cfg) + } +} + +func TestValidateConfigWithValidUsername2(t *testing.T) { + vcConfigValidUsername := map[string]*VirtualCenterConfig{ + "1.1.1.1": { + User: "vsphere.local\\Administrator", + Password: "Password", + VCenterPort: "443", + Datacenters: "dc1", + InsecureFlag: true, + }, + } + cfg := &Config{ + VirtualCenter: vcConfigValidUsername, + } + + err := validateConfig(ctx, cfg) + if err != nil { + t.Errorf("Unexpected error, as valid username is specified. Config given - %+v", *cfg) + } +} + func TestSnapshotConfigWhenMaxUnspecified(t *testing.T) { cfg := &Config{ VirtualCenter: idealVCConfig, diff --git a/pkg/common/fault/constants.go b/pkg/common/fault/constants.go index b3ce20b408..1c5612927c 100644 --- a/pkg/common/fault/constants.go +++ b/pkg/common/fault/constants.go @@ -19,6 +19,11 @@ const ( // CSITaskInfoEmptyFault is the fault type when taskInfo is empty. CSITaskInfoEmptyFault = "csi.fault.TaskInfoEmpty" + // CSINonStorageFaultPrefix is the prefix used for faults originating due to components other than + // downstream vSphere storage stack. + CSINonStorageFaultPrefix = "csi.fault.nonstorage." + // VimFaultPrefix is the prefix used for vim faults from downstream components. + VimFaultPrefix = "vim.fault." // CSIVmUuidNotFoundFault is the fault type when Pod VMs do not have the vmware-system-vm-uuid annotation. 
CSIVmUuidNotFoundFault = "csi.fault.nonstorage.VmUuidNotFound" // CSIVmNotFoundFault is the fault type when VM object is not found in the VC @@ -63,4 +68,13 @@ const ( CSIUnimplementedFault = "csi.fault.Unimplemented" // CSIInvalidStoragePolicyConfigurationFault is the fault type returned when the user provides invalid storage policy. CSIInvalidStoragePolicyConfigurationFault = "csi.fault.invalidconfig.InvalidStoragePolicyConfiguration" + + // Below is the list of faults coming from downstream vCenter components that we want to classify + // as non-storage faults. + + // VimFaultInvalidHostState is the fault returned from CNS when host is not in a state to perform the volume + // operation e.g. maintenance mode. + VimFaultInvalidHostState = VimFaultPrefix + "InvalidHostState" + // VimFaultHostNotConnected is the fault returned from CNS when host is not connected. + VimFaultHostNotConnected = VimFaultPrefix + "HostNotConnected" ) diff --git a/pkg/common/fault/util.go b/pkg/common/fault/util.go new file mode 100644 index 0000000000..0284adbfcf --- /dev/null +++ b/pkg/common/fault/util.go @@ -0,0 +1,48 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package fault + +import ( + "context" + + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger" +) + +var ( + // VimNonStorageFaultsList contains the list of faults that needs to classified as non-storage faults. + VimNonStorageFaultsList = []string{VimFaultInvalidHostState, VimFaultHostNotConnected} +) + +// IsNonStorageFault checks if the fault type is in a pre-defined list of non-storage faults +// and returns a bool value accordingly. +func IsNonStorageFault(fault string) bool { + for _, nonStorageFault := range VimNonStorageFaultsList { + if nonStorageFault == fault { + return true + } + } + return false +} + +// AddCsiNonStoragePrefix adds "csi.fault.nonstorage." prefix to the faults. 
+func AddCsiNonStoragePrefix(ctx context.Context, fault string) string { + log := logger.GetLogger(ctx) + if fault != "" { + log.Infof("Adding %q prefix to fault %q", CSINonStorageFaultPrefix, fault) + return CSINonStorageFaultPrefix + fault + } + return fault +} diff --git a/pkg/common/unittestcommon/utils.go b/pkg/common/unittestcommon/utils.go index d0625b9dee..6c14ee8bf5 100644 --- a/pkg/common/unittestcommon/utils.go +++ b/pkg/common/unittestcommon/utils.go @@ -54,6 +54,16 @@ func GetFakeContainerOrchestratorInterface(orchestratorType int) (commonco.COCom "tkgs-ha": "true", "list-volumes": "true", "csi-internal-generated-cluster-id": "true", + "online-volume-extend": "true", + "async-query-volume": "true", + "csi-windows-support": "true", + "use-csinode-id": "true", + "pv-to-backingdiskobjectid-mapping": "false", + "cnsmgr-suspend-create-volume": "true", + "topology-preferential-datastores": "true", + "max-pvscsi-targets-per-vm": "true", + "multi-vcenter-csi-topology": "true", + "listview-tasks": "true", }, } return fakeCO, nil @@ -294,3 +304,8 @@ func (c *FakeK8SOrchestrator) GetCSINodeTopologyInstanceByName(nodeName string) item interface{}, exists bool, err error) { return nil, false, nil } + +// GetPVCNamespaceFromVolumeID retrieves the pv name from volumeID. +func (c *FakeK8SOrchestrator) GetPVNameFromCSIVolumeID(volumeID string) (string, bool) { + return "", false +} diff --git a/pkg/common/utils/utils.go b/pkg/common/utils/utils.go index 1a3e386aed..03671f8299 100644 --- a/pkg/common/utils/utils.go +++ b/pkg/common/utils/utils.go @@ -22,7 +22,6 @@ import ( "strconv" cnstypes "github.com/vmware/govmomi/cns/types" - "github.com/vmware/govmomi/vim25/types" "google.golang.org/grpc/codes" cnsvolume "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/volume" cnsvsphere "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/vsphere" @@ -225,44 +224,34 @@ func QueryVolumeDetailsUtil(ctx context.Context, m cnsvolume.Manager, volumeIds return volumeDetailsMap, nil } -// GetDatastoreRefByURLFromGivenDatastoreList fetches the datastore reference by datastore URL -// from a list of datastore references. -// If the datastore with dsURL can be found in the same datacenter as the given VC -// and it is also found in the given datastoreList, return the reference of the datastore. -// Otherwise, return error. 
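A brief sketch (not part of this change) of how the new fault helpers are meant to be used together: a vim fault string taken from a CNS response is classified and, when it is a non-storage fault, re-prefixed before being surfaced to the CSI layer. The faultFromCNS value and the call site are illustrative assumptions.

package main

import (
	"context"
	"fmt"

	csifault "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/fault"
)

func main() {
	ctx := context.Background()
	// Example fault string extracted from a failed CNS task (assumed for illustration).
	faultFromCNS := "vim.fault.HostNotConnected"
	if csifault.IsNonStorageFault(faultFromCNS) {
		// Yields "csi.fault.nonstorage.vim.fault.HostNotConnected", letting callers
		// separate infrastructure problems from storage-stack failures.
		faultFromCNS = csifault.AddCsiNonStoragePrefix(ctx, faultFromCNS)
	}
	fmt.Println("fault reported to CSI layer:", faultFromCNS)
}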
-func GetDatastoreRefByURLFromGivenDatastoreList(ctx context.Context, vc *cnsvsphere.VirtualCenter, - datastoreList []types.ManagedObjectReference, dsURL string) (*types.ManagedObjectReference, error) { +// LogoutAllSessions will logout all vCenter sessions and disconnect vCenter client +func LogoutAllvCenterSessions(ctx context.Context) { log := logger.GetLogger(ctx) - // Get all datacenters in the virtualcenter - datacenters, err := vc.GetDatacenters(ctx) - if err != nil { - log.Errorf("failed to find datacenters from VC: %q, Error: %+v", vc.Config.Host, err) - return nil, err - } - var candidateDsObj *cnsvsphere.Datastore - // traverse each datacenter and find the datastore with the specified dsURL - for _, datacenter := range datacenters { - candidateDsInfoObj, err := datacenter.GetDatastoreInfoByURL(ctx, dsURL) - if err != nil { - log.Errorf("failed to find datastore with URL %q in datacenter %q from VC %q, Error: %+v", - dsURL, datacenter.InventoryPath, vc.Config.Host, err) + log.Info("Logging out all vCenter sessions") + virtualcentermanager := cnsvsphere.GetVirtualCenterManager(ctx) + vCenters := virtualcentermanager.GetAllVirtualCenters() + managerInstanceMap := cnsvolume.GetAllManagerInstances(ctx) + for _, vc := range vCenters { + if vc.Client == nil { continue } - candidateDsObj = candidateDsInfoObj.Datastore - break - } - if candidateDsObj == nil { - // fail if the candidate datastore is not found in the virtualcenter - return nil, logger.LogNewErrorf(log, - "failed to find datastore with URL %q in VC %q", dsURL, vc.Config.Host) - } - - for _, datastoreRef := range datastoreList { - if datastoreRef == candidateDsObj.Reference() { - log.Infof("compatible datastore found, dsURL = %q, dsRef = %v", dsURL, datastoreRef) - return &datastoreRef, nil + log.Info("Closing idle vCenter session") + vc.Client.CloseIdleConnections() + // logout vCenter session for list-view + mgr, ok := managerInstanceMap[vc.Config.Host] + if ok && mgr != nil { + err := mgr.LogoutListViewVCSession(ctx) + if err != nil { + continue + } + } + log.Infof("Disconnecting vCenter client for host %s", vc.Config.Host) + err := vc.Disconnect(ctx) + if err != nil { + log.Errorf("Error while disconnect vCenter client for host %s. 
Error: %+v", vc.Config.Host, err) + continue } + log.Infof("Disconnected vCenter client for host %s", vc.Config.Host) } - return nil, logger.LogNewErrorf(log, - "failed to find datastore with URL %q from the input datastore list, %v", dsURL, datastoreList) + log.Info("Successfully logged out vCenter sessions") } diff --git a/pkg/common/utils/utils_test.go b/pkg/common/utils/utils_test.go index 539c7efea4..e107ec4b6d 100644 --- a/pkg/common/utils/utils_test.go +++ b/pkg/common/utils/utils_test.go @@ -9,13 +9,9 @@ import ( "sync" "testing" - "github.com/stretchr/testify/assert" - "github.com/vmware/govmomi/vim25/mo" - cnssim "github.com/vmware/govmomi/cns/simulator" "github.com/vmware/govmomi/cns/types" "github.com/vmware/govmomi/simulator" - vim25types "github.com/vmware/govmomi/vim25/types" cnsvolumes "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/volume" cnsvsphere "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/vsphere" @@ -69,7 +65,7 @@ func configFromCustomizedSimWithTLS(tlsConfig *tls.Config, insecureAllowed bool) cfg.Global.VCenterIP = s.URL.Hostname() cfg.Global.VCenterPort = s.URL.Port() - cfg.Global.User = s.URL.User.Username() + cfg.Global.User = s.URL.User.Username() + "@vsphere.local" cfg.Global.Password, _ = s.URL.User.Password() cfg.Global.Datacenters = "DC0" @@ -167,99 +163,3 @@ func TestQuerySnapshotsUtil(t *testing.T) { t.Log(entry) } } - -func TestGetDatastoreRefByURLFromGivenDatastoreList(t *testing.T) { - type funcArgs struct { - ctx context.Context - vc *cnsvsphere.VirtualCenter - dsMoRefList []vim25types.ManagedObjectReference - dsURL string - } - - // Create context - commonUtilsTestInstance := getCommonUtilsTest(t) - - dsReferenceList := simulator.Map.AllReference("Datastore") - var dsEntityList []mo.Entity - var dsMoRefList []vim25types.ManagedObjectReference - for _, dsReference := range dsReferenceList { - dsMoRefList = append(dsMoRefList, dsReference.Reference()) - dsEntityList = append(dsEntityList, dsReference.(mo.Entity)) - } - - // case 2: a list of all datastore MoRef except the last one - dsMoRefListButLastOne := dsMoRefList[:len(dsMoRefList)-1] - - // the datastore url for the last one in the list - dsReferenceFortheLast := dsReferenceList[len(dsReferenceList)-1].Reference() - dsUrl := dsEntityList[len(dsEntityList)-1].(*simulator.Datastore).Info.GetDatastoreInfo().Url - - // an invalid datastore url - invalidDsUrl := "an-invalid-datastore-url" - - tests := []struct { - name string - args funcArgs - expectedDsRef *vim25types.ManagedObjectReference - expectedErr error - }{ - { - name: "CompatibleDatastoreFound", - args: funcArgs{ - ctx: context.TODO(), - vc: commonUtilsTestInstance.vcenter, - dsMoRefList: dsMoRefList, - dsURL: dsUrl, - }, - expectedDsRef: &dsReferenceFortheLast, - expectedErr: nil, - }, - { - name: "FailToFindGivenDatastoreInCompatibleList", - args: funcArgs{ - ctx: context.TODO(), - vc: commonUtilsTestInstance.vcenter, - dsMoRefList: dsMoRefListButLastOne, - dsURL: dsUrl, - }, - expectedDsRef: nil, - expectedErr: fmt.Errorf("failed to find datastore with URL %q from "+ - "the input datastore list, %v", dsUrl, dsMoRefListButLastOne), - }, - { - name: "FailToFindGivenDatastoreInVC", - args: funcArgs{ - ctx: context.TODO(), - vc: commonUtilsTestInstance.vcenter, - dsMoRefList: dsMoRefList, - dsURL: invalidDsUrl, - }, - expectedDsRef: nil, - expectedErr: fmt.Errorf("failed to find datastore with URL %q in VC %q", - invalidDsUrl, commonUtilsTestInstance.vcenter.Config.Host), - }, - { - name: "EmptyDatastoreURLFromInput", - args: 
funcArgs{ - ctx: context.TODO(), - vc: commonUtilsTestInstance.vcenter, - dsMoRefList: dsMoRefList, - dsURL: "", - }, - expectedDsRef: nil, - expectedErr: fmt.Errorf("failed to find datastore with URL %q in VC %q", - "", commonUtilsTestInstance.vcenter.Config.Host), - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - actualDsRef, actualErr := GetDatastoreRefByURLFromGivenDatastoreList( - test.args.ctx, test.args.vc, test.args.dsMoRefList, test.args.dsURL) - assert.Equal(t, test.expectedErr == nil, actualErr == nil) - if test.expectedErr != nil && actualErr != nil { - assert.Equal(t, test.expectedErr.Error(), actualErr.Error()) - } - assert.Equal(t, test.expectedDsRef, actualDsRef) - }) - } -} diff --git a/pkg/csi/service/common/authmanager.go b/pkg/csi/service/common/authmanager.go index ef04ad0406..5fdc028f3a 100644 --- a/pkg/csi/service/common/authmanager.go +++ b/pkg/csi/service/common/authmanager.go @@ -382,9 +382,14 @@ func getDatastoresWithBlockVolumePrivs(ctx context.Context, vc *cnsvsphere.Virtu privIds, entities, userName, vc.Config.Host) return nil, err } - log.Debugf( - "auth manager: HasUserPrivilegeOnEntities returns %v, when checking privileges %v on entities %v for user %s "+ - "and for vCenter %q", result, privIds, entities, userName, vc.Config.Host) + if len(result) == 0 { + log.Infof("auth manager: HasUserPrivilegeOnEntities returned empty result when checking privileges %v "+ + "on entities %v for user %s and for vCenter %q", privIds, entities, userName, vc.Config.Host) + } else { + log.Debugf("auth manager: HasUserPrivilegeOnEntities returned %v when checking privileges %v on entities %v "+ + "for user %s and for vCenter %q", result, privIds, entities, userName, vc.Config.Host) + } + for index, entityPriv := range result { hasPriv := true privAvails := entityPriv.PrivAvailability @@ -401,6 +406,11 @@ func getDatastoresWithBlockVolumePrivs(ctx context.Context, vc *cnsvsphere.Virtu "for vCenter %q", dsInfos[index].Info.Name, dsURLs[index], vc.Config.Host) } } + if len(result) != 0 && len(dsURLToInfoMap) == 0 { + log.Infof("auth manager: user %s on vCenter %q doesn't have privileges for any datastore. 
"+ + "HasUserPrivilegeOnEntities returns %v, when checking privileges %v on entities %v."+ + userName, vc.Config.Host, result, privIds, entities) + } return dsURLToInfoMap, nil } @@ -521,9 +531,14 @@ func getFSEnabledClustersWithPriv(ctx context.Context, vc *cnsvsphere.VirtualCen privIds, entities, userName, vc.Config.Host) return nil, err } - log.Debugf( - "auth manager: HasUserPrivilegeOnEntities returns %v when checking privileges %v on entities %v for user %s "+ - "and for vCenter %q", result, privIds, entities, userName, vc.Config.Host) + if len(result) == 0 { + log.Infof("auth manager: HasUserPrivilegeOnEntities returned empty result when checking privileges %v "+ + "on entities %v for user %s and for vCenter %q", privIds, entities, userName, vc.Config.Host) + } else { + log.Debugf("auth manager: HasUserPrivilegeOnEntities returned %v when checking privileges %v on entities %v "+ + "for user %s and for vCenter %q", result, privIds, entities, userName, vc.Config.Host) + } + clusterComputeResourceWithPriv := []*object.ClusterComputeResource{} for _, entityPriv := range result { hasPriv := true @@ -540,8 +555,14 @@ func getFSEnabledClustersWithPriv(ctx context.Context, vc *cnsvsphere.VirtualCen clusterComputeResourcesMap[entityPriv.Entity.Value]) } } - log.Debugf("Clusters with priv: %s and vCenter: %q are : %+v", HostConfigStoragePriv, - vc.Config.Host, clusterComputeResourceWithPriv) + if len(result) != 0 && len(clusterComputeResourceWithPriv) == 0 { + log.Infof("auth manager: user %s on vCenter %q doesn't have privileges for any ClusterComputeResource. "+ + "HasUserPrivilegeOnEntities returns %v, when checking privileges %v on entities %v."+ + userName, vc.Config.Host, result, privIds, entities) + } else { + log.Debugf("Clusters with priv: %s and vCenter: %q are : %+v", HostConfigStoragePriv, + vc.Config.Host, clusterComputeResourceWithPriv) + } // Get clusters which are vSAN and have vSAN FS enabled. clusterComputeResourceWithPrivAndFS := []*object.ClusterComputeResource{} @@ -558,15 +579,18 @@ func getFSEnabledClustersWithPriv(ctx context.Context, vc *cnsvsphere.VirtualCen cluster, vc.Config.Host) continue } else if config.FileServiceConfig == nil { - log.Debugf("VsanClusterGetConfig.FileServiceConfig is empty. Skipping this cluster: %+v with "+ + log.Infof("VsanClusterGetConfig.FileServiceConfig is empty. 
Skipping this cluster: %+v with "+ "vCenter: %q and with config: %+v", cluster, vc.Config.Host, config) continue } - log.Debugf("cluster: %+v and vCenter: %q has vSAN file services enabled: %t", cluster, vc.Config.Host, - config.FileServiceConfig.Enabled) if config.FileServiceConfig.Enabled { clusterComputeResourceWithPrivAndFS = append(clusterComputeResourceWithPrivAndFS, cluster) + log.Debugf("vSAN file service is enabled for cluster: %+v and vCenter: %q.", + cluster, vc.Config.Host) + } else { + log.Infof("vSAN file service is disabled for cluster: %+v and vCenter: %q.", + cluster, vc.Config.Host) } } diff --git a/pkg/csi/service/common/common_controller_helper.go b/pkg/csi/service/common/common_controller_helper.go index 7109f6d526..2408715c47 100644 --- a/pkg/csi/service/common/common_controller_helper.go +++ b/pkg/csi/service/common/common_controller_helper.go @@ -23,8 +23,8 @@ import ( "strings" "time" - snap "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" + snap "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -318,7 +318,7 @@ func IsVolumeSnapshotReady(ctx context.Context, client snapshotterClientSet.Inte var svs *snap.VolumeSnapshot waitErr := wait.PollImmediate(5*time.Second, timeout, func() (done bool, err error) { - svs, err := client.SnapshotV1().VolumeSnapshots(namespace). + svs, err = client.SnapshotV1().VolumeSnapshots(namespace). Get(ctx, supervisorVolumeSnapshotName, metav1.GetOptions{}) if err != nil { msg := fmt.Sprintf("unable to fetch volumesnapshot %q/%q "+ @@ -327,14 +327,21 @@ func IsVolumeSnapshotReady(ctx context.Context, client snapshotterClientSet.Inte log.Warnf(msg) return false, logger.LogNewErrorf(log, msg) } + if svs == nil || svs.Status == nil || svs.Status.ReadyToUse == nil { + log.Infof("Waiting up to %d seconds for VolumeSnapshot %v in namespace %s to be ReadyToUse, %+vs "+ + "since the start time", timeoutSeconds, supervisorVolumeSnapshotName, namespace, + time.Since(startTime).Seconds()) + return false, nil + } isSnapshotReadyToUse := *svs.Status.ReadyToUse if isSnapshotReadyToUse { log.Infof("VolumeSnapshot %s/%s is in ReadyToUse state", namespace, supervisorVolumeSnapshotName) isReadyToUse = true return true, nil } else { - log.Warnf("Waiting for VolumeSnapshot %s/%s to be ready since %+vs", namespace, - supervisorVolumeSnapshotName, time.Since(startTime).Seconds()) + log.Infof("Waiting up to %d seconds for VolumeSnapshot %v in namespace %s to be ReadyToUse, %+vs "+ + "since the start time", timeoutSeconds, supervisorVolumeSnapshotName, namespace, + time.Since(startTime).Seconds()) } return false, nil }) diff --git a/pkg/csi/service/common/commonco/coagnostic.go b/pkg/csi/service/common/commonco/coagnostic.go index 1882325063..6355493b76 100644 --- a/pkg/csi/service/common/commonco/coagnostic.go +++ b/pkg/csi/service/common/commonco/coagnostic.go @@ -81,6 +81,9 @@ type COCommonInterface interface { GetCSINodeTopologyInstancesList() []interface{} // GetCSINodeTopologyInstanceByName fetches the CSINodeTopology instance for a given node name in the cluster. 
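The IsVolumeSnapshotReady change above guards against a VolumeSnapshot whose Status or Status.ReadyToUse has not been populated yet. A condensed sketch of that nil-safe polling pattern (not part of this change; waitForSnapshotReady and its parameters are placeholders):

package example

import (
	"context"
	"time"

	snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForSnapshotReady polls until the VolumeSnapshot reports ReadyToUse or the timeout expires.
func waitForSnapshotReady(ctx context.Context, client snapshotterClientSet.Interface,
	namespace, name string, timeout time.Duration) error {
	return wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
		vs, err := client.SnapshotV1().VolumeSnapshots(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			// Treat API errors as transient and keep retrying until the timeout.
			return false, nil
		}
		if vs == nil || vs.Status == nil || vs.Status.ReadyToUse == nil {
			// Status not populated by the snapshot controller yet; keep waiting.
			return false, nil
		}
		return *vs.Status.ReadyToUse, nil
	})
}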
GetCSINodeTopologyInstanceByName(nodeName string) (item interface{}, exists bool, err error) + // GetPVNameFromCSIVolumeID retrieves the pv name from the volumeID. + // This method will not return pv name in case of in-tree migrated volumes + GetPVNameFromCSIVolumeID(volumeID string) (string, bool) } // GetContainerOrchestratorInterface returns orchestrator object for a given diff --git a/pkg/csi/service/common/commonco/k8sorchestrator/k8sorchestrator.go b/pkg/csi/service/common/commonco/k8sorchestrator/k8sorchestrator.go index 71c340ee02..0fca8e71b5 100644 --- a/pkg/csi/service/common/commonco/k8sorchestrator/k8sorchestrator.go +++ b/pkg/csi/service/common/commonco/k8sorchestrator/k8sorchestrator.go @@ -27,7 +27,7 @@ import ( "sync/atomic" "time" - snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" + snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" cnstypes "github.com/vmware/govmomi/cns/types" pbmtypes "github.com/vmware/govmomi/pbm/types" v1 "k8s.io/api/core/v1" @@ -503,7 +503,7 @@ func initFSS(ctx context.Context, k8sClient clientset.Interface, break } // Set up namespaced listener for cnscsisvfeaturestate CR. - dynInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := dynInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ // Add. AddFunc: func(obj interface{}) { fssCRAdded(obj) @@ -517,6 +517,9 @@ func initFSS(ctx context.Context, k8sClient clientset.Interface, fssCRDeleted(obj) }, }) + if err != nil { + return + } stopCh := make(chan struct{}) log.Infof("Informer to watch on %s CR starting..", featurestates.CRDSingular) dynInformer.Informer().Run(stopCh) @@ -1067,7 +1070,7 @@ func (c *K8sOrchestrator) IsFSSEnabled(ctx context.Context, featureName string) } if !supervisorFeatureState { // If FSS set to false, return. - log.Infof("%s feature state set to false in %s ConfigMap", featureName, c.supervisorFSS.configMapName) + log.Infof("%s feature state is set to false in %s ConfigMap", featureName, c.supervisorFSS.configMapName) return supervisorFeatureState } } else { @@ -1544,3 +1547,8 @@ func (c *K8sOrchestrator) CreateConfigMap(ctx context.Context, name string, name return nil } + +// GetPVNameFromCSIVolumeID retrieves the pv name from volumeID using volumeIDToNameMap. +func (c *K8sOrchestrator) GetPVNameFromCSIVolumeID(volumeID string) (string, bool) { + return c.volumeIDToNameMap.get(volumeID) +} diff --git a/pkg/csi/service/common/commonco/k8sorchestrator/topology.go b/pkg/csi/service/common/commonco/k8sorchestrator/topology.go index 21749602d3..7691541b6f 100644 --- a/pkg/csi/service/common/commonco/k8sorchestrator/topology.go +++ b/pkg/csi/service/common/commonco/k8sorchestrator/topology.go @@ -109,7 +109,7 @@ var ( // isMultiVCSupportEnabled is set to true only when the MultiVCenterCSITopology FSS // is enabled. isMultivCenterCluster is set to true only when the MultiVCenterCSITopology FSS // is enabled and the K8s cluster involves multiple VCs. - isMultiVCSupportEnabled, isMultivCenterCluster bool + isMultiVCSupportEnabled bool // csiNodeTopologyInformer refers to a shared K8s informer listening on CSINodeTopology instances // in the cluster. csiNodeTopologyInformer *cache.SharedIndexInformer @@ -128,9 +128,6 @@ type nodeVolumeTopology struct { k8sConfig *restclient.Config // clusterFlavor is the cluster flavor. 
clusterFlavor cnstypes.CnsClusterFlavor - // isCSINodeIdFeatureEnabled indicates whether the - // use-csinode-id feature is enabled or not. - isCSINodeIdFeatureEnabled bool } // controllerVolumeTopology implements the commoncotypes.ControllerTopologyService interface @@ -145,9 +142,6 @@ type controllerVolumeTopology struct { nodeMgr node.Manager // clusterFlavor is the cluster flavor. clusterFlavor cnstypes.CnsClusterFlavor - // isCSINodeIdFeatureEnabled indicates whether the - // use-csinode-id feature is enabled or not. - isCSINodeIdFeatureEnabled bool // isAcceptPreferredDatastoresFSSEnabled indicates whether the // accept-preferred-datastores feature is enabled or not. isTopologyPreferentialDatastoresFSSEnabled bool @@ -203,22 +197,19 @@ func (c *K8sOrchestrator) InitTopologyServiceInController(ctx context.Context) ( // Set isMultivCenterCluster if the K8s cluster is a multi-VC cluster. isMultiVCSupportEnabled = c.IsFSSEnabled(ctx, common.MultiVCenterCSITopology) - if isMultiVCSupportEnabled { - cfg, err := cnsconfig.GetConfig(ctx) - if err != nil { - return nil, logger.LogNewErrorf(log, "failed to read config. Error: %+v", err) - } - if len(cfg.VirtualCenter) > 1 { - isMultivCenterCluster = true - } + + // Create a cache of topology tags -> VC -> associated MoRefs in that VC to ease volume provisioning. + err = common.DiscoverTagEntities(ctx) + if err != nil { + return nil, logger.LogNewErrorf(log, + "failed to update cache with topology information. Error: %+v", err) } controllerVolumeTopologyInstance = &controllerVolumeTopology{ - k8sConfig: config, - nodeMgr: nodeManager, - csiNodeTopologyInformer: *csiNodeTopologyInformer, - clusterFlavor: clusterFlavor, - isCSINodeIdFeatureEnabled: c.IsFSSEnabled(ctx, common.UseCSINodeId), + k8sConfig: config, + nodeMgr: nodeManager, + csiNodeTopologyInformer: *csiNodeTopologyInformer, + clusterFlavor: clusterFlavor, isTopologyPreferentialDatastoresFSSEnabled: c.IsFSSEnabled(ctx, common.TopologyPreferentialDatastores), } @@ -414,7 +405,7 @@ func startAvailabilityZoneInformer(ctx context.Context, cfg *restclient.Config) return nil, err } availabilityZoneInformer := dynInformer.Informer() - availabilityZoneInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err = availabilityZoneInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { azCRAdded(obj) }, @@ -423,6 +414,9 @@ func startAvailabilityZoneInformer(ctx context.Context, cfg *restclient.Config) azCRDeleted(obj) }, }) + if err != nil { + return nil, err + } // Start informer. go func() { @@ -498,7 +492,7 @@ func startTopologyCRInformer(ctx context.Context, cfg *restclient.Config) (*cach return nil, err } topologyInformer := dynInformer.Informer() - topologyInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err = topologyInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ // Typically when the CSINodeTopology instance is created, the // topology labels are not populated till the reconcile loop runs. // However, this Add function will take care of cases where the node @@ -516,6 +510,9 @@ func startTopologyCRInformer(ctx context.Context, cfg *restclient.Config) (*cach topoCRDeleted(obj) }, }) + if err != nil { + return nil, err + } // Start informer. 
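The informer changes above reflect that AddEventHandler now returns a registration handle and an error, which previously went unchecked. A small sketch of that pattern (not part of this change; startInformer and the empty handlers are placeholders):

package example

import (
	"k8s.io/client-go/tools/cache"
)

// startInformer registers event handlers and only starts the informer if registration succeeded.
func startInformer(informer cache.SharedIndexInformer, stopCh <-chan struct{}) error {
	_, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { /* handle add */ },
		UpdateFunc: func(oldObj, newObj interface{}) { /* handle update */ },
		DeleteFunc: func(obj interface{}) { /* handle delete */ },
	})
	if err != nil {
		return err
	}
	go informer.Run(stopCh)
	return nil
}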
go func() { @@ -545,9 +542,6 @@ func topoCRAdded(obj interface{}) { } if isMultiVCSupportEnabled { common.AddNodeToDomainNodeMapNew(ctx, nodeTopoObj) - if isMultivCenterCluster { - common.AddLabelsToTopologyVCMap(ctx, nodeTopoObj) - } } else { addNodeToDomainNodeMap(ctx, nodeTopoObj) } @@ -599,9 +593,6 @@ func topoCRUpdated(oldObj interface{}, newObj interface{}) { oldNodeTopoObj, newNodeTopoObj) if isMultiVCSupportEnabled { common.RemoveNodeFromDomainNodeMapNew(ctx, oldNodeTopoObj) - if isMultivCenterCluster { - common.RemoveLabelsFromTopologyVCMap(ctx, oldNodeTopoObj) - } } else { removeNodeFromDomainNodeMap(ctx, oldNodeTopoObj) } @@ -610,9 +601,6 @@ func topoCRUpdated(oldObj interface{}, newObj interface{}) { if newNodeTopoObj.Status.Status == csinodetopologyv1alpha1.CSINodeTopologySuccess { if isMultiVCSupportEnabled { common.AddNodeToDomainNodeMapNew(ctx, newNodeTopoObj) - if isMultivCenterCluster { - common.AddLabelsToTopologyVCMap(ctx, newNodeTopoObj) - } } else { addNodeToDomainNodeMap(ctx, newNodeTopoObj) } @@ -634,9 +622,6 @@ func topoCRDeleted(obj interface{}) { if nodeTopoObj.Status.Status == csinodetopologyv1alpha1.CSINodeTopologySuccess { if isMultiVCSupportEnabled { common.RemoveNodeFromDomainNodeMapNew(ctx, nodeTopoObj) - if isMultivCenterCluster { - common.RemoveLabelsFromTopologyVCMap(ctx, nodeTopoObj) - } } else { removeNodeFromDomainNodeMap(ctx, nodeTopoObj) } @@ -719,12 +704,11 @@ func (c *K8sOrchestrator) InitTopologyServiceInNode(ctx context.Context) ( } nodeVolumeTopologyInstance = &nodeVolumeTopology{ - csiNodeTopologyK8sClient: crClient, - csiNodeTopologyWatcher: crWatcher, - k8sClient: k8sClient, - k8sConfig: config, - clusterFlavor: clusterFlavor, - isCSINodeIdFeatureEnabled: c.IsFSSEnabled(ctx, common.UseCSINodeId), + csiNodeTopologyK8sClient: crClient, + csiNodeTopologyWatcher: crWatcher, + k8sClient: k8sClient, + k8sConfig: config, + clusterFlavor: clusterFlavor, } log.Infof("Topology service initiated successfully") } @@ -738,45 +722,49 @@ func (c *K8sOrchestrator) InitTopologyServiceInNode(ctx context.Context) ( func (volTopology *nodeVolumeTopology) GetNodeTopologyLabels(ctx context.Context, nodeInfo *commoncotypes.NodeInfo) ( map[string]string, error) { log := logger.GetLogger(ctx) - var err error - csiNodeTopology := &csinodetopologyv1alpha1.CSINodeTopology{} - csiNodeTopologyKey := types.NamespacedName{ - Name: nodeInfo.NodeName, - } - err = volTopology.csiNodeTopologyK8sClient.Get(ctx, csiNodeTopologyKey, csiNodeTopology) - csiNodeTopologyFound := true - if err != nil { - if !apierrors.IsNotFound(err) { - msg := fmt.Sprintf("failed to get CsiNodeTopology for the node: %q. 
Error: %+v", nodeInfo.NodeName, err) - return nil, logger.LogNewErrorCodef(log, codes.Internal, msg) - } - csiNodeTopologyFound = false + + if volTopology.clusterFlavor == cnstypes.CnsClusterFlavorGuest { err = createCSINodeTopologyInstance(ctx, volTopology, nodeInfo) if err != nil { return nil, logger.LogNewErrorCodef(log, codes.Internal, err.Error()) } - } - - // there is an already existing topology - if csiNodeTopologyFound && volTopology.clusterFlavor == cnstypes.CnsClusterFlavorVanilla { - newCSINodeTopology := csiNodeTopology.DeepCopy() - - if volTopology.isCSINodeIdFeatureEnabled { - newCSINodeTopology = volTopology.updateNodeIDForTopology(ctx, nodeInfo, newCSINodeTopology) + } else { + csiNodeTopology := &csinodetopologyv1alpha1.CSINodeTopology{} + csiNodeTopologyKey := types.NamespacedName{ + Name: nodeInfo.NodeName, } - // reset the status so as syncer can sync the object again - newCSINodeTopology.Status.Status = "" - _, err = volTopology.patchCSINodeTopology(ctx, csiNodeTopology, newCSINodeTopology) + err = volTopology.csiNodeTopologyK8sClient.Get(ctx, csiNodeTopologyKey, csiNodeTopology) + csiNodeTopologyFound := true if err != nil { - msg := fmt.Sprintf("Fail to patch CsiNodeTopology for the node: %q "+ - "with nodeUUID: %s. Error: %+v", - nodeInfo.NodeName, nodeInfo.NodeID, err) - return nil, logger.LogNewErrorCodef(log, codes.Internal, msg) + if !apierrors.IsNotFound(err) { + msg := fmt.Sprintf("failed to get CsiNodeTopology for the node: %q. Error: %+v", nodeInfo.NodeName, err) + return nil, logger.LogNewErrorCodef(log, codes.Internal, msg) + } + csiNodeTopologyFound = false + err = createCSINodeTopologyInstance(ctx, volTopology, nodeInfo) + if err != nil { + return nil, logger.LogNewErrorCodef(log, codes.Internal, err.Error()) + } + } + // There is an already existing topology. + if csiNodeTopologyFound { + newCSINodeTopology := csiNodeTopology.DeepCopy() + newCSINodeTopology = volTopology.updateNodeIDForTopology(ctx, nodeInfo, newCSINodeTopology) + // reset the status so as syncer can sync the object again + newCSINodeTopology.Status.Status = "" + _, err = volTopology.patchCSINodeTopology(ctx, csiNodeTopology, newCSINodeTopology) + if err != nil { + msg := fmt.Sprintf("Fail to patch CsiNodeTopology for the node: %q "+ + "with nodeUUID: %s. Error: %+v", + nodeInfo.NodeName, nodeInfo.NodeID, err) + return nil, logger.LogNewErrorCodef(log, codes.Internal, msg) + } + log.Infof("Successfully patched CSINodeTopology instance: %q with Uuid: %q", + nodeInfo.NodeName, nodeInfo.NodeID) } - log.Infof("Successfully patched CSINodeTopology instance: %q with Uuid: %q", - nodeInfo.NodeName, nodeInfo.NodeID) } + // Create a watcher for CSINodeTopology CRs. timeoutSeconds := int64((time.Duration(getCSINodeTopologyWatchTimeoutInMin(ctx)) * time.Minute).Seconds()) watchCSINodeTopology, err := volTopology.csiNodeTopologyWatcher.Watch(metav1.ListOptions{ @@ -912,7 +900,7 @@ func getPatchData(oldObj, newObj interface{}) ([]byte, error) { // Create new CSINodeTopology instance if it doesn't exist // Create CSINodeTopology instance with spec.nodeID and spec.nodeUUID -// if cluster flavor is Vanilla and UseCSINodeId feature is enabled +// if cluster flavor is Vanilla // else create with spec.nodeID only. 
func createCSINodeTopologyInstance(ctx context.Context, volTopology *nodeVolumeTopology, @@ -942,7 +930,7 @@ func createCSINodeTopologyInstance(ctx context.Context, // If both useCnsNodeId feature is enabled and clusterFlavor is Vanilla, // create the CsiNodeTopology instance with nodeID set to node name and // nodeUUID set to node uuid. - if volTopology.isCSINodeIdFeatureEnabled && volTopology.clusterFlavor == cnstypes.CnsClusterFlavorVanilla { + if volTopology.clusterFlavor == cnstypes.CnsClusterFlavorVanilla { csiNodeTopologySpec.Spec = csinodetopologyv1alpha1.CSINodeTopologySpec{ NodeID: nodeInfo.NodeName, NodeUUID: nodeInfo.NodeID, @@ -1250,12 +1238,11 @@ func (volTopology *controllerVolumeTopology) getTopologySegmentsWithMatchingNode // If there is a match, fetch the nodeVM object and add it to matchingNodeVMs. if isMatch { var nodeVM *cnsvsphere.VirtualMachine - if volTopology.isCSINodeIdFeatureEnabled && - volTopology.clusterFlavor == cnstypes.CnsClusterFlavorVanilla { - nodeVM, err = volTopology.nodeMgr.GetNode(ctx, + if volTopology.clusterFlavor == cnstypes.CnsClusterFlavorVanilla { + nodeVM, err = volTopology.nodeMgr.GetNodeVMAndUpdateCache(ctx, nodeTopologyInstance.Spec.NodeUUID, nil) } else { - nodeVM, err = volTopology.nodeMgr.GetNodeByName(ctx, + nodeVM, err = volTopology.nodeMgr.GetNodeVMByNameAndUpdateCache(ctx, nodeTopologyInstance.Spec.NodeID) } if err != nil { @@ -1321,12 +1308,11 @@ func (volTopology *controllerVolumeTopology) getNodesMatchingTopologySegment(ctx } if isMatch { var nodeVM *cnsvsphere.VirtualMachine - if volTopology.isCSINodeIdFeatureEnabled && - volTopology.clusterFlavor == cnstypes.CnsClusterFlavorVanilla { - nodeVM, err = volTopology.nodeMgr.GetNode(ctx, + if volTopology.clusterFlavor == cnstypes.CnsClusterFlavorVanilla { + nodeVM, err = volTopology.nodeMgr.GetNodeVMAndUpdateCache(ctx, nodeTopologyInstance.Spec.NodeUUID, nil) } else { - nodeVM, err = volTopology.nodeMgr.GetNodeByName(ctx, + nodeVM, err = volTopology.nodeMgr.GetNodeVMByNameAndUpdateCache(ctx, nodeTopologyInstance.Spec.NodeID) } if err != nil { diff --git a/pkg/csi/service/common/constants.go b/pkg/csi/service/common/constants.go index d5bf1d7841..be98380bdd 100644 --- a/pkg/csi/service/common/constants.go +++ b/pkg/csi/service/common/constants.go @@ -312,6 +312,11 @@ const ( // on the VolumeSnapshot CR VolumeSnapshotInfoKey = "csi.vsphere.volume/snapshot" + // SupervisorVolumeSnapshotAnnotationKey represents the annotation key on VolumeSnapshot CR + // in Supervisor cluster which is used to indicate that snapshot operation is initiated from + // Guest cluster. + SupervisorVolumeSnapshotAnnotationKey = "csi.vsphere.guest-initiated-csi-snapshot" + // AttributeSupervisorVolumeSnapshotClass represents name of VolumeSnapshotClass AttributeSupervisorVolumeSnapshotClass = "svvolumesnapshotclass" @@ -366,10 +371,6 @@ const ( // CSIWindowsSupport is the feature to support csi block volumes for windows // node. CSIWindowsSupport = "csi-windows-support" - // UseCSINodeId is the feature to make sure CSI will no longer use - // ProviderID on K8s Node API object set by CPI. If not set, CSI - // will continue to use the Provider ID from K8s Node API object. - UseCSINodeId = "use-csinode-id" // TKGsHA is the feature gate to check whether TKGS HA feature // is enabled. 
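One plausible use of the new SupervisorVolumeSnapshotAnnotationKey constant, sketched for illustration only: stamping the annotation on the supervisor VolumeSnapshot that backs a guest-initiated snapshot. The "true" value, the helper name, and the object construction are assumptions, not taken from this change.

package example

import (
	snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/common"
)

// newSupervisorSnapshot builds a supervisor VolumeSnapshot marked as guest-initiated.
func newSupervisorSnapshot(name, namespace, pvcName, snapshotClass string) *snapv1.VolumeSnapshot {
	return &snapv1.VolumeSnapshot{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Annotations: map[string]string{
				common.SupervisorVolumeSnapshotAnnotationKey: "true",
			},
		},
		Spec: snapv1.VolumeSnapshotSpec{
			Source: snapv1.VolumeSnapshotSource{
				PersistentVolumeClaimName: &pvcName,
			},
			VolumeSnapshotClassName: &snapshotClass,
		},
	}
}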
TKGsHA = "tkgs-ha" diff --git a/pkg/csi/service/common/placementengine/placement.go b/pkg/csi/service/common/placementengine/placement.go index 9efd251fbe..91973097e2 100644 --- a/pkg/csi/service/common/placementengine/placement.go +++ b/pkg/csi/service/common/placementengine/placement.go @@ -16,137 +16,169 @@ import ( csinodetopologyv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/internalapis/csinodetopology/v1alpha1" ) +// GetSharedDatastores retrieves the shared accessible datastores for hosts associated +// with the topology segments requested by user. func GetSharedDatastores(ctx context.Context, reqParams interface{}) ( []*cnsvsphere.DatastoreInfo, error) { log := logger.GetLogger(ctx) params := reqParams.(VanillaSharedDatastoresParams) - var sharedDatastores []*cnsvsphere.DatastoreInfo nodeMgr := node.GetManager(ctx) - log.Infof("GetSharedDatastores called with policyID: %q , Topology Segment List: %v", - params.StoragePolicyID, params.TopologySegmentsList) + + log.Infof("GetSharedDatastores called for VC %q with policyID: %q , Topology Segment List: %v", + params.Vcenter.Config.Host, params.StoragePolicyID, params.TopologySegmentsList) + var sharedDatastores []*cnsvsphere.DatastoreInfo // Iterate through each set of topology segments and find shared datastores for that segment. - for _, segments := range params.TopologySegmentsList { - // Fetch nodes compatible with the requested topology segments. - matchingNodeVMs, completeTopologySegments, err := getTopologySegmentsWithMatchingNodes(ctx, - segments, nodeMgr) + /* For example, if the topology environment is as follows: + VC + |-> DC (Category: region, Tag: region1) + |-> Cluster1 (Category: zone, Tag: zone1) + Node1 + Node2 + |-> Cluster2 (Category: zone, Tag: zone2) + Node3 + Node4 + |-> Cluster3 (Category: zone, Tag: zone3) + (No nodeVMs in Cluster3 yet) + + If the user chooses to provision a volume in region1, according to the code below: + `params.TopologySegmentsList` will look like + [ + map[string]string{region:region1} + ] + `reqSegment` will look like map[string]string{region:region1} + `completeTopologySegments` will look like + [ + map[string]string{region:region1, zone:zone1}, + map[string]string{region:region1, zone:zone2} + ] + */ + for _, reqSegment := range params.TopologySegmentsList { + // Fetch the complete hierarchy of topology segments. + completeTopologySegments, err := getExpandedTopologySegments(ctx, reqSegment, nodeMgr) if err != nil { return nil, logger.LogNewErrorf(log, "failed to find nodes in topology segment %+v. Error: %+v", - segments, err) + reqSegment, err) } - if len(matchingNodeVMs) == 0 { + if len(completeTopologySegments) == 0 { log.Warnf("No nodes in the cluster matched the topology requirement: %+v", - segments) + reqSegment) continue } - log.Infof("Obtained list of nodeVMs %+v", matchingNodeVMs) - log.Debugf("completeTopologySegments map: %+v", completeTopologySegments) - // Fetch shared datastores for the matching nodeVMs. - sharedDatastoresInTopology, err := cnsvsphere.GetSharedDatastoresForVMs(ctx, matchingNodeVMs) - if err != nil { - if err == cnsvsphere.ErrNoSharedDatastoresFound { - log.Warnf("no shared datastores found for topology segment: %+v", segments) - continue - } - return nil, logger.LogNewErrorf(log, "failed to get shared datastores for nodes: %+v "+ - "in topology segment %+v. Error: %+v", matchingNodeVMs, segments, err) - } - log.Infof("Obtained list of shared datastores as %+v", sharedDatastoresInTopology) - - // Check storage policy compatibility, if given. 
- // Datastore comparison by moref. - if params.StoragePolicyID != "" { - var sharedDSMoRef []vimtypes.ManagedObjectReference - for _, ds := range sharedDatastoresInTopology { - sharedDSMoRef = append(sharedDSMoRef, ds.Reference()) - } - compat, err := params.Vcenter.PbmCheckCompatibility(ctx, sharedDSMoRef, params.StoragePolicyID) + log.Infof("TopologySegment %+v expanded as: %+v", reqSegment, completeTopologySegments) + // For each segment in the complete topology segments hierarchy, get the matching hosts. + for _, segment := range completeTopologySegments { + hostMoRefs, err := common.GetHostsForSegment(ctx, segment, params.Vcenter) if err != nil { - return nil, logger.LogNewErrorf(log, "failed to find datastore compatibility "+ - "with storage policy ID %q. vCenter: %q Error: %+v", params.StoragePolicyID, params.Vcenter.Config.Host, err) + return nil, logger.LogNewErrorf(log, + "failed to fetch hosts belonging to topology segment %+v. Error: %+v", segment, err) } - compatibleDsMoids := make(map[string]struct{}) - for _, ds := range compat.CompatibleDatastores() { - compatibleDsMoids[ds.HubId] = struct{}{} + // 1. Fetch shared datastores accessible to all the hosts in this segment. + sharedDatastoresInTopologySegment, err := cnsvsphere.GetSharedDatastoresForHosts(ctx, hostMoRefs) + if err != nil { + if err == cnsvsphere.ErrNoSharedDSFound { + log.Warnf("no shared datastores found for hosts %+v belonging to topology segment: %+v", + hostMoRefs, segment) + continue + } + return nil, logger.LogNewErrorf(log, "failed to get shared datastores for hosts: %+v "+ + "in topology segment %+v. Error: %+v", hostMoRefs, segment, err) } - log.Infof("Datastores compatible with storage policy %q are %+v for vCenter: %q", params.StoragePolicyID, - compatibleDsMoids, params.Vcenter.Config.Host) + log.Infof("Obtained list of shared datastores %+v for hosts %+v", sharedDatastoresInTopologySegment, + hostMoRefs) - // Filter compatible datastores from shared datastores list. - var compatibleDatastores []*cnsvsphere.DatastoreInfo - for _, ds := range sharedDatastoresInTopology { - if _, exists := compatibleDsMoids[ds.Reference().Value]; exists { - compatibleDatastores = append(compatibleDatastores, ds) + // 2. Check storage policy compatibility, if given. + // Storage policy compatibility is given a higher preference than + // preferential datastore in that topology segment. + if params.StoragePolicyID != "" { + var sharedDSMoRef []vimtypes.ManagedObjectReference + for _, ds := range sharedDatastoresInTopologySegment { + sharedDSMoRef = append(sharedDSMoRef, ds.Reference()) } - } - if len(compatibleDatastores) == 0 { - log.Errorf("No compatible shared datastores found for storage policy %q on vCenter: %q", - params.StoragePolicyID, params.Vcenter.Config.Host) - continue - } - sharedDatastoresInTopology = compatibleDatastores - } - // Further, filter the compatible datastores with preferential datastores, if any. - // Datastore comparison by URL. - if common.PreferredDatastoresExist { - // Fetch all preferred datastore URLs for the matching topology segments. 
- allPreferredDSURLs := make(map[string]struct{}) - for _, topoSegs := range completeTopologySegments { - prefDS := common.GetPreferredDatastoresInSegments(ctx, topoSegs, params.Vcenter.Config.Host) - log.Infof("Preferential datastores: %v for topology segment: %v on vCenter: %q", prefDS, - topoSegs, params.Vcenter.Config.Host) - for key, val := range prefDS { - allPreferredDSURLs[key] = val + compat, err := params.Vcenter.PbmCheckCompatibility(ctx, sharedDSMoRef, params.StoragePolicyID) + if err != nil { + return nil, logger.LogNewErrorf(log, "failed to find datastore compatibility "+ + "with storage policy ID %q. vCenter: %q Error: %+v", params.StoragePolicyID, + params.Vcenter.Config.Host, err) } - } - if len(allPreferredDSURLs) != 0 { - // If there are preferred datastores among the compatible - // datastores, choose the preferred datastores, otherwise - // choose the compatible datastores. - log.Debugf("Filtering preferential datastores from compatible datastores") - var preferredDS []*cnsvsphere.DatastoreInfo - for _, dsInfo := range sharedDatastoresInTopology { - if _, ok := allPreferredDSURLs[dsInfo.Info.Url]; ok { - preferredDS = append(preferredDS, dsInfo) + compatibleDsMoids := make(map[string]struct{}) + for _, ds := range compat.CompatibleDatastores() { + compatibleDsMoids[ds.HubId] = struct{}{} + } + log.Infof("Datastores compatible with storage policy %q are %+v for vCenter: %q", + params.StoragePolicyID, compatibleDsMoids, params.Vcenter.Config.Host) + + // Filter compatible datastores from shared datastores list. + var compatibleDatastores []*cnsvsphere.DatastoreInfo + for _, ds := range sharedDatastoresInTopologySegment { + // Datastore comparison by moref. + if _, exists := compatibleDsMoids[ds.Reference().Value]; exists { + compatibleDatastores = append(compatibleDatastores, ds) } } - if len(preferredDS) != 0 { - sharedDatastoresInTopology = preferredDS - log.Infof("Using preferred datastores: %+v", preferredDS) + if len(compatibleDatastores) == 0 { + log.Infof("No compatible shared datastores found for storage policy %q on vCenter: %q", + params.StoragePolicyID, params.Vcenter.Config.Host) } else { - log.Infof("No preferential datastore selected for volume provisoning") + log.Infof("Shared datastores compatible with storage policy %q are %+v for vCenter: %q", + params.StoragePolicyID, compatibleDatastores, params.Vcenter.Config.Host) + sharedDatastoresInTopologySegment = compatibleDatastores } } - } - // Update sharedDatastores with the list of datastores received. - // Duplicates will not be added. - for _, ds := range sharedDatastoresInTopology { - var found bool - for _, sharedDS := range sharedDatastores { - if sharedDS.Info.Url == ds.Info.Url { - found = true - break + // 3. Filter the shared datastores with preferential datastores, if any. + // Datastore comparison by URL. + if common.PreferredDatastoresExist { + // Fetch all preferred datastore URLs for the topology segment. + prefDS := common.GetPreferredDatastoresInSegments(ctx, segment, params.Vcenter.Config.Host) + log.Infof("Preferential datastores %v found for topology segment: %v on vCenter: %q", prefDS, + segment, params.Vcenter.Config.Host) + if len(prefDS) != 0 { + // If there are preferred datastores among the shared compatible + // datastores, choose the preferred datastores. 
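The preferential-datastore handling above reduces to: keep only the preferred datastores when at least one candidate is preferred, otherwise fall back to the full compatible list. A self-contained sketch of that selection rule (not part of this change; DatastoreInfo stands in for cnsvsphere.DatastoreInfo):

package example

// DatastoreInfo is a stand-in for cnsvsphere.DatastoreInfo, keyed by datastore URL.
type DatastoreInfo struct {
	Name string
	URL  string
}

// filterByPreference returns the preferred candidates if any exist, else the candidates unchanged.
func filterByPreference(candidates []DatastoreInfo, preferredURLs map[string]struct{}) []DatastoreInfo {
	var preferred []DatastoreInfo
	for _, ds := range candidates {
		if _, ok := preferredURLs[ds.URL]; ok {
			preferred = append(preferred, ds)
		}
	}
	if len(preferred) != 0 {
		return preferred
	}
	return candidates
}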
+ var preferredDS []*cnsvsphere.DatastoreInfo + for _, dsInfo := range sharedDatastoresInTopologySegment { + if _, ok := prefDS[dsInfo.Info.Url]; ok { + preferredDS = append(preferredDS, dsInfo) + } + } + if len(preferredDS) != 0 { + sharedDatastoresInTopologySegment = preferredDS + log.Infof("Using preferred datastores: %+v", preferredDS) + } else { + log.Infof("No preferential datastore selected for volume provisioning") + } } } - if !found { - sharedDatastores = append(sharedDatastores, ds) + // Add the datastore list to sharedDatastores without duplicates. + for _, ds := range sharedDatastoresInTopologySegment { + var found bool + for _, sharedDS := range sharedDatastores { + if ds.Info.Url == sharedDS.Info.Url { + found = true + break + } + } + if !found { + sharedDatastores = append(sharedDatastores, ds) + } } } } + if len(sharedDatastores) != 0 { log.Infof("Shared compatible datastores being considered for volume provisioning on vCenter: %q are: %+v", - sharedDatastores, params.Vcenter.Config.Host) + params.Vcenter.Config.Host, sharedDatastores) } return sharedDatastores, nil } -func getTopologySegmentsWithMatchingNodes(ctx context.Context, requestedSegments map[string]string, - nodeMgr node.Manager) ([]*cnsvsphere.VirtualMachine, []map[string]string, error) { +// getExpandedTopologySegments expands the user given topology requirement to depict the complete hierarchy. +// NOTE: If there is no nodeVM in an AZ, that AZ will be skipped in complete topology hierarchy. +func getExpandedTopologySegments(ctx context.Context, requestedSegments map[string]string, + nodeMgr node.Manager) ([]map[string]string, error) { log := logger.GetLogger(ctx) - var ( vcHost string - matchingNodeVMs []*cnsvsphere.VirtualMachine completeTopologySegments []map[string]string ) // Fetch node topology information from informer cache. @@ -156,13 +188,13 @@ func getTopologySegmentsWithMatchingNodes(ctx context.Context, requestedSegments err := runtime.DefaultUnstructuredConverter.FromUnstructured(val.(*unstructured.Unstructured).Object, &nodeTopologyInstance) if err != nil { - return nil, nil, logger.LogNewErrorf(log, "failed to convert unstructured object %+v to "+ + return nil, logger.LogNewErrorf(log, "failed to convert unstructured object %+v to "+ "CSINodeTopology instance. Error: %+v", val, err) } // Check CSINodeTopology instance `Status` field for success. if nodeTopologyInstance.Status.Status != csinodetopologyv1alpha1.CSINodeTopologySuccess { - return nil, nil, logger.LogNewErrorf(log, "node %q not yet ready. Found CSINodeTopology instance "+ + return nil, logger.LogNewErrorf(log, "node %q not yet ready. Found CSINodeTopology instance "+ "status: %q with error message: %q", nodeTopologyInstance.Name, nodeTopologyInstance.Status.Status, nodeTopologyInstance.Status.ErrorMessage) } @@ -181,23 +213,21 @@ func getTopologySegmentsWithMatchingNodes(ctx context.Context, requestedSegments break } } - // If there is a match, fetch the nodeVM object and add it to matchingNodeVMs. + // If there is a match, check if each compatible NodeVM belongs to the same VC. If not, + // error out as we do not support cross-zonal volume provisioning. if isMatch { - nodeVM, err := nodeMgr.GetNode(ctx, nodeTopologyInstance.Spec.NodeUUID, nil) + nodeVM, err := nodeMgr.GetNodeVMAndUpdateCache(ctx, nodeTopologyInstance.Spec.NodeUUID, nil) if err != nil { - return nil, nil, logger.LogNewErrorf(log, + return nil, logger.LogNewErrorf(log, "failed to retrieve NodeVM %q. 
Error - %+v", nodeTopologyInstance.Spec.NodeID, err) } - // Check if each compatible NodeVM belongs to the same VC. If not, - // error out as we do not support cross-zonal volume provisioning. if vcHost == "" { vcHost = nodeVM.VirtualCenterHost } else if vcHost != nodeVM.VirtualCenterHost { - return nil, nil, logger.LogNewErrorf(log, + return nil, logger.LogNewErrorf(log, "found NodeVM %q belonging to different vCenter: %q. Expected vCenter: %q", nodeVM.Name(), nodeVM.VirtualCenterHost, vcHost) } - matchingNodeVMs = append(matchingNodeVMs, nodeVM) // Store the complete hierarchy of topology requestedSegments for future use. var exists bool @@ -212,7 +242,8 @@ func getTopologySegmentsWithMatchingNodes(ctx context.Context, requestedSegments } } } - return matchingNodeVMs, completeTopologySegments, nil + + return completeTopologySegments, nil } // GetTopologyInfoFromNodes retrieves the topology information of the given diff --git a/pkg/csi/service/common/topology.go b/pkg/csi/service/common/topology.go index 026426f1d3..bdb620b1a0 100644 --- a/pkg/csi/service/common/topology.go +++ b/pkg/csi/service/common/topology.go @@ -2,15 +2,15 @@ package common import ( "context" + "strings" "sync" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/mo" - "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/config" - "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/node" cnsvsphere "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/vsphere" + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/config" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger" csinodetopologyv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/internalapis/csinodetopology/v1alpha1" ) @@ -35,17 +35,274 @@ var ( preferredDatastoresMap = make(map[string]map[string][]string) // preferredDatastoresMapInstanceLock guards the preferredDatastoresMap from read-write overlaps. preferredDatastoresMapInstanceLock = &sync.RWMutex{} - // topologyVCMapInstanceLock guards the topologyVCMap instance from concurrent writes. - topologyVCMapInstanceLock = &sync.RWMutex{} - // topologyVCMap maintains a cache of topology tags to the vCenter IP/FQDN which holds the tag. - // Example - {region1: {VC1: struct{}{}, VC2: struct{}{}}, - // zone1: {VC1: struct{}{}}, - // zone2: {VC2: struct{}{}}} - // The vCenter IP/FQDN under each tag are maintained as a map of string with nil values to improve - // retrieval and deletion performance. - topologyVCMap = make(map[string]map[string]struct{}) + // tagVCEntityMoRefMap maintains a cache of topology tags to the vCenter IP/FQDN & the MoRef of the + // entity which holds the tag. + // Example - { + // "region-1": {"vc1": [{Type:Datacenter Value:datacenter-3}], "vc2": [{Type:Datacenter Value:datacenter-5}] }, + // "zone-1": {"vc1": [{Type:ClusterComputeResource Value:domain-c12}] }, + // "zone-2": {"vc2": [{Type:ClusterComputeResource Value:domain-c8] },} + tagVCEntityMoRefMap = make(map[string]map[string][]mo.Reference) ) +// DiscoverTagEntities populates tagVCEntityMoRefMap with tagName -> VC -> associated MoRefs mapping. +// NOTE: Any edits to existing topology labels will require a restart of the controller. +func DiscoverTagEntities(ctx context.Context) error { + log := logger.GetLogger(ctx) + // Get CNS config. + cnsCfg, err := config.GetConfig(ctx) + if err != nil { + return logger.LogNewErrorf(log, "failed to fetch CNS config. 
Error: %+v", err) + } + + var categories []string + zoneCat := strings.TrimSpace(cnsCfg.Labels.Zone) + regionCat := strings.TrimSpace(cnsCfg.Labels.Region) + if zoneCat != "" && regionCat != "" { + categories = []string{zoneCat, regionCat} + } else if strings.TrimSpace(cnsCfg.Labels.TopologyCategories) != "" { + categories = strings.Split(cnsCfg.Labels.TopologyCategories, ",") + for index := range categories { + categories[index] = strings.TrimSpace(categories[index]) + } + } else { + log.Infof("DiscoverTagEntities: No topology information found in CNS config.") + return nil + } + log.Infof("Topology categories being considered for tag to VC mapping are %+v", categories) + + vcenterConfigs, err := cnsvsphere.GetVirtualCenterConfigs(ctx, cnsCfg) + if err != nil { + return logger.LogNewErrorf(log, "failed to get VirtualCenterConfigs. Error: %v", err) + } + for _, vcenterCfg := range vcenterConfigs { + // Get VC instance + vcenter, err := cnsvsphere.GetVirtualCenterInstanceForVCenterConfig(ctx, vcenterCfg, false) + if err != nil { + return logger.LogNewErrorf(log, "failed to get vCenterInstance for vCenter Host: %q. Error: %v", + vcenterCfg.Host, err) + } + // Get tag manager instance. + tagManager, err := cnsvsphere.GetTagManager(ctx, vcenter) + if err != nil { + return logger.LogNewErrorf(log, "failed to create tagManager. Error: %v", err) + } + defer func() { + err := tagManager.Logout(ctx) + if err != nil { + log.Errorf("failed to logout tagManager. Error: %v", err) + } + }() + for _, cat := range categories { + topoTags, err := tagManager.GetTagsForCategory(ctx, cat) + if err != nil { + return logger.LogNewErrorf(log, "failed to fetch tags for category %q", cat) + } + log.Infof("Tags associated with category %q are %+v", cat, topoTags) + for _, tag := range topoTags { + objMORs, err := tagManager.ListAttachedObjects(ctx, tag.ID) + if err != nil { + return logger.LogNewErrorf(log, "failed to fetch objects associated with tag %q", tag.Name) + } + log.Infof("Entities associated with tag %q are %+v", tag.Name, objMORs) + if len(objMORs) == 0 { + continue + } + if _, exists := tagVCEntityMoRefMap[tag.Name]; !exists { + tagVCEntityMoRefMap[tag.Name] = map[string][]mo.Reference{vcenterCfg.Host: objMORs} + } else { + tagVCEntityMoRefMap[tag.Name][vcenterCfg.Host] = objMORs + } + } + } + } + log.Debugf("tagVCEntityMoRefMap: %+v", tagVCEntityMoRefMap) + return nil +} + +// GetHostsForSegment retrieves the list of hosts for a topology segment by first +// finding the entities associated with the tag lower in hierarchy. +func GetHostsForSegment(ctx context.Context, topoSegment map[string]string, vCenter *cnsvsphere.VirtualCenter) ( + []*cnsvsphere.HostSystem, error) { + log := logger.GetLogger(ctx) + var ( + allhostSlices [][]*cnsvsphere.HostSystem + ) + + // Get the entity MoRefs for each tag. + for key, tag := range topoSegment { + var hostList []*cnsvsphere.HostSystem + entityMorefs, exists := areEntityMorefsPresentForTag(tag, vCenter.Config.Host) + if !exists { + // Refresh cache to see if the tag has been added recently. + log.Infof("Refresh cache to see if the tag has been added recently") + err := DiscoverTagEntities(ctx) + if err != nil { + return nil, logger.LogNewErrorf(log, + "failed to update cache with topology information. 
Error: %+v", err) + } + entityMorefs, exists = areEntityMorefsPresentForTag(tag, vCenter.Config.Host) + if !exists { + return nil, logger.LogNewErrorf(log, "failed to find tag %q in VC %q.", tag, vCenter.Config.Host) + } + } + log.Infof("Tag %q is applied on entities %+v", tag, entityMorefs) + log.Debugf("Fetching hosts for entities %+v", entityMorefs) + for _, entity := range entityMorefs { + hosts, err := fetchHosts(ctx, entity, vCenter) + if err != nil { + return nil, logger.LogNewErrorf(log, "failed to fetch hosts from entity %+v. Error: %+v", + entity.Reference(), err) + } + hostList = append(hostList, hosts...) + } + log.Infof("Hosts returned for topology category: %q and tag: %q are %v", key, tag, hostList) + allhostSlices = append(allhostSlices, hostList) + } + commonHosts := findCommonHostsforAllTopologyKeys(ctx, allhostSlices) + log.Infof("common hosts: %v for all segments: %v", commonHosts, topoSegment) + return commonHosts, nil +} + +// findCommonHostsforAllTopologyKeys helps find common hosts across all slices in hostLists +func findCommonHostsforAllTopologyKeys(ctx context.Context, + hostLists [][]*cnsvsphere.HostSystem) []*cnsvsphere.HostSystem { + log := logger.GetLogger(ctx) + log.Infof("finding common hosts for hostlists: %v", hostLists) + if len(hostLists) == 0 { + return []*cnsvsphere.HostSystem{} + } + // Create a map to store hosts and their occurrence count + hostCount := make(map[string]int) + // Count occurrences of elements in the first slice + for _, host := range hostLists[0] { + hostCount[host.String()]++ + } + log.Debugf("hostCount after setting count in the first slice : %v", hostCount) + // Iterate through the remaining slices and update the hostCount map + for i := 1; i < len(hostLists); i++ { + for _, host := range hostLists[i] { + // If the host exists in the map, increment its count + if count, exists := hostCount[host.String()]; exists { + hostCount[host.String()] = count + 1 + } + } + } + log.Debugf("hostCount after iterate through the remaining slices and updated hostCount map : %v", hostCount) + // Create a slice to store the intersection + var commonHosts []string + + // Check if each hosts occurred in all slices + for hostMoRefName, count := range hostCount { + if count == len(hostLists) { + commonHosts = append(commonHosts, hostMoRefName) + } + } + log.Debugf("common hosts: %v", commonHosts) + + // Iterate through the all slices and get common hosts + var commonHostSystem []*cnsvsphere.HostSystem + for _, host := range commonHosts { + out: + for i := 0; i < len(hostLists); i++ { + for _, hostSystem := range hostLists[i] { + if hostSystem.String() == host { + commonHostSystem = append(commonHostSystem, hostSystem) + break out + } + } + } + } + log.Debugf("commonHostSystem: %v", commonHostSystem) + return commonHostSystem +} + +// fetchHosts gives a list of hosts under the entity given as input. +func fetchHosts(ctx context.Context, entity mo.Reference, vCenter *cnsvsphere.VirtualCenter) ( + []*cnsvsphere.HostSystem, error) { + log := logger.GetLogger(ctx) + var hosts []*cnsvsphere.HostSystem + log.Infof("fetching hosts for entity: %v on vCenter: %q", entity, vCenter.Config.Host) + switch entity.Reference().Type { + case "rootFolder": + folder := object.NewFolder(vCenter.Client.Client, entity.Reference()) + children, err := folder.Children(ctx) + if err != nil { + return nil, logger.LogNewErrorf(log, + "failed to retrieve child entities of the rootFolder %+v. 
Error: %+v", entity.Reference(), err) + } + for _, child := range children { + hostList, err := fetchHosts(ctx, child.Reference(), vCenter) + if err != nil { + return nil, logger.LogNewErrorf(log, "failed to fetch hosts from entity %+v. Error: %+v", + child.Reference(), err) + } + hosts = append(hosts, hostList...) + } + case "Datacenter": + dc := cnsvsphere.Datacenter{ + Datacenter: object.NewDatacenter(vCenter.Client.Client, entity.Reference()), + VirtualCenterHost: vCenter.Config.Host} + var dcMo mo.Datacenter + err := dc.Properties(ctx, dc.Reference(), []string{"hostFolder"}, &dcMo) + if err != nil { + return nil, logger.LogNewErrorf(log, ""+ + "failed to retrieve hostFolder property for datacenter %+v", dc.Reference()) + } + hostList, err := fetchHosts(ctx, dcMo.HostFolder, vCenter) + if err != nil { + return nil, logger.LogNewErrorf(log, "failed to fetch hosts from entity %+v. Error: %+v", + dcMo, err) + } + hosts = append(hosts, hostList...) + case "Folder": + folder := object.NewFolder(vCenter.Client.Client, entity.Reference()) + children, err := folder.Children(ctx) + if err != nil { + return nil, logger.LogNewErrorf(log, "failed to fetch child entities of Folder %+v. Error: %+v", + entity.Reference(), err) + } + for _, child := range children { + hostList, err := fetchHosts(ctx, child.Reference(), vCenter) + if err != nil { + return nil, logger.LogNewErrorf(log, "failed to fetch hosts from entity %+v. Error: %+v", + child.Reference(), err) + } + hosts = append(hosts, hostList...) + } + case "ClusterComputeResource": + ccr := cnsvsphere.ClusterComputeResource{ + ClusterComputeResource: object.NewClusterComputeResource(vCenter.Client.Client, entity.Reference()), + VirtualCenterHost: vCenter.Config.Host} + hostList, err := ccr.GetHosts(ctx) + if err != nil { + return nil, logger.LogNewErrorf(log, "failed to retrieve hosts from cluster %+v. Error: %+v", + entity.Reference(), err) + } + hosts = append(hosts, hostList...) + case "HostSystem": + host := cnsvsphere.HostSystem{HostSystem: object.NewHostSystem(vCenter.Client.Client, entity.Reference())} + hosts = append(hosts, &host) + default: + return nil, logger.LogNewErrorf(log, "unrecognised entity type found %+v.", entity.Reference()) + } + + return hosts, nil +} + +// areEntityMorefsPresentForTag retrieves the entities in given VC which have the +// input tag associated with them. +func areEntityMorefsPresentForTag(tag, vcHost string) ([]mo.Reference, bool) { + vcEntityMap, exists := tagVCEntityMoRefMap[tag] + if !exists { + return nil, false + } + entityMorefs, exists := vcEntityMap[vcHost] + if !exists { + return nil, false + } + return entityMorefs, true +} + // GetAccessibilityRequirementsByVC clubs the accessibility requirements by the VC they belong to. func GetAccessibilityRequirementsByVC(ctx context.Context, topoReq *csi.TopologyRequirement) ( map[string][]map[string]string, error) { @@ -68,7 +325,7 @@ func GetAccessibilityRequirementsByVC(ctx context.Context, topoReq *csi.Topology return vcTopoSegmentsMap, nil } -// getVCForTopologySegments uses the topologyVCMap to retrieve the +// getVCForTopologySegments uses the tagVCEntityMoRefMap to retrieve the // VC instance for the given topology segments map in a multi-VC environment. 
func getVCForTopologySegments(ctx context.Context, topologySegments map[string]string) (string, error) { log := logger.GetLogger(ctx) @@ -77,10 +334,10 @@ func getVCForTopologySegments(ctx context.Context, topologySegments map[string]s vcCountMap := make(map[string]int) // Find the VC which contains all the labels given in the topologySegments. - // For example, if topologyVCMap looks like - // {"region-1": {"vc1": struct{}{}, "vc2": struct{}{} }, - // "zone-1": {"vc1": struct{}{} }, - // "zone-2": {"vc2": struct{}{} },} + // For example, if tagVCEntityMoRefMap looks like + // {"region-1": {"vc1": [{Type:Datacenter Value:datacenter-3}], "vc2": [{Type:Datacenter Value:datacenter-5}] }, + // "zone-1": {"vc1": [{Type:ClusterComputeResource Value:domain-c12}] }, + // "zone-2": {"vc2": [{Type:ClusterComputeResource Value:domain-c8] },} // For a given topologySegment // {"topology.csi.vmware.com/k8s-region": "region-1", // "topology.csi.vmware.com/k8s-zone": "zone-2"} @@ -88,12 +345,22 @@ func getVCForTopologySegments(ctx context.Context, topologySegments map[string]s // We go over the vcCountMap to check which VC has a count equal to // the len(topologySegment), in this case 2 and return that VC. for topologyKey, label := range topologySegments { - if vcList, exists := topologyVCMap[label]; exists { - for vc := range vcList { + vcMap, exists := tagVCEntityMoRefMap[label] + if !exists { + // Refresh cache to see if the tag has been added recently. + err := DiscoverTagEntities(ctx) + if err != nil { + return "", logger.LogNewErrorf(log, + "failed to update cache with tag to VC to MoRef mapping. Error: %+v", err) + } + vcMap, exists = tagVCEntityMoRefMap[label] + } + if exists { + for vc := range vcMap { vcCountMap[vc] = vcCountMap[vc] + 1 } } else { - return "", logger.LogNewErrorf(log, "Topology label %q not found in topology to vCenter mapping.", + return "", logger.LogNewErrorf(log, "Topology label %q not found in tag to vCenter mapping.", topologyKey+":"+label) } } @@ -205,7 +472,7 @@ func RefreshPreferentialDatastoresForMultiVCenter(ctx context.Context) error { preferredDatastoresMap = prefDatastoresMap PreferredDatastoresExist = true log.Debugf("preferredDatastoresMap :%v", preferredDatastoresMap) - log.Debugf("PreferredDatastoresExist: %v", PreferredDatastoresExist) + log.Debugf("PreferredDatastoresExist: %t", PreferredDatastoresExist) } return nil } @@ -235,53 +502,6 @@ func GetPreferredDatastoresInSegments(ctx context.Context, segments map[string]s return allPreferredDSURLs } -// AddLabelsToTopologyVCMap adds topology label to VC mapping for given CSINodeTopology instance -// in the topologyVCMap variable. -func AddLabelsToTopologyVCMap(ctx context.Context, nodeTopoObj csinodetopologyv1alpha1.CSINodeTopology) { - log := logger.GetLogger(ctx) - // Get node manager instance. - nodeManager := node.GetManager(ctx) - nodeVM, err := nodeManager.GetNode(ctx, nodeTopoObj.Spec.NodeUUID, nil) - if err != nil { - log.Errorf("Node %q is not yet registered in the node manager. Error: %+v", - nodeTopoObj.Spec.NodeUUID, err) - return - } - log.Infof("Topology labels %+v belong to %q VC", nodeTopoObj.Status.TopologyLabels, - nodeVM.VirtualCenterHost) - // Update topologyVCMap with topology label and associated VC host. 
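// Minimal sketch of the counting approach in getVCForTopologySegments above: every
// vCenter that carries a given topology label gets its counter bumped, and the
// vCenter whose count equals the number of requested labels satisfies all of them.
// labelToVCs is a simplified stand-in for the per-tag entries of tagVCEntityMoRefMap.
package main

import "fmt"

func vcForSegments(labelToVCs map[string][]string, segments map[string]string) (string, bool) {
	vcCount := map[string]int{}
	for _, label := range segments {
		for _, vc := range labelToVCs[label] {
			vcCount[vc]++
		}
	}
	for vc, n := range vcCount {
		if n == len(segments) {
			return vc, true
		}
	}
	return "", false
}

func main() {
	labelToVCs := map[string][]string{
		"region-1": {"vc1", "vc2"},
		"zone-2":   {"vc2"},
	}
	segments := map[string]string{
		"topology.csi.vmware.com/k8s-region": "region-1",
		"topology.csi.vmware.com/k8s-zone":   "zone-2",
	}
	// Only vc2 carries both "region-1" and "zone-2", so it is selected.
	fmt.Println(vcForSegments(labelToVCs, segments))
}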
- topologyVCMapInstanceLock.Lock() - defer topologyVCMapInstanceLock.Unlock() - for _, label := range nodeTopoObj.Status.TopologyLabels { - if _, exists := topologyVCMap[label.Value]; !exists { - topologyVCMap[label.Value] = map[string]struct{}{nodeVM.VirtualCenterHost: {}} - } else { - topologyVCMap[label.Value][nodeVM.VirtualCenterHost] = struct{}{} - } - } -} - -// RemoveLabelsFromTopologyVCMap removes the topology label to VC mapping for given CSINodeTopology -// instance in the topologyVCMap variable. -func RemoveLabelsFromTopologyVCMap(ctx context.Context, nodeTopoObj csinodetopologyv1alpha1.CSINodeTopology) { - log := logger.GetLogger(ctx) - // Get node manager instance. - nodeManager := node.GetManager(ctx) - nodeVM, err := nodeManager.GetNode(ctx, nodeTopoObj.Spec.NodeUUID, nil) - if err != nil { - log.Errorf("Node %q is not yet registered in the node manager. Error: %+v", - nodeTopoObj.Spec.NodeUUID, err) - return - } - log.Infof("Removing VC %q mapping for TopologyLabels %+v.", nodeVM.VirtualCenterHost, - nodeTopoObj.Status.TopologyLabels) - topologyVCMapInstanceLock.Lock() - defer topologyVCMapInstanceLock.Unlock() - for _, label := range nodeTopoObj.Status.TopologyLabels { - delete(topologyVCMap[label.Value], nodeVM.VirtualCenterHost) - } -} - // AddNodeToDomainNodeMapNew adds the CR instance name in the domainNodeMap wherever appropriate. func AddNodeToDomainNodeMapNew(ctx context.Context, nodeTopoObj csinodetopologyv1alpha1.CSINodeTopology) { log := logger.GetLogger(ctx) diff --git a/pkg/csi/service/common/util_test.go b/pkg/csi/service/common/util_test.go index ee1e2f2fab..77f6079b0f 100644 --- a/pkg/csi/service/common/util_test.go +++ b/pkg/csi/service/common/util_test.go @@ -340,6 +340,21 @@ func TestInvalidVolumeCapabilitiesForFile(t *testing.T) { if err := IsValidVolumeCapabilities(ctx, volCap); err == nil { t.Errorf("Invalid file VolCap = %+v passed validation!", volCap) } + + // Invalid case: volumeMode=block and accessMode=MULTI_NODE_READER_ONLY + volCap = []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Block{ + Block: &csi.VolumeCapability_BlockVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY, + }, + }, + } + if err := IsValidVolumeCapabilities(ctx, volCap); err == nil { + t.Errorf("Invalid file VolCap = %+v passed validation!", volCap) + } } func isStorageClassParamsEqual(expected *StorageClassParams, actual *StorageClassParams) bool { diff --git a/pkg/csi/service/common/vsphereutil.go b/pkg/csi/service/common/vsphereutil.go index d6b44105d7..bb6b71dc4f 100644 --- a/pkg/csi/service/common/vsphereutil.go +++ b/pkg/csi/service/common/vsphereutil.go @@ -17,7 +17,6 @@ limitations under the License. package common import ( - "fmt" "strconv" "strings" "time" @@ -90,6 +89,7 @@ func CreateBlockVolumeUtil(ctx context.Context, clusterFlavor cnstypes.CnsCluste } var datastoreObj *vsphere.Datastore var datastores []vim25types.ManagedObjectReference + var datastoreInfoList []*vsphere.DatastoreInfo if spec.ScParams.DatastoreURL == "" { // Check if datastore URL is specified by the storage pool parameter. if spec.VsanDirectDatastoreURL != "" { @@ -109,8 +109,9 @@ func CreateBlockVolumeUtil(ctx context.Context, clusterFlavor cnstypes.CnsCluste } // Search the datastore from the URL in the datacenter list. 
var datastoreObj *vsphere.Datastore + var datastoreInfoObj *vsphere.DatastoreInfo for _, datacenter := range dcList { - datastoreInfoObj, err := datacenter.GetDatastoreInfoByURL(ctx, spec.VsanDirectDatastoreURL) + datastoreInfoObj, err = datacenter.GetDatastoreInfoByURL(ctx, spec.VsanDirectDatastoreURL) if err != nil { log.Warnf("Failed to find datastore with URL %q in datacenter %q from VC %q, Error: %+v", spec.VsanDirectDatastoreURL, datacenter.InventoryPath, vc.Config.Host, err) @@ -124,6 +125,7 @@ func CreateBlockVolumeUtil(ctx context.Context, clusterFlavor cnstypes.CnsCluste log.Debugf("Successfully fetched the datastore %v from the URL: %v", datastoreObj.Reference(), spec.VsanDirectDatastoreURL) datastores = append(datastores, datastoreObj.Reference()) + datastoreInfoList = append(datastoreInfoList, datastoreInfoObj) break } if datastores == nil { @@ -137,6 +139,7 @@ func CreateBlockVolumeUtil(ctx context.Context, clusterFlavor cnstypes.CnsCluste // If DatastoreURL is not specified in StorageClass, get all shared // datastores. datastores = getDatastoreMoRefs(sharedDatastores) + datastoreInfoList = sharedDatastores } } else { // vc.GetDatacenters returns datacenters found on the VirtualCenter. @@ -152,8 +155,9 @@ func CreateBlockVolumeUtil(ctx context.Context, clusterFlavor cnstypes.CnsCluste return nil, csifault.CSIInternalFault, err } // Check if DatastoreURL specified in the StorageClass is present in any one of the datacenters. + var datastoreInfoObj *vsphere.DatastoreInfo for _, datacenter := range datacenters { - datastoreInfoObj, err := datacenter.GetDatastoreInfoByURL(ctx, spec.ScParams.DatastoreURL) + datastoreInfoObj, err = datacenter.GetDatastoreInfoByURL(ctx, spec.ScParams.DatastoreURL) if err != nil { log.Warnf("failed to find datastore with URL %q in datacenter %q from VC %q, Error: %+v", spec.ScParams.DatastoreURL, datacenter.InventoryPath, vc.Config.Host, err) @@ -184,6 +188,7 @@ func CreateBlockVolumeUtil(ctx context.Context, clusterFlavor cnstypes.CnsCluste } if isSharedDatastoreURL { datastores = append(datastores, datastoreObj.Reference()) + datastoreInfoList = append(datastoreInfoList, datastoreInfoObj) } else { // TODO: Need to figure out which fault need to return when datastore is not accessible to all nodes. // Currently, just return csi.fault.Internal. 
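// Sketch of the shadowing pitfall the change above avoids: declaring a result with
// ":=" inside the loop creates a new variable per iteration, so nothing found there
// is visible after the loop. Declaring it once before the loop (as done for
// datastoreInfoObj) lets the found value be appended to a list after "break".
// datastoreInfo and findByURL are simplified stand-ins, not the driver's types.
package main

import "fmt"

type datastoreInfo struct{ URL string }

func findByURL(candidates []datastoreInfo, url string) *datastoreInfo {
	var found *datastoreInfo
	for i := range candidates {
		if candidates[i].URL == url {
			// Assigning to the outer variable (not "found := ...") keeps the result.
			found = &candidates[i]
			break
		}
	}
	return found
}

func main() {
	ds := []datastoreInfo{{URL: "ds:///vmfs/volumes/aaa/"}, {URL: "ds:///vmfs/volumes/bbb/"}}
	if match := findByURL(ds, "ds:///vmfs/volumes/bbb/"); match != nil {
		fmt.Println("compatible datastore:", match.URL)
	}
}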
@@ -276,18 +281,28 @@ func CreateBlockVolumeUtil(ctx context.Context, clusterFlavor cnstypes.CnsCluste } // step 2: validate if the snapshot datastore is compatible with datastore candidates in create spec - compatibleDatastore, err := utils.GetDatastoreRefByURLFromGivenDatastoreList( - ctx, vc, createSpec.Datastores, cnsVolume.DatastoreUrl) - if err != nil { + var compatibleDatastore vim25types.ManagedObjectReference + var foundCompatibleDatastore bool = false + for _, dsInfo := range datastoreInfoList { + if dsInfo.Info.Url == cnsVolume.DatastoreUrl { + log.Infof("compatible datastore found, dsURL = %q, dsRef = %v", dsInfo.Info.Url, + dsInfo.Datastore.Reference()) + compatibleDatastore = dsInfo.Datastore.Reference() + foundCompatibleDatastore = true + break + } + } + if !foundCompatibleDatastore { return nil, csifault.CSIInternalFault, logger.LogNewErrorf(log, "failed to get the compatible datastore for create volume from snapshot %s with error: %+v", spec.ContentSourceSnapshotID, err) } + // overwrite the datatstores field in create spec with the compatible datastore log.Infof("Overwrite the datatstores field in create spec %v with the compatible datastore %v "+ - "when create volume from snapshot %s", createSpec.Datastores, *compatibleDatastore, + "when create volume from snapshot %s", createSpec.Datastores, compatibleDatastore, spec.ContentSourceSnapshotID) - createSpec.Datastores = []vim25types.ManagedObjectReference{*compatibleDatastore} + createSpec.Datastores = []vim25types.ManagedObjectReference{compatibleDatastore} } log.Debugf("vSphere CSI driver creating volume %s with create spec %+v", spec.Name, spew.Sdump(createSpec)) @@ -1004,9 +1019,8 @@ func isExpansionRequired(ctx context.Context, volumeID string, requestedSize int if len(queryResult.Volumes) > 0 { currentSize = queryResult.Volumes[0].BackingObjectDetails.GetCnsBackingObjectDetails().CapacityInMb } else { - msg := fmt.Sprintf("failed to find volume by querying volumeID: %q", volumeID) - log.Error(msg) - return false, err + // Error out as volume is not found during a resize operation. + return false, logger.LogNewErrorf(log, "failed to find volume by querying volumeID: %q", volumeID) } log.Infof("isExpansionRequired: Found current size of volumeID %q to be %d Mb. "+ diff --git a/pkg/csi/service/mounter/mounter_windows.go b/pkg/csi/service/mounter/mounter_windows.go index d7bac867a0..5db8465087 100644 --- a/pkg/csi/service/mounter/mounter_windows.go +++ b/pkg/csi/service/mounter/mounter_windows.go @@ -161,6 +161,20 @@ func (mounter *csiProxyMounter) ExistsPath(ctx context.Context, path string) (bo return isExistsResponse.Exists, err } +// IsMountPoint: determines if a directory is a mountpoint. 
+func (mounter *csiProxyMounter) IsMountPoint(path string) (bool, error) { + isNotMnt, err := mounter.IsLikelyNotMountPoint(path) + if err != nil { + return false, err + } + return !isNotMnt, nil +} + +// CanSafelySkipMountPointCheck always returns false on Windows +func (mounter *csiProxyMounter) CanSafelySkipMountPointCheck() bool { + return false +} + // Rmdir - delete the given directory func (mounter *csiProxyMounter) Rmdir(ctx context.Context, path string) error { log := logger.GetLogger(ctx) diff --git a/pkg/csi/service/node.go b/pkg/csi/service/node.go index 92988b77df..bec4d0a7a6 100644 --- a/pkg/csi/service/node.go +++ b/pkg/csi/service/node.go @@ -349,15 +349,21 @@ func (driver *vsphereCSIDriver) NodeGetInfo( return nil, logger.LogNewErrorCode(log, codes.Internal, "ENV NODE_NAME is not set") } - if commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.UseCSINodeId) { + + clusterFlavor, err = cnsconfig.GetClusterFlavor(ctx) + if err != nil { + return nil, err + } + + if clusterFlavor == cnstypes.CnsClusterFlavorGuest { + nodeID = nodeName + } else { // Get VM UUID nodeID, err = driver.osUtils.GetSystemUUID(ctx) if err != nil { return nil, logger.LogNewErrorCodef(log, codes.Internal, "failed to get system uuid for node VM with error: %v", err) } - } else { - nodeID = nodeName } var maxVolumesPerNode int64 @@ -390,11 +396,6 @@ func (driver *vsphereCSIDriver) NodeGetInfo( accessibleTopology map[string]string ) - clusterFlavor, err = cnsconfig.GetClusterFlavor(ctx) - if err != nil { - return nil, err - } - if clusterFlavor == cnstypes.CnsClusterFlavorGuest { if !commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.TKGsHA) { nodeInfoResponse = &csi.NodeGetInfoResponse{ diff --git a/pkg/csi/service/osutils/linux_os_utils.go b/pkg/csi/service/osutils/linux_os_utils.go index 667bba2747..f668ad7c27 100644 --- a/pkg/csi/service/osutils/linux_os_utils.go +++ b/pkg/csi/service/osutils/linux_os_utils.go @@ -318,13 +318,13 @@ func (osUtils *OsUtils) IsBlockVolumePublished(ctx context.Context, volID string if dev == nil { // check if target is mount point - notMountPoint, err := mount.IsNotMountPoint(osUtils.Mounter, target) + isMountPoint, err := osUtils.Mounter.IsMountPoint(target) if err != nil { log.Errorf("error while checking target path %q is mount point err: %v", target, err) return false, logger.LogNewErrorCodef(log, codes.Internal, "failed to verify mount point %q. Error: %v", target, err) } - if !notMountPoint { + if isMountPoint { log.Infof("target %q is mount point", target) return true, nil } diff --git a/pkg/csi/service/vanilla/controller.go b/pkg/csi/service/vanilla/controller.go index bf9ef57c0c..e94f74bc85 100644 --- a/pkg/csi/service/vanilla/controller.go +++ b/pkg/csi/service/vanilla/controller.go @@ -58,12 +58,12 @@ import ( // NodeManagerInterface provides functionality to manage (VM) nodes. 
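// Sketch of the wrapper pattern added for the Windows csi-proxy mounter above: the
// mounter only exposes an IsLikelyNotMountPoint-style check, so IsMountPoint is
// implemented as its negation, and CanSafelySkipMountPointCheck reports false because
// the check cannot be skipped on Windows. notMountChecker and fakeMounter are
// stand-ins, not the real mount-utils interface.
package main

import "fmt"

type notMountChecker interface {
	IsLikelyNotMountPoint(path string) (bool, error)
}

type fakeMounter struct{ mounts map[string]bool }

func (f *fakeMounter) IsLikelyNotMountPoint(path string) (bool, error) {
	return !f.mounts[path], nil
}

// isMountPoint inverts the "likely not a mount point" answer.
func isMountPoint(m notMountChecker, path string) (bool, error) {
	notMnt, err := m.IsLikelyNotMountPoint(path)
	if err != nil {
		return false, err
	}
	return !notMnt, nil
}

func main() {
	m := &fakeMounter{mounts: map[string]bool{`C:\var\lib\kubelet\pods\x\volume`: true}}
	ok, _ := isMountPoint(m, `C:\var\lib\kubelet\pods\x\volume`)
	fmt.Println(ok) // true
}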
type NodeManagerInterface interface { - Initialize(ctx context.Context, useNodeUuid bool) error + Initialize(ctx context.Context) error GetSharedDatastoresInK8SCluster(ctx context.Context) ([]*cnsvsphere.DatastoreInfo, error) - GetNodeByName(ctx context.Context, nodeName string) (*cnsvsphere.VirtualMachine, error) - GetNodeByNameOrUUID(ctx context.Context, nodeName string) (*cnsvsphere.VirtualMachine, error) + GetNodeVMByNameAndUpdateCache(ctx context.Context, nodeName string) (*cnsvsphere.VirtualMachine, error) + GetNodeVMByNameOrUUID(ctx context.Context, nodeName string) (*cnsvsphere.VirtualMachine, error) GetNodeNameByUUID(ctx context.Context, nodeUUID string) (string, error) - GetNodeByUuid(ctx context.Context, nodeUuid string) (*cnsvsphere.VirtualMachine, error) + GetNodeVMByUuid(ctx context.Context, nodeUuid string) (*cnsvsphere.VirtualMachine, error) GetAllNodes(ctx context.Context) ([]*cnsvsphere.VirtualMachine, error) GetAllNodesByVC(ctx context.Context, vcHost string) ([]*cnsvsphere.VirtualMachine, error) } @@ -105,6 +105,7 @@ var ( // variable for list snapshots CNSSnapshotsForListSnapshots = make([]cnstypes.CnsSnapshotQueryResultEntry, 0) CNSVolumeDetailsMap = make([]map[string]*utils.CnsVolumeDetails, 0) + volumeIDToNodeUUIDMap = make(map[string]string) ) // New creates a CNS controller. @@ -295,12 +296,9 @@ func (c *controller) Init(config *cnsconfig.Config, version string) error { } } } - useNodeUuid := false - if commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.UseCSINodeId) { - useNodeUuid = true - } + c.nodeMgr = &node.Nodes{} - err = c.nodeMgr.Initialize(ctx, useNodeUuid) + err = c.nodeMgr.Initialize(ctx) if err != nil { log.Errorf("failed to initialize nodeMgr. err=%v", err) return err @@ -444,7 +442,7 @@ func (c *controller) ReloadConfiguration() error { // Re-Initialize Node Manager to cache latest vCenter config. log.Debug("Re-Initializing node manager") c.nodeMgr = &node.Nodes{} - err = c.nodeMgr.Initialize(ctx, true) + err = c.nodeMgr.Initialize(ctx) if err != nil { log.Errorf("failed to re-initialize nodeMgr. err=%v", err) return err @@ -487,12 +485,8 @@ func (c *controller) ReloadConfiguration() error { } c.manager.VcenterConfig = newVCConfig // Re-Initialize Node Manager to cache latest vCenter config. - useNodeUuid := false - if commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.UseCSINodeId) { - useNodeUuid = true - } c.nodeMgr = &node.Nodes{} - err = c.nodeMgr.Initialize(ctx, useNodeUuid) + err = c.nodeMgr.Initialize(ctx) if err != nil { log.Errorf("failed to re-initialize nodeMgr. err=%v", err) return err @@ -711,9 +705,7 @@ func (c *controller) createBlockVolume(ctx context.Context, req *csi.CreateVolum }, } volTaskAlreadyRegistered = true - } else if volumeOperationDetails.OperationDetails.TaskStatus == - cnsvolumeoperationrequest.TaskInvocationStatusInProgress && - volumeOperationDetails.OperationDetails.TaskID != "" { + } else if cnsvolume.IsTaskPending(volumeOperationDetails) { // If task is created in CNS for this volume but task is in progress, then // we need to monitor the task to check if volume creation is completed or not. log.Infof("Volume with name %s has CreateVolume task %s pending on CNS.", @@ -1171,9 +1163,7 @@ func (c *controller) createBlockVolumeWithPlacementEngineForMultiVC(ctx context. 
}, } volTaskAlreadyRegistered = true - } else if volumeOperationDetails.OperationDetails.TaskStatus == - cnsvolumeoperationrequest.TaskInvocationStatusInProgress && - volumeOperationDetails.OperationDetails.TaskID != "" { + } else if cnsvolume.IsTaskPending(volumeOperationDetails) { // If task is already created in CNS for this volume but task is in progress, // we need to monitor the task to check if volume creation is complete or not. log.Infof("Volume with name %s has CreateVolume task %s pending on VC %q.", @@ -1562,7 +1552,7 @@ func (c *controller) calculateAccessibleTopologiesForDatastore(ctx context.Conte log := logger.GetLogger(ctx) var datastoreAccessibleTopology []map[string]string - // Find out all nodes which have access to the chosen datastore. + // Find out all nodeVMs which have access to the chosen datastore among all the nodes in k8s cluster. accessibleNodes, err := common.GetNodeVMsWithAccessToDatastore(ctx, vcenter, datastoreURL, allNodeVMs) if err != nil || len(accessibleNodes) == 0 { return nil, logger.LogNewErrorCodef(log, codes.Internal, @@ -1663,9 +1653,7 @@ func (c *controller) createFileVolume(ctx context.Context, req *csi.CreateVolume volumeID = volumeOperationDetails.VolumeID volTaskAlreadyRegistered = true - } else if volumeOperationDetails.OperationDetails.TaskStatus == - cnsvolumeoperationrequest.TaskInvocationStatusInProgress && - volumeOperationDetails.OperationDetails.TaskID != "" { + } else if cnsvolume.IsTaskPending(volumeOperationDetails) { var ( volumeInfo *cnsvolume.CnsVolumeInfo vcenter *cnsvsphere.VirtualCenter @@ -1804,7 +1792,7 @@ func (c *controller) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequ createVolumeInternal := func() ( *csi.CreateVolumeResponse, string, error) { log.Infof("CreateVolume: called with args %+v", *req) - //TODO: If the err is returned by invoking CNS API, then faultType should be + // TODO: If the err is returned by invoking CNS API, then faultType should be // populated by the underlying layer. // If the request failed due to validate the request, "csi.fault.InvalidArgument" will be return. // If thr reqeust failed due to object not found, "csi.fault.NotFound" will be return. @@ -1859,6 +1847,9 @@ func (c *controller) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequ resp, faultType, err := createVolumeInternal() log.Debugf("createVolumeInternal: returns fault %q", faultType) if err != nil { + if csifault.IsNonStorageFault(faultType) { + faultType = csifault.AddCsiNonStoragePrefix(ctx, faultType) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", prometheus.PrometheusCreateVolumeOpType, volumeType, faultType) @@ -1884,7 +1875,7 @@ func (c *controller) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequ deleteVolumeInternal := func() ( *csi.DeleteVolumeResponse, string, error) { log.Infof("DeleteVolume: called with args: %+v", *req) - //TODO: If the err is returned by invoking CNS API, then faultType should be + // TODO: If the err is returned by invoking CNS API, then faultType should be // populated by the underlying layer. // If the request failed due to validate the request, "csi.fault.InvalidArgument" will be return. // If thr reqeust failed due to object not found, "csi.fault.NotFound" will be return. 
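// Minimal sketch (assumed prefix value and membership set; the real helpers live in
// the csifault package) of the pattern repeated above: before a failed operation is
// reported to Prometheus, faults that did not originate from storage get a
// non-storage prefix so the metrics can distinguish them from storage faults.
package main

import (
	"fmt"
	"strings"
)

// The prefix and the fault set below are illustrative assumptions, not the
// driver's actual values.
const nonStoragePrefix = "csi.fault.nonstorage."

var nonStorageFaults = map[string]bool{
	"csi.fault.InvalidArgument": true,
	"csi.fault.NotFound":        true,
}

func isNonStorageFault(fault string) bool { return nonStorageFaults[fault] }

func addNonStoragePrefix(fault string) string {
	if strings.HasPrefix(fault, nonStoragePrefix) {
		return fault
	}
	return nonStoragePrefix + fault
}

func main() {
	faultType := "csi.fault.InvalidArgument"
	if isNonStorageFault(faultType) {
		faultType = addNonStoragePrefix(faultType)
	}
	fmt.Println(faultType)
}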
@@ -1999,6 +1990,9 @@ func (c *controller) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequ resp, faultType, err := deleteVolumeInternal() log.Debugf("deleteVolumeInternal: returns fault %q for volume %q", faultType, req.VolumeId) if err != nil { + if csifault.IsNonStorageFault(faultType) { + faultType = csifault.AddCsiNonStoragePrefix(ctx, faultType) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", prometheus.PrometheusDeleteVolumeOpType, volumeType, faultType) @@ -2024,7 +2018,7 @@ func (c *controller) ControllerPublishVolume(ctx context.Context, req *csi.Contr controllerPublishVolumeInternal := func() ( *csi.ControllerPublishVolumeResponse, string, error) { log.Infof("ControllerPublishVolume: called with args %+v", *req) - //TODO: If the err is returned by invoking CNS API, then faultType should be + // TODO: If the err is returned by invoking CNS API, then faultType should be // populated by the underlying layer. // If the request failed due to validate the request, "csi.fault.InvalidArgument" will be return. // If thr reqeust failed due to object not found, "csi.fault.NotFound" will be return. @@ -2113,17 +2107,12 @@ func (c *controller) ControllerPublishVolume(ctx context.Context, req *csi.Contr } } var nodevm *cnsvsphere.VirtualMachine - if commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.UseCSINodeId) { - // if node is not yet updated to run the release of the driver publishing Node VM UUID as Node ID - // look up Node by name - nodevm, err = c.nodeMgr.GetNodeByNameOrUUID(ctx, req.NodeId) - if err == node.ErrNodeNotFound { - log.Infof("Performing node VM lookup using node VM UUID: %q", req.NodeId) - nodevm, err = c.nodeMgr.GetNodeByUuid(ctx, req.NodeId) - } - - } else { - nodevm, err = c.nodeMgr.GetNodeByName(ctx, req.NodeId) + // if node is not yet updated to run the release of the driver publishing Node VM UUID as Node ID + // look up Node by name + nodevm, err = c.nodeMgr.GetNodeVMByNameOrUUID(ctx, req.NodeId) + if err == node.ErrNodeNotFound { + log.Infof("Performing node VM lookup using node VM UUID: %q", req.NodeId) + nodevm, err = c.nodeMgr.GetNodeVMByUuid(ctx, req.NodeId) } if err != nil { return nil, csifault.CSIInternalFault, logger.LogNewErrorCodef(log, codes.Internal, @@ -2148,6 +2137,9 @@ func (c *controller) ControllerPublishVolume(ctx context.Context, req *csi.Contr resp, faultType, err := controllerPublishVolumeInternal() log.Debugf("controllerPublishVolumeInternal: returns fault %q for volume %q", faultType, req.VolumeId) if err != nil { + if csifault.IsNonStorageFault(faultType) { + faultType = csifault.AddCsiNonStoragePrefix(ctx, faultType) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", prometheus.PrometheusAttachVolumeOpType, volumeType, faultType) @@ -2174,7 +2166,7 @@ func (c *controller) ControllerUnpublishVolume(ctx context.Context, req *csi.Con *csi.ControllerUnpublishVolumeResponse, string, error) { var faultType string log.Infof("ControllerUnpublishVolume: called with args %+v", *req) - //TODO: If the err is returned by invoking CNS API, then faultType should be + // TODO: If the err is returned by invoking CNS API, then faultType should be // populated by the underlying layer. // If the request failed due to validate the request, "csi.fault.InvalidArgument" will be return. // If thr reqeust failed due to object not found, "csi.fault.NotFound" will be return. 
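// Sketch of the lookup fallback used in ControllerPublishVolume and
// ControllerUnpublishVolume above: the CSI NodeId is first resolved as a node name or
// UUID through the node manager cache, and only when that returns the not-found
// sentinel is it retried as a raw VM UUID. The types and errors here are stand-ins
// for the node manager's real ones.
package main

import (
	"errors"
	"fmt"
)

var errNodeNotFound = errors.New("node not found")

type nodeVM struct{ Name string }

type nodeManager struct{ byName map[string]*nodeVM }

func (m *nodeManager) getByNameOrUUID(id string) (*nodeVM, error) {
	if vm, ok := m.byName[id]; ok {
		return vm, nil
	}
	return nil, errNodeNotFound
}

func (m *nodeManager) getByUUID(uuid string) (*nodeVM, error) {
	return &nodeVM{Name: "vm-" + uuid}, nil
}

func (m *nodeManager) lookup(id string) (*nodeVM, error) {
	vm, err := m.getByNameOrUUID(id)
	if errors.Is(err, errNodeNotFound) {
		// Fall back to a UUID-based lookup, mirroring GetNodeVMByUuid above.
		vm, err = m.getByUUID(id)
	}
	return vm, err
}

func main() {
	m := &nodeManager{byName: map[string]*nodeVM{"worker-1": {Name: "worker-1"}}}
	vm, _ := m.lookup("42305c5e-0000-4000-8000-000000000001")
	fmt.Println(vm.Name)
}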
@@ -2254,16 +2246,12 @@ func (c *controller) ControllerUnpublishVolume(ctx context.Context, req *csi.Con // Block Volume. volumeType = prometheus.PrometheusBlockVolumeType var nodevm *cnsvsphere.VirtualMachine - if commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.UseCSINodeId) { - // if node is not yet updated to run the release of the driver publishing Node VM UUID as Node ID - // look up Node by name - nodevm, err = c.nodeMgr.GetNodeByNameOrUUID(ctx, req.NodeId) - if err == node.ErrNodeNotFound { - log.Infof("Performing node VM lookup using node VM UUID: %q", req.NodeId) - nodevm, err = c.nodeMgr.GetNodeByUuid(ctx, req.NodeId) - } - } else { - nodevm, err = c.nodeMgr.GetNodeByName(ctx, req.NodeId) + // if node is not yet updated to run the release of the driver publishing Node VM UUID as Node ID + // look up Node by name + nodevm, err = c.nodeMgr.GetNodeVMByNameOrUUID(ctx, req.NodeId) + if err == node.ErrNodeNotFound { + log.Infof("Performing node VM lookup using node VM UUID: %q", req.NodeId) + nodevm, err = c.nodeMgr.GetNodeVMByUuid(ctx, req.NodeId) } if err != nil { if err == cnsvsphere.ErrVMNotFound { @@ -2286,6 +2274,9 @@ func (c *controller) ControllerUnpublishVolume(ctx context.Context, req *csi.Con resp, faultType, err := controllerUnpublishVolumeInternal() log.Debugf("controllerUnpublishVolumeInternal: returns fault %q for volume %q", faultType, req.VolumeId) if err != nil { + if csifault.IsNonStorageFault(faultType) { + faultType = csifault.AddCsiNonStoragePrefix(ctx, faultType) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", prometheus.PrometheusDetachVolumeOpType, volumeType, faultType) @@ -2327,8 +2318,15 @@ func (c *controller) ControllerExpandVolume(ctx context.Context, req *csi.Contro // csifault.CSIInternalFault csifault.CSIUnimplementedFault csifault.CSIInvalidArgumentFault if strings.Contains(req.VolumeId, ".vmdk") { - return nil, csifault.CSIUnimplementedFault, logger.LogNewErrorCodef(log, codes.Unimplemented, - "cannot expand migrated vSphere volume. :%q", req.VolumeId) + if err := initVolumeMigrationService(ctx, c); err != nil { + // Error is already wrapped in CSI error code. 
+ return nil, csifault.CSIInternalFault, err + } + req.VolumeId, err = volumeMigrationService.GetVolumeID(ctx, &migration.VolumeSpec{VolumePath: req.VolumeId}, false) + if err != nil { + return nil, csifault.CSIInternalFault, logger.LogNewErrorCodef(log, codes.Internal, + "failed to get VolumeID from volumeMigrationService for volumePath: %q", req.VolumeId) + } } // Fetch vCenterHost, vCenterManager & volumeManager for given volume, based on VC configuration @@ -2385,7 +2383,7 @@ func (c *controller) ControllerExpandVolume(ctx context.Context, req *csi.Contro volSizeMB, commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.AsyncQueryVolume)) if err != nil { return nil, faultType, logger.LogNewErrorCodef(log, codes.Internal, - "failed to expand volume: %q to size: %d with error: %+v", volumeID, volSizeMB, err) + "failed to expand volume: %q to size: %d with error: %+v", "df", volSizeMB, err) } // Always set nodeExpansionRequired to true, even if requested size is equal @@ -2411,6 +2409,9 @@ func (c *controller) ControllerExpandVolume(ctx context.Context, req *csi.Contro resp, faultType, err := controllerExpandVolumeInternal() if err != nil { log.Debugf("controllerExpandVolumeInternal: returns fault %q for volume %q", faultType, req.VolumeId) + if csifault.IsNonStorageFault(faultType) { + faultType = csifault.AddCsiNonStoragePrefix(ctx, faultType) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", prometheus.PrometheusExpandVolumeOpType, volumeType, faultType) @@ -2488,6 +2489,7 @@ func (c *controller) ListVolumes(ctx context.Context, req *csi.ListVolumesReques querySelection := cnstypes.CnsQuerySelection{ Names: []string{ string(cnstypes.QuerySelectionNameTypeVolumeType), + string(cnstypes.QuerySelectionNameTypeVolumeName), }, } // For multi-VC configuration, query volumes from all vCenters @@ -2511,7 +2513,22 @@ func (c *controller) ListVolumes(ctx context.Context, req *csi.ListVolumesReques } CNSVolumesforListVolume = cnsQueryResult.Volumes } + + // Get all nodes from the vanilla K8s cluster from the node manager + allNodeVMs, err := c.nodeMgr.GetAllNodes(ctx) + if err != nil { + return nil, csifault.CSIInternalFault, logger.LogNewErrorCodef(log, codes.Internal, + "failed to get nodes(node vms) in the vanilla cluster. Error: %v", err) + } + + // Fetching below map once per resync cycle to be used later while processing the volumes + volumeIDToNodeUUIDMap, err = getBlockVolumeIDToNodeUUIDMap(ctx, c, allNodeVMs) + if err != nil { + return nil, csifault.CSIInternalFault, logger.LogNewErrorCodef(log, codes.Internal, + "get block volumeIDToNodeUUIDMap failed with err = %+v ", err) + } } + // Step 3: If the difference between number of K8s volumes and CNS volumes is greater than threshold, // fail the operation, as it can result in too many attach calls. 
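// Sketch of the in-tree volume handling added to ControllerExpandVolume above: a
// request that carries a ".vmdk" volume path (a migrated in-tree volume) is translated
// to a CNS volume ID before the expand proceeds, instead of being rejected. The
// resolver interface below is a stand-in for the volume migration service.
package main

import (
	"fmt"
	"strings"
)

type volumeIDResolver interface {
	GetVolumeID(volumePath string) (string, error)
}

type fakeResolver struct{ ids map[string]string }

func (r *fakeResolver) GetVolumeID(volumePath string) (string, error) {
	if id, ok := r.ids[volumePath]; ok {
		return id, nil
	}
	return "", fmt.Errorf("no volume ID registered for %q", volumePath)
}

// normalizeVolumeID returns a CNS volume ID for either form of identifier.
func normalizeVolumeID(r volumeIDResolver, volumeID string) (string, error) {
	// Mirrors the strings.Contains(req.VolumeId, ".vmdk") check above.
	if strings.Contains(volumeID, ".vmdk") {
		return r.GetVolumeID(volumeID)
	}
	return volumeID, nil
}

func main() {
	r := &fakeResolver{ids: map[string]string{
		"[vsanDatastore] kubevols/pv-1.vmdk": "b3af4d26-0000-4000-8000-000000000001",
	}}
	id, err := normalizeVolumeID(r, "[vsanDatastore] kubevols/pv-1.vmdk")
	fmt.Println(id, err)
}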
if len(volIDsInK8s)-len(CNSVolumesforListVolume) > cfg.Global.ListVolumeThreshold { @@ -2526,21 +2543,13 @@ func (c *controller) ListVolumes(ctx context.Context, req *csi.ListVolumesReques maxEntries = len(CNSVolumesforListVolume) } // Step 4: process queryLimit number of items starting from ListVolumeRequest.start_token - var allNodeVMs []*cnsvsphere.VirtualMachine var entries []*csi.ListVolumesResponse_Entry - // Get all nodes from the vanilla K8s cluster from the node manager - allNodeVMs, err = c.nodeMgr.GetAllNodes(ctx) - if err != nil { - return nil, csifault.CSIInternalFault, logger.LogNewErrorCodef(log, codes.Internal, - "failed to get nodes(node vms) in the vanilla cluster. Error: %v", err) - } - nextToken := "" log.Debugf("Starting token: %d, Length of Query volume result: %d, Max entries: %d ", startingToken, len(CNSVolumesforListVolume), maxEntries) entries, nextToken, volumeType, err = c.processQueryResultsListVolumes(ctx, startingToken, maxEntries, - CNSVolumesforListVolume, allNodeVMs) + CNSVolumesforListVolume) if err != nil { return nil, csifault.CSIInternalFault, fmt.Errorf("error while processing query results for list "+ " volumes, err: %v", err) @@ -2556,6 +2565,9 @@ func (c *controller) ListVolumes(ctx context.Context, req *csi.ListVolumesReques listVolResponse, faultType, err := listVolumesInternal() log.Debugf("List volume response: %+v", listVolResponse) if err != nil { + if csifault.IsNonStorageFault(faultType) { + faultType = csifault.AddCsiNonStoragePrefix(ctx, faultType) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", prometheus.PrometheusListVolumeOpType, volumeType, faultType) @@ -2569,7 +2581,7 @@ func (c *controller) ListVolumes(ctx context.Context, req *csi.ListVolumesReques } func (c *controller) processQueryResultsListVolumes(ctx context.Context, startingToken int, maxEntries int, - cnsVolumes []cnstypes.CnsVolume, allNodeVMs []*cnsvsphere.VirtualMachine) ([]*csi.ListVolumesResponse_Entry, + cnsVolumes []cnstypes.CnsVolume) ([]*csi.ListVolumesResponse_Entry, string, string, error) { volumeType := "" @@ -2579,11 +2591,6 @@ func (c *controller) processQueryResultsListVolumes(ctx context.Context, startin log := logger.GetLogger(ctx) var entries []*csi.ListVolumesResponse_Entry - volumeIDToNodeUUIDMap, err := getBlockVolumeIDToNodeUUIDMap(ctx, c, allNodeVMs) - if err != nil { - return entries, nextToken, volumeType, err - } - for i := startingToken; i < len(cnsVolumes); i++ { if cnsVolumes[i].VolumeType == common.FileVolumeType { // If this is multi-VC configuration, then @@ -2604,7 +2611,7 @@ func (c *controller) processQueryResultsListVolumes(ctx context.Context, startin publishedNodeIds := commonco.ContainerOrchestratorUtility.GetNodesForVolumes(ctx, []string{fileVolID}) for volID, nodeName := range publishedNodeIds { if volID == fileVolID && len(nodeName) != 0 { - nodeVMObj, err := c.nodeMgr.GetNodeByName(ctx, publishedNodeIds[fileVolID][0]) + nodeVMObj, err := c.nodeMgr.GetNodeVMByNameAndUpdateCache(ctx, publishedNodeIds[fileVolID][0]) if err != nil { log.Errorf("Failed to get node vm object from the node name, err:%v", err) return entries, nextToken, volumeType, err @@ -2636,9 +2643,21 @@ func (c *controller) processQueryResultsListVolumes(ctx context.Context, startin nodeVMUUID, found := volumeIDToNodeUUIDMap[blockVolID] if found { volCounter += 1 - //Populate csi.Volume info for the given volume + volumeId := blockVolID + // this check is required as 
volumeMigrationService is not initialized + // when multi-vc is enabled and there is more than 1 vc + if volumeMigrationService != nil { + migratedVolumePath, err := volumeMigrationService.GetVolumePathFromMigrationServiceCache(ctx, blockVolID) + if err != nil && err == common.ErrNotFound { + log.Debugf("volumeID: %v not found in migration service in-memory cache "+ + "so it's not a migrated in-tree volume", blockVolID) + } else if migratedVolumePath != "" { + volumeId = migratedVolumePath + } + } + // Populate csi.Volume info for the given volume blockVolumeInfo := &csi.Volume{ - VolumeId: blockVolID, + VolumeId: volumeId, } // Getting published nodes volStatus := &csi.ListVolumesResponse_VolumeStatus{ @@ -3199,7 +3218,7 @@ func queryAllVolumeSnapshotsForMultiVC(ctx context.Context, c *controller, token CNSSnapshotsForListSnapshots = snapQueryEntries CNSVolumeDetailsMap = cnsVolumeDetailsMap } else { - //fetch snapshots + // fetch snapshots snapQueryEntries, volumeDetails, err := getSnapshotsAndSourceVolumeDetails(ctx, vCenterManager, c.manager.VolumeManager, c.manager.VcenterConfig.Host) if err != nil { diff --git a/pkg/csi/service/vanilla/controller_test.go b/pkg/csi/service/vanilla/controller_test.go index ddff1d3bad..a6bf3125b6 100644 --- a/pkg/csi/service/vanilla/controller_test.go +++ b/pkg/csi/service/vanilla/controller_test.go @@ -126,7 +126,7 @@ func configFromSimWithTLS(tlsConfig *tls.Config, insecureAllowed bool) (*config. cfg.Global.VCenterIP = s.URL.Hostname() cfg.Global.VCenterPort = s.URL.Port() - cfg.Global.User = s.URL.User.Username() + cfg.Global.User = s.URL.User.Username() + "@vsphere.local" cfg.Global.Password, _ = s.URL.User.Password() cfg.Global.Datacenters = "DC0" @@ -169,7 +169,7 @@ func configFromEnvOrSim() (*config.Config, func()) { return cfg, func() {} } -func (f *FakeNodeManager) Initialize(ctx context.Context, useNodeUuid bool) error { +func (f *FakeNodeManager) Initialize(ctx context.Context) error { return nil } @@ -223,11 +223,12 @@ func (f *FakeNodeManager) GetSharedDatastoresInK8SCluster(ctx context.Context) ( }, nil } -func (f *FakeNodeManager) GetNodeByName(ctx context.Context, nodeName string) (*cnsvsphere.VirtualMachine, error) { +func (f *FakeNodeManager) GetNodeVMByNameAndUpdateCache(ctx context.Context, + nodeName string) (*cnsvsphere.VirtualMachine, error) { var vm *cnsvsphere.VirtualMachine var t *testing.T if v := os.Getenv("VSPHERE_DATACENTER"); v != "" { - nodeUUID, err := k8s.GetNodeUUID(ctx, f.k8sClient, nodeName, false) + nodeUUID, err := k8s.GetNodeUUID(ctx, f.k8sClient, nodeName) if err != nil { t.Errorf("failed to get providerId from node: %q. 
Err: %v", nodeName, err) return nil, err @@ -246,16 +247,16 @@ func (f *FakeNodeManager) GetNodeByName(ctx context.Context, nodeName string) (* return vm, nil } -func (f *FakeNodeManager) GetNodeByNameOrUUID( +func (f *FakeNodeManager) GetNodeVMByNameOrUUID( ctx context.Context, nodeNameOrUUID string) (*cnsvsphere.VirtualMachine, error) { - return f.GetNodeByName(ctx, nodeNameOrUUID) + return f.GetNodeVMByNameAndUpdateCache(ctx, nodeNameOrUUID) } func (f *FakeNodeManager) GetNodeNameByUUID(ctx context.Context, nodeUUID string) (string, error) { return "", nil } -func (f *FakeNodeManager) GetNodeByUuid(ctx context.Context, nodeUuid string) (*cnsvsphere.VirtualMachine, error) { +func (f *FakeNodeManager) GetNodeVMByUuid(ctx context.Context, nodeUuid string) (*cnsvsphere.VirtualMachine, error) { var vm *cnsvsphere.VirtualMachine var t *testing.T if v := os.Getenv("VSPHERE_DATACENTER"); v != "" { diff --git a/pkg/csi/service/wcp/controller.go b/pkg/csi/service/wcp/controller.go index 06c64f1d55..a2895a92c2 100644 --- a/pkg/csi/service/wcp/controller.go +++ b/pkg/csi/service/wcp/controller.go @@ -25,7 +25,6 @@ import ( "sync" "time" - "github.com/vmware/govmomi/vim25/types" "google.golang.org/protobuf/types/known/timestamppb" "github.com/container-storage-interface/spec/lib/go/csi" @@ -69,6 +68,8 @@ var ( csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, csi.ControllerServiceCapability_RPC_EXPAND_VOLUME, + csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, + csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS, } checkCompatibleDataStores = true ) @@ -78,8 +79,11 @@ var getCandidateDatastores = cnsvsphere.GetCandidateDatastoresInCluster // Contains list of clusterComputeResourceMoIds on which supervisor cluster is deployed. var clusterComputeResourceMoIds = make([]string, 0) -var expectedStartingIndex = 0 -var cnsVolumeIDs = make([]string, 0) +var ( + expectedStartingIndex = 0 + cnsVolumeIDs = make([]string, 0) + vmMoidToHostMoid, volumeIDToVMMap map[string]string +) type controller struct { manager *common.Manager @@ -125,6 +129,7 @@ func (c *controller) Init(config *cnsconfig.Config, version string) error { log.Errorf("failed to get VirtualCenterConfig. err=%v", err) return err } + vcenterconfig.ReloadVCConfigForNewClient = true vcManager := cnsvsphere.GetVirtualCenterManager(ctx) vcenter, err := vcManager.RegisterVirtualCenter(ctx, vcenterconfig) if err != nil { @@ -239,8 +244,8 @@ func (c *controller) Init(config *cnsconfig.Config, version string) error { log.Infof("Successfully reloaded configuration from: %q", cfgPath) break } - log.Errorf("failed to reload configuration. will retry again in 5 seconds. err: %+v", reloadConfigErr) - time.Sleep(5 * time.Second) + log.Errorf("failed to reload configuration. will retry again in 60 seconds. 
err: %+v", reloadConfigErr) + time.Sleep(60 * time.Second) } } // Handling create event for reconnecting to VC when ca file is @@ -318,6 +323,7 @@ func (c *controller) ReloadConfiguration(reconnectToVCFromNewConfig bool) error return err } if newVCConfig != nil { + newVCConfig.ReloadVCConfigForNewClient = true var vcenter *cnsvsphere.VirtualCenter if c.manager.VcenterConfig.Host != newVCConfig.Host || c.manager.VcenterConfig.Username != newVCConfig.Username || @@ -715,6 +721,17 @@ func (c *controller) createBlockVolume(ctx context.Context, req *csi.CreateVolum } } + // Set the Snapshot VolumeContentSource in the CreateVolumeResponse + if contentSourceSnapshotID != "" { + resp.Volume.ContentSource = &csi.VolumeContentSource{ + Type: &csi.VolumeContentSource_Snapshot{ + Snapshot: &csi.VolumeContentSource_SnapshotSource{ + SnapshotId: contentSourceSnapshotID, + }, + }, + } + } + return resp, "", nil } @@ -861,6 +878,9 @@ func (c *controller) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequ log.Debugf("createVolumeInternal: returns fault %q", faultType) if err != nil { + if csifault.IsNonStorageFault(faultType) { + faultType = csifault.AddCsiNonStoragePrefix(ctx, faultType) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", prometheus.PrometheusCreateVolumeOpType, volumeType, faultType) @@ -943,6 +963,9 @@ func (c *controller) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequ log.Debugf("deleteVolumeInternal: returns fault %q for volume %q", faultType, req.VolumeId) if err != nil { + if csifault.IsNonStorageFault(faultType) { + faultType = csifault.AddCsiNonStoragePrefix(ctx, faultType) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", prometheus.PrometheusDeleteVolumeOpType, volumeType, faultType) @@ -1098,6 +1121,9 @@ func (c *controller) ControllerPublishVolume(ctx context.Context, req *csi.Contr log.Debugf("controllerPublishVolumeInternal: returns fault %q for volume %q", faultType, req.VolumeId) if err != nil { + if csifault.IsNonStorageFault(faultType) { + faultType = csifault.AddCsiNonStoragePrefix(ctx, faultType) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", prometheus.PrometheusAttachVolumeOpType, volumeType, faultType) @@ -1247,6 +1273,9 @@ func (c *controller) ControllerUnpublishVolume(ctx context.Context, req *csi.Con log.Debugf("controllerUnpublishVolumeInternal: returns fault %q for volume %q", faultType, req.VolumeId) if err != nil { + if csifault.IsNonStorageFault(faultType) { + faultType = csifault.AddCsiNonStoragePrefix(ctx, faultType) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", prometheus.PrometheusDetachVolumeOpType, volumeType, faultType) @@ -1337,6 +1366,13 @@ func (c *controller) ListVolumes(ctx context.Context, req *csi.ListVolumesReques for _, cnsVolume := range cnsQueryVolumes.Volumes { cnsVolumeIDs = append(cnsVolumeIDs, cnsVolume.VolumeId.Id) } + + // Get volume ID to VMMap and vmMoidToHostMoid map + vmMoidToHostMoid, volumeIDToVMMap, err = c.GetVolumeToHostMapping(ctx) + if err != nil { + log.Errorf("failed to get VM MoID to Host MoID map, err:%v", err) + return nil, csifault.CSIInternalFault, status.Error(codes.Internal, "failed to get VM MoID to Host MoID map") + } } // If the difference between the volumes 
reported by Kubernetes and CNS @@ -1369,7 +1405,7 @@ func (c *controller) ListVolumes(ctx context.Context, req *csi.ListVolumesReques volumeIDs = append(volumeIDs, cnsVolumeIDs[i]) } - response, err := getVolumeIDToVMMap(ctx, c, volumeIDs) + response, err := getVolumeIDToVMMap(ctx, volumeIDs, vmMoidToHostMoid, volumeIDToVMMap) if err != nil { log.Errorf("Error while generating ListVolume response, err:%v", err) return nil, csifault.CSIInternalFault, status.Error(codes.Internal, "Error while generating ListVolume response") @@ -1389,6 +1425,9 @@ func (c *controller) ListVolumes(ctx context.Context, req *csi.ListVolumesReques } resp, faultType, err := controllerListVolumeInternal() if err != nil { + if csifault.IsNonStorageFault(faultType) { + faultType = csifault.AddCsiNonStoragePrefix(ctx, faultType) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", prometheus.PrometheusListVolumeOpType, volumeType, faultType) @@ -1398,7 +1437,7 @@ func (c *controller) ListVolumes(ctx context.Context, req *csi.ListVolumesReques prometheus.CsiControlOpsHistVec.WithLabelValues(volumeType, prometheus.PrometheusListVolumeOpType, prometheus.PrometheusPassStatus, faultType).Observe(time.Since(start).Seconds()) } - return resp, nil + return resp, err } func (c *controller) GetCapacity(ctx context.Context, req *csi.GetCapacityRequest) ( @@ -1421,11 +1460,6 @@ func (c *controller) ControllerGetCapabilities(ctx context.Context, req *csi.Con csi.ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES) } - if commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.BlockVolumeSnapshot) { - controllerCaps = append(controllerCaps, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, - csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS) - } - for _, cap := range controllerCaps { c := &csi.ControllerServiceCapability{ Type: &csi.ControllerServiceCapability_Rpc{ @@ -1464,7 +1498,7 @@ func (c *controller) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshot "cannot snapshot migrated vSphere volume. :%q", volumeID) } volumeType = prometheus.PrometheusBlockVolumeType - // Query capacity in MB and datastore url for block volume snapshot + // Query capacity in MB for block volume snapshot volumeIds := []cnstypes.CnsVolumeId{{Id: volumeID}} cnsVolumeDetailsMap, err := utils.QueryVolumeDetailsUtil(ctx, c.manager.VolumeManager, volumeIds) if err != nil { @@ -1475,52 +1509,15 @@ func (c *controller) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshot "cns query volume did not return the volume: %s", volumeID) } snapshotSizeInMB := cnsVolumeDetailsMap[volumeID].SizeInMB - datastoreUrl := cnsVolumeDetailsMap[volumeID].DatastoreUrl + if cnsVolumeDetailsMap[volumeID].VolumeType != common.BlockVolumeType { return nil, logger.LogNewErrorCodef(log, codes.FailedPrecondition, "queried volume doesn't have the expected volume type. Expected VolumeType: %v. 
"+ "Queried VolumeType: %v", volumeType, cnsVolumeDetailsMap[volumeID].VolumeType) } - // Check if snapshots number of this volume reaches the granular limit on VSAN/VVOL - maxSnapshotsPerBlockVolume := c.manager.CnsConfig.Snapshot.GlobalMaxSnapshotsPerBlockVolume - log.Infof("The limit of the maximum number of snapshots per block volume is "+ - "set to the global maximum (%v) by default.", maxSnapshotsPerBlockVolume) - if c.manager.CnsConfig.Snapshot.GranularMaxSnapshotsPerBlockVolumeInVSAN > 0 || - c.manager.CnsConfig.Snapshot.GranularMaxSnapshotsPerBlockVolumeInVVOL > 0 { - - var isGranularMaxEnabled bool - if strings.Contains(datastoreUrl, strings.ToLower(string(types.HostFileSystemVolumeFileSystemTypeVsan))) { - if c.manager.CnsConfig.Snapshot.GranularMaxSnapshotsPerBlockVolumeInVSAN > 0 { - maxSnapshotsPerBlockVolume = c.manager.CnsConfig.Snapshot.GranularMaxSnapshotsPerBlockVolumeInVSAN - isGranularMaxEnabled = true - - } - } else if strings.Contains(datastoreUrl, strings.ToLower(string(types.HostFileSystemVolumeFileSystemTypeVVOL))) { - if c.manager.CnsConfig.Snapshot.GranularMaxSnapshotsPerBlockVolumeInVVOL > 0 { - maxSnapshotsPerBlockVolume = c.manager.CnsConfig.Snapshot.GranularMaxSnapshotsPerBlockVolumeInVVOL - isGranularMaxEnabled = true - } - } - - if isGranularMaxEnabled { - log.Infof("The limit of the maximum number of snapshots per block volume on datastore %q is "+ - "overridden by the granular maximum (%v).", datastoreUrl, maxSnapshotsPerBlockVolume) - } - } - // Check if snapshots number of this volume reaches the limit - snapshotList, _, err := common.QueryVolumeSnapshotsByVolumeID(ctx, c.manager.VolumeManager, volumeID, - common.QuerySnapshotLimit) - if err != nil { - return nil, logger.LogNewErrorCodef(log, codes.Internal, - "failed to query snapshots of volume %s for the limit check. Error: %v", volumeID, err) - } - - if len(snapshotList) >= maxSnapshotsPerBlockVolume { - return nil, logger.LogNewErrorCodef(log, codes.FailedPrecondition, - "the number of snapshots on the source volume %s reaches the configured maximum (%v)", - volumeID, c.manager.CnsConfig.Snapshot.GlobalMaxSnapshotsPerBlockVolume) - } + // TODO: We may need to add logic to check the limit of max number of snapshots by using + // GlobalMaxSnapshotsPerBlockVolume etc. variables in the future. // the returned snapshotID below is a combination of CNS VolumeID and CNS SnapshotID concatenated by the "+" // sign. That is, a string of "+". Because, all other CNS snapshot APIs still require both @@ -1752,6 +1749,9 @@ func (c *controller) ControllerExpandVolume(ctx context.Context, req *csi.Contro log.Debugf("controllerExpandVolumeInternal: returns fault %q for volume %q", faultType, req.VolumeId) if err != nil { + if csifault.IsNonStorageFault(faultType) { + faultType = csifault.AddCsiNonStoragePrefix(ctx, faultType) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", prometheus.PrometheusExpandVolumeOpType, volumeType, faultType) diff --git a/pkg/csi/service/wcp/controller_helper.go b/pkg/csi/service/wcp/controller_helper.go index 09c0da6674..dcd7dabc4a 100644 --- a/pkg/csi/service/wcp/controller_helper.go +++ b/pkg/csi/service/wcp/controller_helper.go @@ -598,7 +598,8 @@ func (c *controller) GetVolumeToHostMapping(ctx context.Context) (map[string]str // getVolumeIDToVMMap returns the csi list volume response by computing the volumeID to nodeNames map for // fake attached volumes and non-fake attached volumes. 
-func getVolumeIDToVMMap(ctx context.Context, c *controller, volumeIDs []string) (*csi.ListVolumesResponse, error) { +func getVolumeIDToVMMap(ctx context.Context, volumeIDs []string, vmMoidToHostMoid, + volumeIDToVMMap map[string]string) (*csi.ListVolumesResponse, error) { log := logger.GetLogger(ctx) response := &csi.ListVolumesResponse{} @@ -609,6 +610,7 @@ func getVolumeIDToVMMap(ctx context.Context, c *controller, volumeIDs []string) fakeAttachedVolumes = append(fakeAttachedVolumes, volumeID) } } + // Process fake attached volumes log.Debugf("Fake attached volumes %v", fakeAttachedVolumes) volumeIDToNodesMap := commonco.ContainerOrchestratorUtility.GetNodesForVolumes(ctx, fakeAttachedVolumes) @@ -626,13 +628,6 @@ func getVolumeIDToVMMap(ctx context.Context, c *controller, volumeIDs []string) response.Entries = append(response.Entries, entry) } - // Process remaining volumes - vmMoidToHostMoid, volumeIDToVMMap, err := c.GetVolumeToHostMapping(ctx) - if err != nil { - log.Errorf("failed to get VM MoID to Host MoID map, err:%v", err) - return nil, fmt.Errorf("failed to get VM MoID to Host MoID map, err: %v", err) - } - hostNames := commonco.ContainerOrchestratorUtility.GetNodeIDtoNameMap(ctx) if len(hostNames) == 0 { log.Errorf("no hostnames found in the NodeIDtoName map") @@ -672,6 +667,5 @@ func getVolumeIDToVMMap(ctx context.Context, c *controller, volumeIDs []string) } response.Entries = append(response.Entries, entry) } - return response, nil } diff --git a/pkg/csi/service/wcp/controller_test.go b/pkg/csi/service/wcp/controller_test.go index f78f7d21c1..b86a3327ed 100644 --- a/pkg/csi/service/wcp/controller_test.go +++ b/pkg/csi/service/wcp/controller_test.go @@ -54,6 +54,11 @@ import ( const ( testVolumeName = "test-pvc" testClusterName = "test-cluster" + // TODO: We may need to decide this value by checking GlobalMaxSnapshotsPerBlockVolume + // variable's value when it is set for WCP. + // Currently keeping this as 3, since it is the recommended value of snapshots + // per block volume in vSphere. + maxNumOfSnapshots = 3 ) var ( @@ -100,7 +105,7 @@ func configFromSimWithTLS(tlsConfig *tls.Config, insecureAllowed bool) (*config. cfg.Global.VCenterIP = s.URL.Hostname() cfg.Global.VCenterPort = s.URL.Port() - cfg.Global.User = s.URL.User.Username() + cfg.Global.User = s.URL.User.Username() + "@vsphere.local" cfg.Global.Password, _ = s.URL.User.Password() cfg.Global.Datacenters = "DC0" @@ -124,11 +129,6 @@ func configFromSimWithTLS(tlsConfig *tls.Config, insecureAllowed bool) (*config. Datacenters: cfg.Global.Datacenters, } - // set up the default global maximum of number of snapshots if unset - if cfg.Snapshot.GlobalMaxSnapshotsPerBlockVolume == 0 { - cfg.Snapshot.GlobalMaxSnapshotsPerBlockVolume = config.DefaultGlobalMaxSnapshotsPerBlockVolume - } - return cfg, func() { s.Close() model.Remove() @@ -630,7 +630,6 @@ func TestWCPCreateDeleteSnapshot(t *testing.T) { func TestListSnapshots(t *testing.T) { ct := getControllerTest(t) - numOfSnapshots := ct.config.Snapshot.GlobalMaxSnapshotsPerBlockVolume // Create. 
params := make(map[string]string) if v := os.Getenv("VSPHERE_DATASTORE_URL"); v != "" { @@ -679,7 +678,7 @@ func TestListSnapshots(t *testing.T) { snapshots := make(map[string]string) var deleteSnapshotList []string - for i := 0; i < numOfSnapshots; i++ { + for i := 0; i < maxNumOfSnapshots; i++ { // Snapshot a volume reqCreateSnapshot := &csi.CreateSnapshotRequest{ SourceVolumeId: volID, @@ -751,7 +750,6 @@ func TestListSnapshots(t *testing.T) { func TestListSnapshotsOnSpecificVolume(t *testing.T) { ct := getControllerTest(t) - numOfSnapshots := ct.config.Snapshot.GlobalMaxSnapshotsPerBlockVolume // Create. params := make(map[string]string) if v := os.Getenv("VSPHERE_DATASTORE_URL"); v != "" { @@ -800,7 +798,7 @@ func TestListSnapshotsOnSpecificVolume(t *testing.T) { snapshots := make(map[string]string) var deleteSnapshotList []string - for i := 0; i < numOfSnapshots; i++ { + for i := 0; i < maxNumOfSnapshots; i++ { // Snapshot a volume reqCreateSnapshot := &csi.CreateSnapshotRequest{ SourceVolumeId: volID, @@ -873,7 +871,6 @@ func TestListSnapshotsOnSpecificVolume(t *testing.T) { func TestListSnapshotsWithToken(t *testing.T) { ct := getControllerTest(t) - numOfSnapshots := ct.config.Snapshot.GlobalMaxSnapshotsPerBlockVolume // Create. params := make(map[string]string) if v := os.Getenv("VSPHERE_DATASTORE_URL"); v != "" { @@ -922,7 +919,7 @@ func TestListSnapshotsWithToken(t *testing.T) { snapshots := make(map[string]string) var deleteSnapshotList []string - for i := 0; i < numOfSnapshots; i++ { + for i := 0; i < maxNumOfSnapshots; i++ { // Snapshot a volume reqCreateSnapshot := &csi.CreateSnapshotRequest{ SourceVolumeId: volID, diff --git a/pkg/csi/service/wcpguest/controller.go b/pkg/csi/service/wcpguest/controller.go index ede50bd388..b8c26e7362 100644 --- a/pkg/csi/service/wcpguest/controller.go +++ b/pkg/csi/service/wcpguest/controller.go @@ -27,7 +27,7 @@ import ( "github.com/container-storage-interface/spec/lib/go/csi" "github.com/davecgh/go-spew/spew" "github.com/fsnotify/fsnotify" - snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" + snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" "github.com/prometheus/client_golang/prometheus/promhttp" vmoperatortypes "github.com/vmware-tanzu/vm-operator-api/api/v1alpha1" "golang.org/x/net/context" @@ -45,6 +45,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "sigs.k8s.io/controller-runtime/pkg/client" + cnsoperatorv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator" cnsfileaccessconfigv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsfileaccessconfig/v1alpha1" commonconfig "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/config" @@ -63,6 +64,8 @@ var ( csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, csi.ControllerServiceCapability_RPC_EXPAND_VOLUME, + csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, + csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS, } ) @@ -244,7 +247,7 @@ func (c *controller) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequ *csi.CreateVolumeResponse, string, error) { log.Infof("CreateVolume: called with args %+v", *req) - //TODO: If the err is returned by invoking CNS API, then faultType should be + // TODO: If the err is returned by invoking CNS API, then faultType should be // populated by the underlying layer. 
// If the request failed due to validate the request, "csi.fault.InvalidArgument" will be return. // If thr reqeust failed due to object not found, "csi.fault.NotFound" will be return. @@ -274,6 +277,15 @@ func (c *controller) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequ } volSizeMB := int64(common.RoundUpSize(volSizeBytes, common.MbInBytes)) volumeSource := req.GetVolumeContentSource() + if commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.BlockVolumeSnapshot) && + volumeSource != nil { + sourceSnapshot := volumeSource.GetSnapshot() + if sourceSnapshot == nil { + return nil, csifault.CSIInvalidArgumentFault, + logger.LogNewErrorCode(log, codes.InvalidArgument, "unsupported VolumeContentSource type") + } + volumeSnapshotName = sourceSnapshot.GetSnapshotId() + } // Get supervisorStorageClass and accessMode var supervisorStorageClass string @@ -303,10 +315,6 @@ func (c *controller) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequ } annotations[common.AnnGuestClusterRequestedTopology] = topologyAnnotation } - if commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.BlockVolumeSnapshot) && - volumeSource != nil { - volumeSnapshotName = volumeSource.GetSnapshot().GetSnapshotId() - } claim := getPersistentVolumeClaimSpecWithStorageClass(supervisorPVCName, c.supervisorNamespace, diskSize, supervisorStorageClass, getAccessMode(accessMode), annotations, volumeSnapshotName) log.Debugf("PVC claim spec is %+v", spew.Sdump(claim)) @@ -332,12 +340,29 @@ func (c *controller) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequ c.supervisorNamespace, err) log.Error(msg) eventList, err := c.supervisorClient.CoreV1().Events(c.supervisorNamespace).List(ctx, - metav1.ListOptions{FieldSelector: "involvedObject.name=" + pvc.Name}) + metav1.ListOptions{ + FieldSelector: "involvedObject.name=" + pvc.Name, + ResourceVersion: pvc.ResourceVersion, + ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan, + }) if err != nil { log.Errorf("Unable to fetch events for pvc %q/%q from supervisor cluster with err: %+v", c.supervisorNamespace, pvc.Name, err) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) } + + var failureMessage string + for _, svcPvcEvent := range eventList.Items { + if svcPvcEvent.Type == corev1.EventTypeWarning { + failureMessage = svcPvcEvent.Message + break + } + } + + if failureMessage != "" { + msg = fmt.Sprintf("%s. 
reason: %s", msg, failureMessage) + } + log.Errorf("Last observed events on the pvc %q/%q in supervisor cluster: %+v", c.supervisorNamespace, pvc.Name, spew.Sdump(eventList.Items)) return nil, csifault.CSIInternalFault, status.Errorf(codes.Internal, msg) @@ -405,6 +430,9 @@ func (c *controller) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequ resp, faultType, err := createVolumeInternal() log.Debugf("createVolumeInternal: returns fault %q", faultType) if err != nil { + if csifault.IsNonStorageFault(faultType) { + faultType = csifault.AddCsiNonStoragePrefix(ctx, faultType) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", prometheus.PrometheusCreateVolumeOpType, volumeType, faultType) @@ -431,7 +459,7 @@ func (c *controller) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequ deleteVolumeInternal := func() ( *csi.DeleteVolumeResponse, string, error) { log.Infof("DeleteVolume: called with args: %+v", *req) - //TODO: If the err is returned by invoking CNS API, then faultType should be + // TODO: If the err is returned by invoking CNS API, then faultType should be // populated by the underlying layer. // If the request failed due to validate the request, "csi.fault.InvalidArgument" will be return. // If thr reqeust failed due to object not found, "csi.fault.NotFound" will be return. @@ -481,6 +509,9 @@ func (c *controller) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequ resp, faultType, err := deleteVolumeInternal() log.Debugf("deleteVolumeInternal: returns fault %q for volume %q", faultType, req.VolumeId) if err != nil { + if csifault.IsNonStorageFault(faultType) { + faultType = csifault.AddCsiNonStoragePrefix(ctx, faultType) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", prometheus.PrometheusDeleteVolumeOpType, volumeType, faultType) @@ -506,7 +537,7 @@ func (c *controller) ControllerPublishVolume(ctx context.Context, req *csi.Contr controllerPublishVolumeInternal := func() ( *csi.ControllerPublishVolumeResponse, string, error) { log.Infof("ControllerPublishVolume: called with args %+v", *req) - //TODO: If the err is returned by invoking CNS API, then faultType should be + // TODO: If the err is returned by invoking CNS API, then faultType should be // populated by the underlying layer. // If the request failed due to validate the request, "csi.fault.InvalidArgument" will be return. // If thr reqeust failed due to object not found, "csi.fault.NotFound" will be return. @@ -542,6 +573,9 @@ func (c *controller) ControllerPublishVolume(ctx context.Context, req *csi.Contr resp, faultType, err := controllerPublishVolumeInternal() if err != nil { log.Debugf("controllerPublishVolumeInternal: returns fault %q for volume %q", faultType, req.VolumeId) + if csifault.IsNonStorageFault(faultType) { + faultType = csifault.AddCsiNonStoragePrefix(ctx, faultType) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", prometheus.PrometheusAttachVolumeOpType, volumeType, faultType) @@ -678,7 +712,7 @@ func controllerPublishForBlockVolume(ctx context.Context, req *csi.ControllerPub log.Debugf("disk UUID %v is set for the volume: %q ", diskUUID, req.VolumeId) } - //return PublishContext with diskUUID of the volume attached to node. + // return PublishContext with diskUUID of the volume attached to node. 
publishInfo := make(map[string]string) publishInfo[common.AttributeDiskType] = common.DiskTypeBlockVolume publishInfo[common.AttributeFirstClassDiskUUID] = common.FormatDiskUUID(diskUUID) @@ -842,7 +876,7 @@ func (c *controller) ControllerUnpublishVolume(ctx context.Context, req *csi.Con controllerUnpublishVolumeInternal := func() ( *csi.ControllerUnpublishVolumeResponse, string, error) { log.Infof("ControllerUnpublishVolume: called with args %+v", *req) - //TODO: If the err is returned by invoking CNS API, then faultType should be + // TODO: If the err is returned by invoking CNS API, then faultType should be // populated by the underlying layer. // If the request failed due to validate the request, "csi.fault.InvalidArgument" will be return. // If thr reqeust failed due to object not found, "csi.fault.NotFound" will be return. @@ -885,6 +919,9 @@ func (c *controller) ControllerUnpublishVolume(ctx context.Context, req *csi.Con resp, faultType, err := controllerUnpublishVolumeInternal() log.Debugf("controllerUnpublishVolumeInternal: returns fault %q for volume %q", faultType, req.VolumeId) if err != nil { + if csifault.IsNonStorageFault(faultType) { + faultType = csifault.AddCsiNonStoragePrefix(ctx, faultType) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", prometheus.PrometheusDetachVolumeOpType, volumeType, faultType) @@ -1131,7 +1168,7 @@ func (c *controller) ControllerExpandVolume(ctx context.Context, req *csi.Contro return nil, csifault.CSIUnimplementedFault, status.Error(codes.Unimplemented, msg) } log.Infof("ControllerExpandVolume: called with args %+v", *req) - //TODO: If the err is returned by invoking CNS API, then faultType should be + // TODO: If the err is returned by invoking CNS API, then faultType should be // populated by the underlying layer. // If the request failed due to validate the request, "csi.fault.InvalidArgument" will be return. // If thr reqeust failed due to object not found, "csi.fault.NotFound" will be return. 
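The CreateVolume change earlier in this file only accepts a snapshot-typed VolumeContentSource; any other source type returns an InvalidArgument fault. Below is a minimal sketch of the request shape it expects, built with the CSI Go bindings already used by this file; the volume name and snapshot ID are placeholder values, not taken from this change.

```go
package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

func main() {
	// Sketch only: a CreateVolume request restoring from a snapshot, which is the
	// one VolumeContentSource type the guest controller accepts in this change.
	req := &csi.CreateVolumeRequest{
		Name: "restored-volume", // placeholder name
		VolumeContentSource: &csi.VolumeContentSource{
			Type: &csi.VolumeContentSource_Snapshot{
				Snapshot: &csi.VolumeContentSource_SnapshotSource{
					SnapshotId: "snapshot-id-from-supervisor", // placeholder ID
				},
			},
		},
	}

	// Mirrors the check added in createVolumeInternal: a non-snapshot source is rejected.
	if src := req.GetVolumeContentSource(); src != nil {
		if snap := src.GetSnapshot(); snap != nil {
			fmt.Println("restore from snapshot:", snap.GetSnapshotId())
		} else {
			fmt.Println("unsupported VolumeContentSource type")
		}
	}
}
```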
@@ -1253,6 +1290,9 @@ func (c *controller) ControllerExpandVolume(ctx context.Context, req *csi.Contro resp, faultType, err := controllerExpandVolumeInternal() log.Debugf("controllerExpandVolumeInternal: returns fault %q for volume %q", faultType, req.VolumeId) if err != nil { + if csifault.IsNonStorageFault(faultType) { + faultType = csifault.AddCsiNonStoragePrefix(ctx, faultType) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", prometheus.PrometheusExpandVolumeOpType, volumeType, faultType) @@ -1306,10 +1346,6 @@ func (c *controller) ControllerGetCapabilities(ctx context.Context, req *csi.Con ctx = logger.NewContextWithLogger(ctx) log := logger.GetLogger(ctx) log.Infof("ControllerGetCapabilities: called with args %+v", *req) - if commonco.ContainerOrchestratorUtility.IsFSSEnabled(ctx, common.BlockVolumeSnapshot) { - controllerCaps = append(controllerCaps, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, - csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS) - } var caps []*csi.ControllerServiceCapability for _, cap := range controllerCaps { c := &csi.ControllerServiceCapability{ @@ -1370,8 +1406,12 @@ func (c *controller) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshot if err != nil { if errors.IsNotFound(err) { // New createSnapshot request on the guest + // Add "csi.vsphere.guest-initiated-csi-snapshot" annotation on VolumeSnapshot CR in + // the supervisor cluster to indicate that snapshot creation is initiated from Guest cluster + annotation := make(map[string]string) + annotation[common.SupervisorVolumeSnapshotAnnotationKey] = "true" supVolumeSnapshot := constructVolumeSnapshotWithVolumeSnapshotClass(supervisorVolumeSnapshotName, - c.supervisorNamespace, supervisorVolumeSnapshotClass, supervisorPVCName) + c.supervisorNamespace, supervisorVolumeSnapshotClass, supervisorPVCName, annotation) log.Infof("Supervisosr VolumeSnapshot Spec: %+v", supVolumeSnapshot) _, err = c.supervisorSnapshotterClient.SnapshotV1().VolumeSnapshots( c.supervisorNamespace).Create(ctx, supVolumeSnapshot, metav1.CreateOptions{}) @@ -1394,7 +1434,7 @@ func (c *controller) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshot // Wait for VolumeSnapshot to be ready to use isReady, vs, err := common.IsVolumeSnapshotReady(ctx, c.supervisorSnapshotterClient, supervisorVolumeSnapshotName, c.supervisorNamespace, - time.Duration(getProvisionTimeoutInMin(ctx))*time.Minute) + time.Duration(getSnapshotTimeoutInMin(ctx))*time.Minute) if !isReady { msg := fmt.Sprintf("volumesnapshot: %s on namespace: %s in supervisor cluster was not Ready. "+ "Error: %+v", supervisorVolumeSnapshotName, c.supervisorNamespace, err) diff --git a/pkg/csi/service/wcpguest/controller_helper.go b/pkg/csi/service/wcpguest/controller_helper.go index 76532f4bb7..a5579cc66b 100644 --- a/pkg/csi/service/wcpguest/controller_helper.go +++ b/pkg/csi/service/wcpguest/controller_helper.go @@ -28,7 +28,7 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" "github.com/container-storage-interface/spec/lib/go/csi" - snap "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + snap "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" "google.golang.org/grpc/codes" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -52,6 +52,10 @@ const ( // Default timeout for resize, used unless overridden by user in // csi-controller YAML. 
 	defaultResizeTimeoutInMin = 4
+
+	// Default timeout for create snapshot, used unless overridden by user in
+	// csi-controller YAML.
+	defaultSnapshotTimeoutInMin = 4
 )
 
 // validateGuestClusterCreateVolumeRequest is the helper function to validate
@@ -228,11 +232,12 @@ func getPersistentVolumeClaimSpecWithStorageClass(pvcName string, namespace stri
 }
 
 func constructVolumeSnapshotWithVolumeSnapshotClass(volumeSnapshotName string, namespace string,
-	volumeSnapshotClassName string, pvcName string) *snap.VolumeSnapshot {
+	volumeSnapshotClassName string, pvcName string, annotation map[string]string) *snap.VolumeSnapshot {
 	volumeSnapshot := &snap.VolumeSnapshot{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:      volumeSnapshotName,
-			Namespace: namespace,
+			Name:        volumeSnapshotName,
+			Namespace:   namespace,
+			Annotations: annotation,
 		},
 		Spec: snap.VolumeSnapshotSpec{
 			Source: snap.VolumeSnapshotSource{
@@ -343,6 +348,30 @@ func getProvisionTimeoutInMin(ctx context.Context) int {
 	return provisionTimeoutInMin
 }
 
+// getSnapshotTimeoutInMin returns the timeout for volume snapshot.
+// If environment variable SNAPSHOT_TIMEOUT_MINUTES is set and valid,
+// return the interval value read from the environment variable,
+// otherwise, use the default timeout of 4 mins.
+func getSnapshotTimeoutInMin(ctx context.Context) int {
+	log := logger.GetLogger(ctx)
+	snapshotTimeoutInMin := defaultSnapshotTimeoutInMin
+	if v := os.Getenv("SNAPSHOT_TIMEOUT_MINUTES"); v != "" {
+		if value, err := strconv.Atoi(v); err == nil {
+			if value <= 0 {
+				log.Warnf("snapshotTimeout set in env variable SNAPSHOT_TIMEOUT_MINUTES %s "+
+					"is less than or equal to 0, will use the default timeout", v)
+			} else {
+				snapshotTimeoutInMin = value
+				log.Infof("snapshotTimeout is set to %d minutes", snapshotTimeoutInMin)
+			}
+		} else {
+			log.Warnf("snapshotTimeout set in env variable SNAPSHOT_TIMEOUT_MINUTES %s is invalid, "+
+				"will use the default timeout", v)
+		}
+	}
+	return snapshotTimeoutInMin
+}
+
 // getResizeTimeoutInMin returns the timeout for volume resize.
 // If environment variable RESIZE_TIMEOUT_MINUTES is set and valid,
 // return the interval value read from environment variable
diff --git a/pkg/internalapis/featurestates/featurestates.go b/pkg/internalapis/featurestates/featurestates.go
index 9b35d7ffb0..94c2b7a21a 100644
--- a/pkg/internalapis/featurestates/featurestates.go
+++ b/pkg/internalapis/featurestates/featurestates.go
@@ -163,7 +163,7 @@ func StartSvFSSReplicationService(ctx context.Context, svFeatureStatConfigMapNam
 		log.Errorf("failed to create dynamic informer for %s CR. Error: %+v", CRDPlural, err)
 		return err
 	}
-	dynInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+	_, err = dynInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		// Add.
 		AddFunc: nil,
 		// Update.
@@ -173,6 +173,9 @@ func StartSvFSSReplicationService(ctx context.Context, svFeatureStatConfigMapNam fssCRDeleted(obj) }, }) + if err != nil { + return err + } go func() { log.Infof("Informer to watch on %s CR starting..", CRDPlural) dynInformer.Informer().Run(make(chan struct{})) diff --git a/pkg/kubernetes/informers.go b/pkg/kubernetes/informers.go index d516483250..f11b232f38 100644 --- a/pkg/kubernetes/informers.go +++ b/pkg/kubernetes/informers.go @@ -99,11 +99,14 @@ func (im *InformerManager) AddNodeListener( im.nodeInformer = im.informerFactory.Core().V1().Nodes().Informer() } - im.nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := im.nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: add, UpdateFunc: update, DeleteFunc: remove, }) + if err != nil { + return + } } // AddCSINodeNodeListener hooks up add, update, delete callbacks. @@ -113,11 +116,14 @@ func (im *InformerManager) AddCSINodeListener( im.nodeInformer = im.informerFactory.Storage().V1().CSINodes().Informer() } - im.nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := im.nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: add, UpdateFunc: update, DeleteFunc: remove, }) + if err != nil { + return + } } // AddPVCListener hooks up add, update, delete callbacks. @@ -128,11 +134,14 @@ func (im *InformerManager) AddPVCListener( } im.pvcSynced = im.pvcInformer.HasSynced - im.pvcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := im.pvcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: add, UpdateFunc: update, DeleteFunc: remove, }) + if err != nil { + return + } } // AddPVListener hooks up add, update, delete callbacks. @@ -143,11 +152,14 @@ func (im *InformerManager) AddPVListener( } im.pvSynced = im.pvInformer.HasSynced - im.pvInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := im.pvInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: add, UpdateFunc: update, DeleteFunc: remove, }) + if err != nil { + return + } } // AddNamespaceListener hooks up add, update, delete callbacks. @@ -158,11 +170,14 @@ func (im *InformerManager) AddNamespaceListener( } im.namespaceSynced = im.namespaceInformer.HasSynced - im.namespaceInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := im.namespaceInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: add, UpdateFunc: update, DeleteFunc: remove, }) + if err != nil { + return + } } // AddConfigMapListener hooks up add, update, delete callbacks. @@ -175,11 +190,14 @@ func (im *InformerManager) AddConfigMapListener( } im.configMapSynced = im.configMapInformer.HasSynced - im.configMapInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := im.configMapInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: add, UpdateFunc: update, DeleteFunc: remove, }) + if err != nil { + return + } stopCh := make(chan struct{}) // Since NewFilteredConfigMapInformer is not part of the informer factory, // we need to invoke the Run() explicitly to start the shared informer. @@ -194,11 +212,14 @@ func (im *InformerManager) AddPodListener( } im.podSynced = im.podInformer.HasSynced - im.podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := im.podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: add, UpdateFunc: update, DeleteFunc: remove, }) + if err != nil { + return + } } // AddVolumeAttachmentListener hooks up add, update, delete callbacks. 
@@ -208,11 +229,14 @@ func (im *InformerManager) AddVolumeAttachmentListener( im.volumeAttachmentInformer = im.informerFactory.Storage().V1().VolumeAttachments().Informer() } - im.volumeAttachmentInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := im.volumeAttachmentInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: add, UpdateFunc: update, DeleteFunc: remove, }) + if err != nil { + return + } } // GetPVLister returns PV Lister for the calling informer manager. diff --git a/pkg/kubernetes/kubernetes.go b/pkg/kubernetes/kubernetes.go index 75f250769c..da677dcfbd 100644 --- a/pkg/kubernetes/kubernetes.go +++ b/pkg/kubernetes/kubernetes.go @@ -46,11 +46,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" apiutils "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" + snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" storagev1 "k8s.io/api/storage/v1" cnsoperatorv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator" migrationv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/migration/v1alpha1" - cnsvsphere "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/cns-lib/vsphere" cnsconfig "sigs.k8s.io/vsphere-csi-driver/v3/pkg/common/config" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/types" @@ -328,21 +327,9 @@ func CreateKubernetesClientFromConfig(kubeConfigPath string) (clientset.Interfac // If not set, returns node UUID from K8s CSINode API // object. func GetNodeUUID(ctx context.Context, - k8sclient clientset.Interface, nodeName string, - useK8sCSINodeObj bool) (string, error) { + k8sclient clientset.Interface, nodeName string) (string, error) { log := logger.GetLogger(ctx) - log.Infof("GetNodeUUID called for the node: %q with useK8sCSINodeObj: %t", - nodeName, useK8sCSINodeObj) - if !useK8sCSINodeObj { - node, err := k8sclient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) - if err != nil { - log.Errorf("failed to get kubernetes node with the name: %q. Err: %v", nodeName, err) - return "", err - } - k8sNodeUUID := cnsvsphere.GetUUIDFromProviderID(node.Spec.ProviderID) - log.Infof("Retrieved node UUID: %q for the node: %q", k8sNodeUUID, nodeName) - return k8sNodeUUID, nil - } + log.Infof("GetNodeUUID called for the node: %q", nodeName) node, err := k8sclient.StorageV1().CSINodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { log.Errorf("failed to get K8s CSINode with the name: %q. 
"+ diff --git a/pkg/syncer/admissionhandler/admissionhandler.go b/pkg/syncer/admissionhandler/admissionhandler.go index a1ff1d92a9..08e52ab54b 100644 --- a/pkg/syncer/admissionhandler/admissionhandler.go +++ b/pkg/syncer/admissionhandler/admissionhandler.go @@ -140,7 +140,11 @@ func StartWebhookServer(ctx context.Context) error { if clusterFlavor == cnstypes.CnsClusterFlavorWorkload { featureGateTKGSHaEnabled = containerOrchestratorUtility.IsFSSEnabled(ctx, common.TKGsHA) featureGateVolumeHealthEnabled = containerOrchestratorUtility.IsFSSEnabled(ctx, common.VolumeHealth) + featureGateBlockVolumeSnapshotEnabled = containerOrchestratorUtility.IsFSSEnabled(ctx, common.BlockVolumeSnapshot) startCNSCSIWebhookManager(ctx) + } else if clusterFlavor == cnstypes.CnsClusterFlavorGuest { + featureGateBlockVolumeSnapshotEnabled = containerOrchestratorUtility.IsFSSEnabled(ctx, common.BlockVolumeSnapshot) + startPVCSIWebhookManager(ctx) } else if clusterFlavor == cnstypes.CnsClusterFlavorVanilla { if cfg == nil { cfg, err = getWebHookConfig(ctx) @@ -164,8 +168,17 @@ func StartWebhookServer(ctx context.Context) error { cfg.WebHookConfig.Port = defaultWebhookServerPort } server = &http.Server{ - Addr: fmt.Sprintf(":%v", cfg.WebHookConfig.Port), - TLSConfig: &tls.Config{Certificates: []tls.Certificate{certs}}, + Addr: fmt.Sprintf(":%v", cfg.WebHookConfig.Port), + TLSConfig: &tls.Config{ + Certificates: []tls.Certificate{certs}, + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + }, + MinVersion: tls.VersionTLS12, + }, } // Define http server and server handler. mux := http.NewServeMux() @@ -255,7 +268,7 @@ func validationHandler(w http.ResponseWriter, r *http.Request) { case "StorageClass": admissionResponse = validateStorageClass(ctx, &ar) case "PersistentVolumeClaim": - admissionResponse = validatePVC(ctx, &ar) + admissionResponse = validatePVC(ctx, ar.Request) default: log.Infof("Skipping validation for resource type: %q", ar.Request.Kind.Kind) admissionResponse = &admissionv1.AdmissionResponse{ diff --git a/pkg/syncer/admissionhandler/cnscsi_admissionhandler.go b/pkg/syncer/admissionhandler/cnscsi_admissionhandler.go index 08778b5bfd..25775126e3 100644 --- a/pkg/syncer/admissionhandler/cnscsi_admissionhandler.go +++ b/pkg/syncer/admissionhandler/cnscsi_admissionhandler.go @@ -2,6 +2,7 @@ package admissionhandler import ( "context" + "crypto/tls" "fmt" "os" "strconv" @@ -65,6 +66,13 @@ func startCNSCSIWebhookManager(ctx context.Context) { log.Infof("registering validating webhook with the endpoint %v", ValidationWebhookPath) // we should not allow TLS < 1.2 mgr.GetWebhookServer().TLSMinVersion = WebhookTlsMinVersion + // CipherSuites allows us to specify TLS 1.2 cipher suites that have been recommended by the Security team + mgr.GetWebhookServer().TLSOpts = []func(*tls.Config){ + func(t *tls.Config) { + t.CipherSuites = []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384} + }, + } mgr.GetWebhookServer().Register(ValidationWebhookPath, &webhook.Admission{Handler: &CSISupervisorWebhook{ Client: mgr.GetClient(), clientConfig: mgr.GetConfig(), @@ -97,6 +105,17 @@ func (h *CSISupervisorWebhook) Handle(ctx context.Context, req admission.Request } if featureGateVolumeHealthEnabled { resp = validatePVCAnnotationForVolumeHealth(ctx, req) + if !resp.Allowed { + return + } + } + if 
featureGateBlockVolumeSnapshotEnabled { + admissionResp := validatePVC(ctx, &req.AdmissionRequest) + resp.AdmissionResponse = *admissionResp.DeepCopy() + } + } else if req.Kind.Kind == "VolumeSnapshot" { + if featureGateBlockVolumeSnapshotEnabled { + resp = validateSnapshotOperationSupervisorRequest(ctx, req) } } return diff --git a/pkg/syncer/admissionhandler/pvcsi_admissionhandler.go b/pkg/syncer/admissionhandler/pvcsi_admissionhandler.go new file mode 100644 index 0000000000..8b29da9263 --- /dev/null +++ b/pkg/syncer/admissionhandler/pvcsi_admissionhandler.go @@ -0,0 +1,98 @@ +package admissionhandler + +import ( + "context" + "fmt" + "os" + "strconv" + + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + crConfig "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger" +) + +const ( + PVCSIValidationWebhookPath = "/validate" + PVCSIDefaultWebhookPort = 9883 + PVCSIDefaultWebhookMetricsBindAddress = "0" + PVCSIWebhookTlsMinVersion = "1.2" +) + +func getPVCSIWebhookPort() int { + portStr, ok := os.LookupEnv("PVCSI_WEBHOOK_SERVICE_CONTAINER_PORT") + if !ok { + return DefaultWebhookPort + } + + result, err := strconv.ParseInt(portStr, 0, 0) + if err != nil { + panic(fmt.Sprintf("malformed configuration: PVCSI_WEBHOOK_SERVICE_CONTAINER_PORT, expected int: %v", err)) + } + + return int(result) +} + +func getPVCSIMetricsBindAddress() string { + metricsAddr, ok := os.LookupEnv("PVCSI_WEBHOOK_SERVICE_METRICS_BIND_ADDR") + if !ok { + return DefaultWebhookMetricsBindAddress + } + + return metricsAddr +} + +// startPVCSIWebhookManager starts the webhook server in guest cluster +func startPVCSIWebhookManager(ctx context.Context) { + log := logger.GetLogger(ctx) + + webhookPort := getPVCSIWebhookPort() + metricsBindAddress := getPVCSIMetricsBindAddress() + log.Infof("setting up webhook manager with webhookPort %v and metricsBindAddress %v", + webhookPort, metricsBindAddress) + mgr, err := manager.New(crConfig.GetConfigOrDie(), manager.Options{ + MetricsBindAddress: metricsBindAddress, + Port: webhookPort}) + if err != nil { + log.Fatal(err, "unable to set up overall controller manager") + } + + log.Infof("registering validating webhook with the endpoint %v", PVCSIValidationWebhookPath) + // we should not allow TLS < 1.2 + mgr.GetWebhookServer().TLSMinVersion = PVCSIWebhookTlsMinVersion + mgr.GetWebhookServer().Register(PVCSIValidationWebhookPath, &webhook.Admission{Handler: &CSIGuestWebhook{ + Client: mgr.GetClient(), + clientConfig: mgr.GetConfig(), + }}) + + if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + log.Fatal(err, "unable to run the webhook manager") + } +} + +var _ admission.Handler = &CSIGuestWebhook{} + +type CSIGuestWebhook struct { + client.Client + clientConfig *rest.Config +} + +func (h *CSIGuestWebhook) Handle(ctx context.Context, req admission.Request) (resp admission.Response) { + log := logger.GetLogger(ctx) + log.Debugf("PV-CSI validation webhook handler called with request: %+v", req) + defer log.Debugf("PV-CSI validation webhook handler completed for the request: %+v", req) + + resp = admission.Allowed("") + if req.Kind.Kind == "PersistentVolumeClaim" { + if featureGateBlockVolumeSnapshotEnabled { + admissionResp := validatePVC(ctx, &req.AdmissionRequest) + resp.AdmissionResponse = 
*admissionResp.DeepCopy() + } + } + return +} diff --git a/pkg/syncer/admissionhandler/validatepvc.go b/pkg/syncer/admissionhandler/validatepvc.go index a8e0e7aa48..47c068dabb 100644 --- a/pkg/syncer/admissionhandler/validatepvc.go +++ b/pkg/syncer/admissionhandler/validatepvc.go @@ -6,7 +6,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" - snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" admissionv1 "k8s.io/api/admission/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -21,7 +21,7 @@ const ( ) // validatePVC helps validate AdmissionReview requests for PersistentVolumeClaim. -func validatePVC(ctx context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse { +func validatePVC(ctx context.Context, req *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse { if !featureGateBlockVolumeSnapshotEnabled { // If CSI block volume snapshot is disabled and webhook is running, // skip validation for PersistentVolumeClaim. @@ -30,7 +30,7 @@ func validatePVC(ctx context.Context, ar *admissionv1.AdmissionReview) *admissio } } - if ar.Request.Operation != admissionv1.Update && ar.Request.Operation != admissionv1.Delete { + if req.Operation != admissionv1.Update && req.Operation != admissionv1.Delete { // If AdmissionReview request operation is out of expectation, // skip validation for PersistentVolumeClaim. return &admissionv1.AdmissionResponse{ @@ -39,7 +39,6 @@ func validatePVC(ctx context.Context, ar *admissionv1.AdmissionReview) *admissio } log := logger.GetLogger(ctx) - req := ar.Request var result *metav1.Status allowed := true @@ -49,7 +48,7 @@ func validatePVC(ctx context.Context, ar *admissionv1.AdmissionReview) *admissio log.Debugf("JSON req.OldObject.Raw: %v", string(req.OldObject.Raw)) // req.OldObject is null for CREATE and CONNECT operations. if err := json.Unmarshal(req.OldObject.Raw, &oldPVC); err != nil { - log.Warnf("error deserializing old pvc: %v. skipping validation.", err) + log.Errorf("error deserializing old pvc: %v. skipping validation.", err) return &admissionv1.AdmissionResponse{ // skip validation if there is pvc deserialization error Allowed: true, @@ -71,7 +70,7 @@ func validatePVC(ctx context.Context, ar *admissionv1.AdmissionReview) *admissio log.Debugf("JSON req.Object.Raw: %v", string(req.Object.Raw)) // req.Object is null for DELETE operations. if err := json.Unmarshal(req.Object.Raw, &newPVC); err != nil { - log.Warnf("error deserializing old pvc: %v. skipping validation.", err) + log.Errorf("error deserializing old pvc: %v. skipping validation.", err) return &admissionv1.AdmissionResponse{ // skip validation if there is pvc deserialization error Allowed: true, @@ -152,7 +151,7 @@ func getPVReclaimPolicyForPVC(ctx context.Context, pvc corev1.PersistentVolumeCl pv, err := kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}) if err != nil { return result, logger.LogNewErrorf(log, "failed to get PV %v with error: %v. 
"+ - "Stopping getting reclaim policy for PVC, %s/%s", err, pvc.Spec.VolumeName, pvc.Namespace, pvc.Name) + "Stopping getting reclaim policy for PVC, %s/%s", pvc.Spec.VolumeName, err, pvc.Namespace, pvc.Name) } return pv.Spec.PersistentVolumeReclaimPolicy, nil diff --git a/pkg/syncer/admissionhandler/validatepvc_test.go b/pkg/syncer/admissionhandler/validatepvc_test.go index 2da90344bd..c18f18844c 100644 --- a/pkg/syncer/admissionhandler/validatepvc_test.go +++ b/pkg/syncer/admissionhandler/validatepvc_test.go @@ -9,9 +9,9 @@ import ( "k8s.io/client-go/kubernetes/fake" "github.com/agiledragon/gomonkey/v2" - snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" - snapshotclientfake "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/fake" + snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" + snapshotclientfake "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/fake" "github.com/stretchr/testify/assert" admissionv1 "k8s.io/api/admission/v1" corev1 "k8s.io/api/core/v1" @@ -429,7 +429,7 @@ func TestValidatePVC(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - actualResponse := validatePVC(ctx, test.admissionReview) + actualResponse := validatePVC(ctx, test.admissionReview.Request) assert.Equal(t, actualResponse, test.expectedResponse) }) } diff --git a/pkg/syncer/admissionhandler/validatepvcannotationfortkgsha.go b/pkg/syncer/admissionhandler/validatepvcannotationfortkgsha.go index 3a31ef3c32..8eaa5b3cee 100644 --- a/pkg/syncer/admissionhandler/validatepvcannotationfortkgsha.go +++ b/pkg/syncer/admissionhandler/validatepvcannotationfortkgsha.go @@ -22,9 +22,13 @@ const ( func validatePVCAnnotationForTKGSHA(ctx context.Context, request admission.Request) admission.Response { log := logger.GetLogger(ctx) log.Debugf("validatePVCAnnotationForTKGSHA called with the request %v", request) - + if request.Operation == admissionv1.Delete { + // PVC tkgs ha annotation validation is not required for delete PVC calls + return admission.Allowed("") + } newPVC := corev1.PersistentVolumeClaim{} if err := json.Unmarshal(request.Object.Raw, &newPVC); err != nil { + log.Errorf("error unmarshalling pvc: %v", err) reason := "skipped validation when failed to deserialize PVC from new request object" log.Warn(reason) return admission.Allowed(reason) @@ -39,6 +43,7 @@ func validatePVCAnnotationForTKGSHA(ctx context.Context, request admission.Reque } else if request.Operation == admissionv1.Update { oldPVC := corev1.PersistentVolumeClaim{} if err := json.Unmarshal(request.OldObject.Raw, &oldPVC); err != nil { + log.Errorf("error unmarshalling pvc: %v", err) reason := "skipped validation when failed to deserialize PVC from old request object" log.Warn(reason) return admission.Allowed(reason) diff --git a/pkg/syncer/admissionhandler/validatepvcannotationforvolumehealth.go b/pkg/syncer/admissionhandler/validatepvcannotationforvolumehealth.go index b5994166f3..113a2c898e 100644 --- a/pkg/syncer/admissionhandler/validatepvcannotationforvolumehealth.go +++ b/pkg/syncer/admissionhandler/validatepvcannotationforvolumehealth.go @@ -26,8 +26,13 @@ func validatePVCAnnotationForVolumeHealth(ctx context.Context, request admission username := 
request.UserInfo.Username isCSIServiceAccount := validateCSIServiceAccount(request.UserInfo.Username) log.Debugf("validatePVCAnnotationForVolumeHealth called with the request %v by user: %v", request, username) + if request.Operation == admissionv1.Delete { + // PVC volume health annotation validation is not required for delete PVC calls + return admission.Allowed("") + } newPVC := corev1.PersistentVolumeClaim{} if err := json.Unmarshal(request.Object.Raw, &newPVC); err != nil { + log.Errorf("error unmarshalling pvc: %v", err) reason := "skipped validation when failed to deserialize PVC from new request object" log.Warn(reason) return admission.Allowed(reason) @@ -41,6 +46,7 @@ func validatePVCAnnotationForVolumeHealth(ctx context.Context, request admission } else if request.Operation == admissionv1.Update { oldPVC := corev1.PersistentVolumeClaim{} if err := json.Unmarshal(request.OldObject.Raw, &oldPVC); err != nil { + log.Errorf("error unmarshalling pvc: %v", err) reason := "skipped validation when failed to deserialize PVC from old request object" log.Warn(reason) return admission.Allowed(reason) diff --git a/pkg/syncer/admissionhandler/validatesnapshotoperationsupervisorrequest.go b/pkg/syncer/admissionhandler/validatesnapshotoperationsupervisorrequest.go new file mode 100644 index 0000000000..090f7845b2 --- /dev/null +++ b/pkg/syncer/admissionhandler/validatesnapshotoperationsupervisorrequest.go @@ -0,0 +1,41 @@ +package admissionhandler + +import ( + "context" + "encoding/json" + + admissionv1 "k8s.io/api/admission/v1" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/common" + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/csi/service/logger" + + snap "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" +) + +const ( + SnapshotOperationNotAllowed = "Snapshot creation initiated directly from the Supervisor cluster " + + "is not supported. Please initiate snapshot creation from the Guest cluster." +) + +// Disallow any opertion on volume snapshot initiated by user directly on the supervisor cluster. +// Currently we only allow snapshot operation initiated from the guest cluster. +func validateSnapshotOperationSupervisorRequest(ctx context.Context, request admission.Request) admission.Response { + log := logger.GetLogger(ctx) + log.Debugf("validateSnapshotOperationSupervisorRequest called with the request %v", request) + newVS := snap.VolumeSnapshot{} + if err := json.Unmarshal(request.Object.Raw, &newVS); err != nil { + reason := "Failed to deserialize VolumeSnapshot from new request object" + log.Warn(reason) + return admission.Denied(reason) + } + + if request.Operation == admissionv1.Create { + if _, annotationFound := newVS.Annotations[common.SupervisorVolumeSnapshotAnnotationKey]; !annotationFound { + return admission.Denied(SnapshotOperationNotAllowed) + } + } + + log.Debugf("validateSnapshotOperationSupervisorRequest completed for the request %v", request) + return admission.Allowed("") +} diff --git a/pkg/syncer/admissionhandler/validatestorageclass.go b/pkg/syncer/admissionhandler/validatestorageclass.go index a7bd1862a9..03d87b3e19 100644 --- a/pkg/syncer/admissionhandler/validatestorageclass.go +++ b/pkg/syncer/admissionhandler/validatestorageclass.go @@ -43,8 +43,7 @@ var ( ) const ( - volumeExpansionErrorMessage = "AllowVolumeExpansion can not be set to true on the in-tree vSphere StorageClass" - migrationParamErrorMessage = "Invalid StorageClass Parameters. 
" + + migrationParamErrorMessage = "Invalid StorageClass Parameters. " + "Migration specific parameters should not be used in the StorageClass" ) @@ -75,15 +74,7 @@ func validateStorageClass(ctx context.Context, ar *admissionv1.AdmissionReview) } } log.Infof("Validating StorageClass: %q", sc.Name) - // AllowVolumeExpansion check for kubernetes.io/vsphere-volume provisioner. - if sc.Provisioner == "kubernetes.io/vsphere-volume" { - if sc.AllowVolumeExpansion != nil && *sc.AllowVolumeExpansion { - allowed = false - result = &metav1.Status{ - Reason: volumeExpansionErrorMessage, - } - } - } else if sc.Provisioner == "csi.vsphere.vmware.com" { + if sc.Provisioner == "csi.vsphere.vmware.com" { // Migration parameters check for csi.vsphere.vmware.com provisioner. for param := range sc.Parameters { if unSupportedParameters.Has(param) { diff --git a/pkg/syncer/admissionhandler/validatestorageclass_test.go b/pkg/syncer/admissionhandler/validatestorageclass_test.go index dcccdedc8c..f71da2183a 100644 --- a/pkg/syncer/admissionhandler/validatestorageclass_test.go +++ b/pkg/syncer/admissionhandler/validatestorageclass_test.go @@ -51,12 +51,12 @@ func TestValidateStorageClassForAllowVolumeExpansion(t *testing.T) { "\"volumeBindingMode\": \"Immediate\"\n}"), } admissionResponse := validateStorageClass(ctx, &admissionReview) - if !strings.Contains(string(admissionResponse.Result.Reason), volumeExpansionErrorMessage) || - admissionResponse.Allowed { + if admissionResponse.Allowed { + t.Log("TestValidateStorageClassForAllowVolumeExpansion Passed") + } else { t.Fatalf("TestValidateStorageClassForAllowVolumeExpansion failed. "+ "admissionReview.Request: %v, admissionResponse: %v", admissionReview.Request, admissionResponse) } - t.Log("TestValidateStorageClassForAllowVolumeExpansion Passed") } // TestValidateStorageClassForMigrationParameter is the unit test for validating diff --git a/pkg/syncer/cnsoperator/controller/cnsnodevmattachment/cnsnodevmattachment_controller.go b/pkg/syncer/cnsoperator/controller/cnsnodevmattachment/cnsnodevmattachment_controller.go index cf95d8bc88..d1dec85132 100644 --- a/pkg/syncer/cnsoperator/controller/cnsnodevmattachment/cnsnodevmattachment_controller.go +++ b/pkg/syncer/cnsoperator/controller/cnsnodevmattachment/cnsnodevmattachment_controller.go @@ -630,6 +630,9 @@ func (r *ReconcileCnsNodeVMAttachment) Reconcile(ctx context.Context, // This can happen when reconciler returns reconcile.Result{RequeueAfter: timeout}, the err will be set to nil, // and corresponding faulttype will be set // for this case, we need count it as an attach/detach failure + if csifault.IsNonStorageFault(faulttype) { + faulttype = csifault.AddCsiNonStoragePrefix(ctx, faulttype) + } log.Errorf("Operation failed, reporting failure status to Prometheus."+ " Operation Type: %q, Volume Type: %q, Fault Type: %q", volumeOpType, volumeType, faulttype) diff --git a/pkg/syncer/cnsoperator/controller/csinodetopology/csinodetopology_controller.go b/pkg/syncer/cnsoperator/controller/csinodetopology/csinodetopology_controller.go index 531653674e..45b0042cb2 100644 --- a/pkg/syncer/cnsoperator/controller/csinodetopology/csinodetopology_controller.go +++ b/pkg/syncer/cnsoperator/controller/csinodetopology/csinodetopology_controller.go @@ -110,7 +110,6 @@ func Add(mgr manager.Manager, clusterFlavor cnstypes.CnsClusterFlavor, } } - useNodeUuid := coCommonInterface.IsFSSEnabled(ctx, common.UseCSINodeId) isMultiVCFSSEnabled := coCommonInterface.IsFSSEnabled(ctx, common.MultiVCenterCSITopology) // Initialize kubernetes 
client. k8sclient, err := k8s.NewClient(ctx) @@ -128,20 +127,19 @@ func Add(mgr manager.Manager, clusterFlavor cnstypes.CnsClusterFlavor, ) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: csinodetopologyv1alpha1.GroupName}) - return add(mgr, newReconciler(mgr, configInfo, recorder, useNodeUuid, + return add(mgr, newReconciler(mgr, configInfo, recorder, enableTKGsHAinGuest, isMultiVCFSSEnabled, vmOperatorClient, supervisorNamespace)) } // newReconciler returns a new `reconcile.Reconciler`. func newReconciler(mgr manager.Manager, configInfo *cnsconfig.ConfigurationInfo, recorder record.EventRecorder, - useNodeUuid bool, enableTKGsHAinGuest bool, isMultiVCFSSEnabled bool, vmOperatorClient client.Client, + enableTKGsHAinGuest bool, isMultiVCFSSEnabled bool, vmOperatorClient client.Client, supervisorNamespace string) reconcile.Reconciler { return &ReconcileCSINodeTopology{ client: mgr.GetClient(), scheme: mgr.GetScheme(), configInfo: configInfo, recorder: recorder, - useNodeUuid: useNodeUuid, enableTKGsHAinGuest: enableTKGsHAinGuest, isMultiVCFSSEnabled: isMultiVCFSSEnabled, vmOperatorClient: vmOperatorClient, @@ -207,7 +205,6 @@ type ReconcileCSINodeTopology struct { scheme *runtime.Scheme configInfo *cnsconfig.ConfigurationInfo recorder record.EventRecorder - useNodeUuid bool enableTKGsHAinGuest bool isMultiVCFSSEnabled bool vmOperatorClient client.Client @@ -272,16 +269,16 @@ func (r *ReconcileCSINodeTopology) reconcileForVanilla(ctx context.Context, requ return reconcile.Result{RequeueAfter: timeout}, nil } - if r.useNodeUuid && clusterFlavor == cnstypes.CnsClusterFlavorVanilla { + if clusterFlavor == cnstypes.CnsClusterFlavorVanilla { nodeID = instance.Spec.NodeUUID if nodeID != "" { - nodeVM, err = nodeManager.GetNode(ctx, nodeID, nil) + nodeVM, err = nodeManager.GetNodeVMAndUpdateCache(ctx, nodeID, nil) } else { return reconcile.Result{RequeueAfter: timeout}, nil } } else { nodeID = instance.Spec.NodeID - nodeVM, err = nodeManager.GetNodeByName(ctx, nodeID) + nodeVM, err = nodeManager.GetNodeVMByNameAndUpdateCache(ctx, nodeID) } if err != nil { if err == node.ErrNodeNotFound { diff --git a/pkg/syncer/cnsoperator/controller/csinodetopology/csinodetopology_controller_test.go b/pkg/syncer/cnsoperator/controller/csinodetopology/csinodetopology_controller_test.go index 455ff839f6..e938b8eb95 100644 --- a/pkg/syncer/cnsoperator/controller/csinodetopology/csinodetopology_controller_test.go +++ b/pkg/syncer/cnsoperator/controller/csinodetopology/csinodetopology_controller_test.go @@ -157,7 +157,6 @@ func TestCSINodeTopologyControllerForTKGSHA(t *testing.T) { scheme: s, configInfo: &cnsconfig.ConfigurationInfo{}, recorder: record.NewFakeRecorder(testBufferSize), - useNodeUuid: true, enableTKGsHAinGuest: true, vmOperatorClient: fakeVmOperatorClient, supervisorNamespace: testSupervisorNamespace, diff --git a/pkg/syncer/metadatasyncer.go b/pkg/syncer/metadatasyncer.go index b55892812d..616e5a1df6 100644 --- a/pkg/syncer/metadatasyncer.go +++ b/pkg/syncer/metadatasyncer.go @@ -248,6 +248,7 @@ func InitMetadataSyncer(ctx context.Context, clusterFlavor cnstypes.CnsClusterFl if err != nil { return err } + vCenter.Config.ReloadVCConfigForNewClient = true metadataSyncer.host = vCenter.Config.Host cnsDeletionMap[metadataSyncer.host] = make(map[string]bool) @@ -714,7 +715,7 @@ func startTopologyCRInformer(ctx context.Context, cfg *restclient.Config) error } csiNodeTopologyInformer := dynInformer.Informer() // TODO: Multi-VC: Use a RWLock to guard simultaneous updates to 
topologyVCMap - csiNodeTopologyInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err = csiNodeTopologyInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { topoCRAdded(obj) }, @@ -725,6 +726,9 @@ func startTopologyCRInformer(ctx context.Context, cfg *restclient.Config) error topoCRDeleted(obj) }, }) + if err != nil { + return err + } // Start informer. go func() { log.Infof("Informer to watch on %s CR starting..", csinodetopology.CRDSingular) @@ -737,7 +741,7 @@ func startTopologyCRInformer(ctx context.Context, cfg *restclient.Config) error // in the MetadataSyncer.topologyVCMap parameter. func addLabelsToTopologyVCMap(ctx context.Context, nodeTopoObj csinodetopologyv1alpha1.CSINodeTopology) { log := logger.GetLogger(ctx) - nodeVM, err := nodeMgr.GetNode(ctx, nodeTopoObj.Spec.NodeUUID, nil) + nodeVM, err := nodeMgr.GetNodeVMAndUpdateCache(ctx, nodeTopoObj.Spec.NodeUUID, nil) if err != nil { log.Errorf("Node %q is not yet registered in the node manager. Error: %+v", nodeTopoObj.Spec.NodeUUID, err) @@ -854,7 +858,7 @@ func topoCRDeleted(obj interface{}) { // instance in the MetadataSyncer.topologyVCMap parameter. func removeLabelsFromTopologyVCMap(ctx context.Context, nodeTopoObj csinodetopologyv1alpha1.CSINodeTopology) { log := logger.GetLogger(ctx) - nodeVM, err := nodeMgr.GetNode(ctx, nodeTopoObj.Spec.NodeUUID, nil) + nodeVM, err := nodeMgr.GetNodeVMAndUpdateCache(ctx, nodeTopoObj.Spec.NodeUUID, nil) if err != nil { log.Errorf("Node %q is not yet registered in the node manager. Error: %+v", nodeTopoObj.Spec.NodeUUID, err) diff --git a/pkg/syncer/resize_reconciler.go b/pkg/syncer/resize_reconciler.go index 2d60cb2db9..79e16f66c1 100644 --- a/pkg/syncer/resize_reconciler.go +++ b/pkg/syncer/resize_reconciler.go @@ -109,10 +109,12 @@ func newResizeReconciler( // FileSystemResizePending is not removed from SV PVC when syncer is down // and FileSystemResizePending was removed from a TKG PVC. 
// https://github.com/kubernetes-sigs/vsphere-csi-driver/issues/591 - pvcInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{ + _, err := pvcInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{ UpdateFunc: rc.updatePVC, }, resyncPeriod) - + if err != nil { + return nil, err + } informerFactory.Start(stopCh) if !cache.WaitForCacheSync(stopCh, rc.pvcSynced, rc.pvSynced) { return nil, fmt.Errorf("cannot sync pv/pvc caches") diff --git a/pkg/syncer/syncer_test.go b/pkg/syncer/syncer_test.go index 40e9d4b89c..dc65fa52b0 100644 --- a/pkg/syncer/syncer_test.go +++ b/pkg/syncer/syncer_test.go @@ -114,7 +114,7 @@ func configFromSimWithTLS(tlsConfig *tls.Config, insecureAllowed bool) (*cnsconf cfg.Global.VCenterIP = s.URL.Hostname() cfg.Global.VCenterPort = s.URL.Port() - cfg.Global.User = s.URL.User.Username() + cfg.Global.User = s.URL.User.Username() + "@vsphere.local" cfg.Global.Password, _ = s.URL.User.Password() cfg.Global.Datacenters = "DC0" diff --git a/pkg/syncer/util.go b/pkg/syncer/util.go index 42de62a56d..6ea95737f4 100644 --- a/pkg/syncer/util.go +++ b/pkg/syncer/util.go @@ -361,6 +361,7 @@ func getConfig(ctx context.Context) (*cnsconfig.Config, error) { "vSphere config secret and in immutable ConfigMap") } cfg.Global.ClusterID = clusterID + cnsconfig.GeneratedVanillaClusterID = clusterID } else { if _, err := commonco.ContainerOrchestratorUtility.GetConfigMap(ctx, cnsconfig.ClusterIDConfigMapName, CSINamespace); err == nil { diff --git a/pkg/syncer/volume_health_reconciler.go b/pkg/syncer/volume_health_reconciler.go index b0a97f8876..0d5910d210 100644 --- a/pkg/syncer/volume_health_reconciler.go +++ b/pkg/syncer/volume_health_reconciler.go @@ -155,18 +155,23 @@ func NewVolumeHealthReconciler( }, } - svcPVCInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{ + _, err := svcPVCInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{ AddFunc: rc.svcAddPVC, UpdateFunc: rc.svcUpdatePVC, DeleteFunc: rc.svcAddPVC, }, resyncPeriod) + if err != nil { + return nil, err + } - tkgPVInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{ + _, err = tkgPVInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{ AddFunc: nil, UpdateFunc: rc.tkgUpdatePV, DeleteFunc: rc.tkgDeletePV, }, resyncPeriod) - + if err != nil { + return nil, fmt.Errorf("cannot sync tkg pv cache") + } ctx, log := logger.GetNewContextWithLogger() // Start TKG Informers. 
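Before the e2e test updates below, one note on the snapshot admission flow introduced earlier in this change: the guest controller stamps the supervisor VolumeSnapshot with the csi.vsphere.guest-initiated-csi-snapshot annotation, and the new supervisor webhook denies any VolumeSnapshot created without it. A minimal sketch of the object the guest side builds, using the external-snapshotter v6 API this change migrates to; all names and namespaces are placeholders, and the annotation key is taken from the comment in the CreateSnapshot hunk:

```go
package main

import (
	"fmt"

	snap "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Sketch only: the supervisor-side VolumeSnapshot that a guest-initiated
	// CreateSnapshot produces. Without the annotation below, the supervisor
	// webhook added in this change denies the create request.
	pvcName := "supervisor-pvc-placeholder"
	className := "supervisor-snapshot-class-placeholder"

	vs := &snap.VolumeSnapshot{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "supervisor-volumesnapshot-placeholder",
			Namespace: "supervisor-namespace-placeholder",
			Annotations: map[string]string{
				"csi.vsphere.guest-initiated-csi-snapshot": "true",
			},
		},
		Spec: snap.VolumeSnapshotSpec{
			Source: snap.VolumeSnapshotSource{
				PersistentVolumeClaimName: &pvcName,
			},
			VolumeSnapshotClassName: &className,
		},
	}
	fmt.Printf("VolumeSnapshot to create in the supervisor: %+v\n", vs)
}
```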
diff --git a/tests/e2e/OWNERS b/tests/e2e/OWNERS index c3584925bf..660fd9dbd4 100644 --- a/tests/e2e/OWNERS +++ b/tests/e2e/OWNERS @@ -1,4 +1,3 @@ approvers: - openshift-storage-maintainers -component: "Storage" -subcomponent: Kubernetes External Components +component: "Storage / Kubernetes External Components" diff --git a/tests/e2e/config_change_test.go b/tests/e2e/config_change_test.go index f8028c7783..1f82a517e4 100644 --- a/tests/e2e/config_change_test.go +++ b/tests/e2e/config_change_test.go @@ -23,6 +23,7 @@ var _ bool = ginkgo.Describe("[csi-supervisor] config-change-test", func() { storagePolicyName string ctx context.Context nimbusGeneratedVcPwd string + clientIndex int ) const ( configSecret = "vsphere-config-secret" @@ -44,6 +45,7 @@ var _ bool = ginkgo.Describe("[csi-supervisor] config-change-test", func() { defer cancel() nimbusGeneratedVcPwd = GetAndExpectStringEnvVar(nimbusVcPwd) + clientIndex = 0 }) ginkgo.AfterEach(func() { @@ -99,7 +101,8 @@ var _ bool = ginkgo.Describe("[csi-supervisor] config-change-test", func() { username := vsphereCfg.Global.User currentPassword := vsphereCfg.Global.Password newPassword := e2eTestPassword - err = invokeVCenterChangePassword(username, nimbusGeneratedVcPwd, newPassword, vcAddress) + err = invokeVCenterChangePassword(username, nimbusGeneratedVcPwd, newPassword, vcAddress, + false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Modifying the password in the secret") @@ -114,7 +117,8 @@ var _ bool = ginkgo.Describe("[csi-supervisor] config-change-test", func() { defer func() { ginkgo.By("Reverting the password change") - err = invokeVCenterChangePassword(username, nimbusGeneratedVcPwd, currentPassword, vcAddress) + err = invokeVCenterChangePassword(username, nimbusGeneratedVcPwd, currentPassword, vcAddress, false, + clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Reverting the secret change back to reflect the original password") diff --git a/tests/e2e/config_secret.go b/tests/e2e/config_secret.go index 7b8a135801..22b8d2bfc1 100644 --- a/tests/e2e/config_secret.go +++ b/tests/e2e/config_secret.go @@ -62,6 +62,7 @@ var _ = ginkgo.Describe("Config-Secret", func() { dataCenter string sshClientConfig *ssh.ClientConfig nimbusGeneratedK8sVmPwd string + clusterId string ) ginkgo.BeforeEach(func() { @@ -85,6 +86,7 @@ var _ = ginkgo.Describe("Config-Secret", func() { vCenterIP = e2eVSphere.Config.Global.VCenterHostname vCenterPort = e2eVSphere.Config.Global.VCenterPort dataCenter = e2eVSphere.Config.Global.Datacenters + clusterId = e2eVSphere.Config.Global.ClusterID propagateVal = "false" revertOriginalvCenterUser = false configSecretUser1Alias = configSecretTestUser1 + "@vsphere.local" @@ -126,10 +128,10 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + "and its credentials") createCsiVsphereSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, - vCenterPort, dataCenter, "") + vCenterPort, dataCenter, "", clusterId) ginkgo.By("Restart CSI driver") - restartSuccess, err := restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -148,8 +150,8 @@ var _ = ginkgo.Describe("Config-Secret", func() { 7. 
Cleanup all objects created during the test */ - ginkgo.It("[csi-config-secret-block][csi-config-secret-file] Update user credentials in vsphere config "+ - "secret keeping password same for both test users", func() { + ginkgo.It("Update user credentials in vsphere config secret keeping password same "+ + "for both test users", ginkgo.Label(p1, vsphereConfigSecret, block, file, vanilla), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -177,10 +179,10 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Create vsphere-config-secret file with testuser1 credentials") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, - csiNamespace, vCenterIP, vCenterPort, dataCenter, "") + csiNamespace, vCenterIP, vCenterPort, dataCenter, "", clusterId) ginkgo.By("Restart CSI driver") - restartSuccess, err := restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -201,22 +203,22 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Create vsphere-config-secret file with testuser2 credentials") createCsiVsphereSecret(client, ctx, configSecretUser2Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) defer func() { ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + "and its credentials") createCsiVsphereSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, - vCenterPort, dataCenter, "") + vCenterPort, dataCenter, "", clusterId) revertOriginalvCenterUser = true ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -247,8 +249,8 @@ var _ = ginkgo.Describe("Config-Secret", func() { 12. 
Cleanup all objects created during the test */ - ginkgo.It("[csi-config-secret-block][csi-config-secret-file] Change vcenter user password "+ - "and restart csi controller pod", func() { + ginkgo.It("Change vcenter user password and restart csi controller pod", ginkgo.Label(p0, + vsphereConfigSecret, block, file, vanilla), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() testUser1NewPassword := "Admin!123" @@ -278,10 +280,10 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret with testuser1 credentials") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) ginkgo.By("Restart CSI driver") - restartSuccess, err := restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -346,26 +348,30 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret file with testuser1 updated credentials") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, testUser1NewPassword, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) defer func() { ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + "and its credentials") createCsiVsphereSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) revertOriginalvCenterUser = true ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Wait for csi controller pods to be in running state") + framework.Logf("Waiting for %v csi controller pods to be in running state", + pollTimeout) + time.Sleep(pollTimeout) + + ginkgo.By("Check csi controller pods running state") list_of_pods, err := fpod.GetPodsInNamespace(client, csiNamespace, ignoreLabels) gomega.Expect(err).NotTo(gomega.HaveOccurred()) for i := 0; i < len(list_of_pods); i++ { @@ -400,8 +406,8 @@ var _ = ginkgo.Describe("Config-Secret", func() { 7. 
Cleanup all objects created during the test */ - ginkgo.It("[csi-config-secret-block][csi-config-secret-file] Update user credentials in vsphere config "+ - "secret keeping password different for both test users", func() { + ginkgo.It("Update user credentials in vsphere config secret keeping password different for both test "+ + "users", ginkgo.Label(p0, vsphereConfigSecret, block, file, vanilla), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -429,10 +435,10 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret with testuser1 credentials") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) ginkgo.By("Restart CSI driver") - restartSuccess, err := restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -453,22 +459,22 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret with testuser2 credentials") createCsiVsphereSecret(client, ctx, configSecretUser2Alias, configSecretTestUser2Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) defer func() { ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + "and its credentials") createCsiVsphereSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) revertOriginalvCenterUser = true ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -497,8 +503,8 @@ var _ = ginkgo.Describe("Config-Secret", func() { 9. 
Cleanup all objects created during the test */ - ginkgo.It("[csi-config-secret-block][csi-config-secret-file] Change vcenter ip to hostname and "+ - "viceversa in vsphere config secret", func() { + ginkgo.It("Change vcenter ip to hostname and viceversa in vsphere config "+ + "secret", ginkgo.Label(p0, vsphereConfigSecret, block, file, vanilla), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -526,10 +532,10 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret with testuser1 credentials using vcenter IP") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) ginkgo.By("Restart CSI driver") - restartSuccess, err := restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -555,10 +561,10 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret to use vcenter hostname") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterHostName, vCenterPort, dataCenter, "") + vCenterHostName, vCenterPort, dataCenter, "", clusterId) ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -570,22 +576,22 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret to use vcenter IP") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) defer func() { ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + "and its credentials") createCsiVsphereSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, vCenterPort, - dataCenter, "") + dataCenter, "", clusterId) revertOriginalvCenterUser = true ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -613,8 +619,8 @@ var _ = ginkgo.Describe("Config-Secret", func() { 9. 
Cleanup all objects created during the test */ - ginkgo.It("[csi-config-secret-block][csi-config-secret-file] Change vcenter user to wrong dummy "+ - "user and later switch back to correct one", func() { + ginkgo.It("Change vcenter user to wrong dummy user and later switch back to "+ + "correct one", ginkgo.Label(p0, vsphereConfigSecret, block, file, vanilla), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() dummyTestUser := "dummyUser@vsphere.local" @@ -643,10 +649,10 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret with testuser1 credentials") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) ginkgo.By("Restart CSI driver") - restartSuccess, err := restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -667,7 +673,7 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret with dummy user credentials") createCsiVsphereSecret(client, ctx, dummyTestUser, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) ginkgo.By("Restart CSI driver") err = updateDeploymentReplicawithWait(client, 0, vSphereCSIControllerPodNamePrefix, csiNamespace) @@ -697,22 +703,22 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret with testuser1 credentials") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) defer func() { ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + "and its credentials") createCsiVsphereSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) revertOriginalvCenterUser = true ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -742,8 +748,8 @@ var _ = ginkgo.Describe("Config-Secret", func() { 10. 
Cleanup all objects created during the test */ - ginkgo.It("[csi-config-secret-block][csi-config-secret-file] Add a user without adding required "+ - "roles and privileges and switch back to the correct one", func() { + ginkgo.It("Add a user without adding required roles and privileges and switch back "+ + "to the correct one", ginkgo.Label(p0, vsphereConfigSecret, block, file, vanilla), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -771,10 +777,10 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret with testuser1 credentials") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) ginkgo.By("Restart CSI driver") - restartSuccess, err := restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -795,10 +801,10 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret with testuser2 credentials") createCsiVsphereSecret(client, ctx, configSecretUser2Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -818,22 +824,22 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret with testuser1 credentials which has required roles and privileges") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) defer func() { ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + "and its credentials") createCsiVsphereSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) revertOriginalvCenterUser = true ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -863,8 +869,8 @@ var _ = ginkgo.Describe("Config-Secret", func() { 8. 
Cleanup all objects created during the test */ - ginkgo.It("[csi-config-secret-block][csi-config-secret-file] Add necessary roles and privileges "+ - "to the user post CSI driver creation", func() { + ginkgo.It("Add necessary roles and privileges to the user post CSI driver "+ + "creation", ginkgo.Label(p0, vsphereConfigSecret, block, file, vanilla), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -892,22 +898,22 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret with testuser1 credentials") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) defer func() { ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + "and its credentials") createCsiVsphereSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) revertOriginalvCenterUser = true ginkgo.By("Restart CSI driver") - restartSuccess, err := restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Restart CSI driver") - restartSuccess, err := restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -940,7 +946,7 @@ var _ = ginkgo.Describe("Config-Secret", func() { dataCenters, clusters, hosts, vms, datastores, "reuseUser", "reuseRoles") ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -975,8 +981,8 @@ var _ = ginkgo.Describe("Config-Secret", func() { 10. 
Cleanup all objects created during the test */ - ginkgo.It("[csi-config-secret-block][csi-config-secret-file] Add wrong datacenter and switch back "+ - "to the correct datacenter in vsphere config secret file", func() { + ginkgo.It("Add wrong datacenter and switch back to the correct datacenter in vsphere "+ + "config secret file", ginkgo.Label(p1, vsphereConfigSecret, block, file, vanilla), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1006,10 +1012,10 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret with testuser1 credentials") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) ginkgo.By("Restart CSI driver") - restartSuccess, err := restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1030,7 +1036,7 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret with dummy datacenter details") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dummyDataCenter, "") + vCenterIP, vCenterPort, dummyDataCenter, "", clusterId) ginkgo.By("Restart CSI driver") err = updateDeploymentReplicawithWait(client, 0, vSphereCSIControllerPodNamePrefix, csiNamespace) @@ -1054,22 +1060,22 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret with testuser1 credentials") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) defer func() { ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + "and its credentials") createCsiVsphereSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) revertOriginalvCenterUser = true ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1106,8 +1112,8 @@ var _ = ginkgo.Describe("Config-Secret", func() { 10. 
Cleanup all objects created during the test */ - ginkgo.It("[csi-config-secret-file] Add wrong targetvSANFileShareDatastoreURLs and switch back to the correct "+ - "targetvSANFileShareDatastoreURLs", func() { + ginkgo.It("Add wrong targetvSANFileShareDatastoreURLs and switch back to the correct "+ + "targetvSANFileShareDatastoreURLs", ginkgo.Label(p1, vsphereConfigSecret, file), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() targetDsURL := GetAndExpectStringEnvVar(envSharedDatastoreURL) @@ -1136,10 +1142,10 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret with testuser1 credentials") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) ginkgo.By("Restart CSI driver") - restartSuccess, err := restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1160,22 +1166,22 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret with testuser1 credentials and pass target datastore url") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, targetDsURL) + vCenterIP, vCenterPort, dataCenter, targetDsURL, clusterId) defer func() { ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + "and its credentials") createCsiVsphereSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) revertOriginalvCenterUser = true ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1201,7 +1207,7 @@ var _ = ginkgo.Describe("Config-Secret", func() { 9. 
Cleanup all objects created during the test */ - ginkgo.It("[vc-custom-port] VC with Custom Port", func() { + ginkgo.It("VC with Custom Port", ginkgo.Label(p1, vsphereConfigSecret, file, block, customPort), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() defaultvCenterPort := "443" @@ -1230,10 +1236,10 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Create vsphere-config-secret file with testuser1 credentials using default vc port") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, defaultvCenterPort, dataCenter, "") + vCenterIP, defaultvCenterPort, dataCenter, "", clusterId) ginkgo.By("Restart CSI driver") - restartSuccess, err := restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1265,22 +1271,22 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Create vsphere-config-secret file with testuser1 credentials using non-default port") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) defer func() { ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + "and its credentials") createCsiVsphereSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) revertOriginalvCenterUser = true ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1328,7 +1334,8 @@ var _ = ginkgo.Describe("Config-Secret", func() { 10. 
Cleanup all objects created during the test */ - ginkgo.It("[vc-custom-port] Modify VC Port and validate the workloads", func() { + ginkgo.It("Modify VC Port and validate the workloads", ginkgo.Label(p1, vsphereConfigSecret, file, block, + customPort), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() dummyvCenterPort := "4444" @@ -1357,10 +1364,10 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Create vsphere-config-secret file using testuser1 credentials") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) ginkgo.By("Restart CSI driver") - restartSuccess, err := restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1382,10 +1389,10 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret file with dummy vcenter port") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, dummyvCenterPort, dataCenter, "") + vCenterIP, dummyvCenterPort, dataCenter, "", clusterId) ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1405,22 +1412,22 @@ var _ = ginkgo.Describe("Config-Secret", func() { ginkgo.By("Update vsphere-config-secret with correct vCenter credentials") createCsiVsphereSecret(client, ctx, configSecretUser1Alias, configSecretTestUser1Password, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) defer func() { ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + "and its credentials") createCsiVsphereSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, - vCenterIP, vCenterPort, dataCenter, "") + vCenterIP, vCenterPort, dataCenter, "", clusterId) revertOriginalvCenterUser = true ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Restart CSI driver") - restartSuccess, err = restartCSIDriver(ctx, client, namespace, csiReplicas) + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/tests/e2e/config_secret_utils.go b/tests/e2e/config_secret_utils.go index e10ca24e80..a46bfbf502 100644 --- a/tests/e2e/config_secret_utils.go +++ b/tests/e2e/config_secret_utils.go @@ -28,6 +28,7 @@ import ( "golang.org/x/crypto/ssh" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" @@ -550,17 +551,20 @@ 
func setSearchlevelPermission(masterIp string, sshClientConfig *ssh.ClientConfig

 // createCsiVsphereSecret method is used to create csi vsphere secret file
 func createCsiVsphereSecret(client clientset.Interface, ctx context.Context, testUser string,
-	password string, csiNamespace string, vCenterIP string,
-	vCenterPort string, dataCenter string, targetvSANFileShareDatastoreURLs string) {
+	password string, csiNamespace string, vCenterIP string, vCenterPort string,
+	dataCenter string, targetvSANFileShareDatastoreURLs string, clusterID string) {
 	currentSecret, err := client.CoreV1().Secrets(csiNamespace).Get(ctx, configSecret, metav1.GetOptions{})
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	originalConf := string(currentSecret.Data[vSphereCSIConf])
 	vsphereCfg, err := readConfigFromSecretString(originalConf)
+	framework.Logf("original config: %v", vsphereCfg)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	vsphereCfg.Global.User = testUser
 	vsphereCfg.Global.Password = password
 	vsphereCfg.Global.Datacenters = dataCenter
+	vsphereCfg.Global.ClusterID = clusterID
 	vsphereCfg.Global.TargetvSANFileShareDatastoreURLs = targetvSANFileShareDatastoreURLs
+	framework.Logf("updated config: %v", vsphereCfg)
 	modifiedConf, err := writeConfigToSecretString(vsphereCfg)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	framework.Logf("Updating the secret to reflect new conf credentials")
@@ -800,3 +804,55 @@ func createTestUserAndAssignLimitedRolesAndPrivileges(masterIp string, sshClient
 		}
 	}
 }
+
+// verifyClusterIdConfigMapGeneration verifies if cluster id configmap gets generated by
+// csi driver in csi namespace
+func verifyClusterIdConfigMapGeneration(client clientset.Interface, ctx context.Context,
+	csiNamespace string, cmToExist bool) {
+	_, err := client.CoreV1().ConfigMaps(csiNamespace).Get(ctx, vsphereClusterIdConfigMapName,
+		metav1.GetOptions{})
+	if cmToExist && apierrors.IsNotFound(err) {
+		framework.Logf("Configmap: %s not found in namespace: %s", vsphereClusterIdConfigMapName, csiNamespace)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	} else if !cmToExist {
+		gomega.Expect(apierrors.IsNotFound(err)).To(gomega.BeTrue(), "configmap %s should not exist in namespace %s", vsphereClusterIdConfigMapName, csiNamespace)
+	}
+}
+
+// fetchClusterIdFromConfigmap fetches cluster id value from
+// auto generated cluster id configmap by csi driver
+func fetchClusterIdFromConfigmap(client clientset.Interface, ctx context.Context,
+	csiNamespace string) string {
+	clusterIdCm, err := client.CoreV1().ConfigMaps(csiNamespace).Get(ctx, vsphereClusterIdConfigMapName,
+		metav1.GetOptions{})
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	data := clusterIdCm.Data
+	framework.Logf("cluster id configmap: %v", clusterIdCm)
+	return data["clusterID"]
+}
+
+// recreateVsphereConfigSecret recreates config secret with new config parameters
+// and restarts CSI driver
+func recreateVsphereConfigSecret(client clientset.Interface, ctx context.Context,
+	vCenterUIUser string, vCenterUIPassword string, csiNamespace string, vCenterIP string,
+	clusterId string, vCenterPort string, dataCenter string, csiReplicas int32) {
+	createCsiVsphereSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace,
+		vCenterIP, vCenterPort, dataCenter, "", clusterId)
+
+	ginkgo.By("Restart CSI driver")
+	restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas)
+	gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful")
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+}
+
+// getCSIConfigSecretData returns data obtained from csi config secret
+// in
namespace where CSI is deployed +func getCSIConfigSecretData(client clientset.Interface, ctx context.Context, + csiNamespace string) e2eTestConfig { + currentSecret, err := client.CoreV1().Secrets(csiNamespace).Get(ctx, configSecret, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + originalConf := string(currentSecret.Data[vSphereCSIConf]) + vsphereCfg, err := readConfigFromSecretString(originalConf) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return vsphereCfg +} diff --git a/tests/e2e/csi_cns_telemetry_statefulsets.go b/tests/e2e/csi_cns_telemetry_statefulsets.go index 4e35fa831d..729dde17bd 100644 --- a/tests/e2e/csi_cns_telemetry_statefulsets.go +++ b/tests/e2e/csi_cns_telemetry_statefulsets.go @@ -90,6 +90,11 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] [csi-supervisor] if vanillaCluster { // Reset the cluster distribution value to default value "CSI-Vanilla". setClusterDistribution(ctx, client, vanillaClusterDistribution) + } else if supervisorCluster { + dumpSvcNsEventsOnTestFailure(client, namespace) + } else { + svcClient, svNamespace := getSvcClientAndNamespace() + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) } }) @@ -138,7 +143,7 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] [csi-supervisor] statefulset := GetStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = sc.Name + Spec.StorageClassName = &sc.Name CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) // Waiting for pods status to be Ready. diff --git a/tests/e2e/csi_cns_telemetry_vc_reboot.go b/tests/e2e/csi_cns_telemetry_vc_reboot.go index 11b20c50a3..d8d843a851 100644 --- a/tests/e2e/csi_cns_telemetry_vc_reboot.go +++ b/tests/e2e/csi_cns_telemetry_vc_reboot.go @@ -124,7 +124,7 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] "+ ginkgo.By("Waiting for claim to be in bound phase") pvc, err := fpv.WaitForPVClaimBoundPhase(client, - []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) + []*v1.PersistentVolumeClaim{pvclaim}, 2*framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvc).NotTo(gomega.BeEmpty()) pv := getPvFromClaim(client, pvclaim.Namespace, pvclaim.Name) @@ -201,7 +201,7 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] "+ ginkgo.By("Waiting for PVC2 claim to be in bound phase") pvc2, err := fpv.WaitForPVClaimBoundPhase(client, - []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) + []*v1.PersistentVolumeClaim{pvclaim2}, 2*framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvc2).NotTo(gomega.BeEmpty()) pv2 := getPvFromClaim(client, pvclaim2.Namespace, pvclaim2.Name) @@ -330,7 +330,7 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] "+ ginkgo.By("Waiting for claim to be in bound phase") pvc, err := fpv.WaitForPVClaimBoundPhase(client, - []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) + []*v1.PersistentVolumeClaim{pvclaim}, 2*framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(pvc).NotTo(gomega.BeEmpty()) pv := getPvFromClaim(client, pvclaim.Namespace, pvclaim.Name) diff --git a/tests/e2e/csi_snapshot_basic.go b/tests/e2e/csi_snapshot_basic.go index 3d2736fecc..8846e99fab 100644 --- 
a/tests/e2e/csi_snapshot_basic.go +++ b/tests/e2e/csi_snapshot_basic.go @@ -36,31 +36,35 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/test/e2e/framework" fdep "k8s.io/kubernetes/test/e2e/framework/deployment" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" fpv "k8s.io/kubernetes/test/e2e/framework/pv" fss "k8s.io/kubernetes/test/e2e/framework/statefulset" admissionapi "k8s.io/pod-security-admission/api" - snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - snapclient "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" + snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" ) -var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", func() { +var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { f := framework.NewDefaultFramework("volume-snapshot") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var ( - client clientset.Interface - c clientset.Interface - namespace string - scParameters map[string]string - datastoreURL string - pandoraSyncWaitTime int - pvclaims []*v1.PersistentVolumeClaim - volumeOpsScale int - restConfig *restclient.Config - snapc *snapclient.Clientset - nimbusGeneratedK8sVmPwd string + client clientset.Interface + clientNewGc clientset.Interface + c clientset.Interface + namespace string + scParameters map[string]string + datastoreURL string + pandoraSyncWaitTime int + volumeOpsScale int + restConfig *restclient.Config + guestClusterRestConfig *restclient.Config + snapc *snapclient.Clientset + storagePolicyName string + clientIndex int ) ginkgo.BeforeEach(func() { @@ -76,9 +80,15 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f } //Get snapshot client using the rest config - restConfig = getRestConfigClient() - snapc, err = snapclient.NewForConfig(restConfig) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if !guestCluster { + restConfig = getRestConfigClient() + snapc, err = snapclient.NewForConfig(restConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + guestClusterRestConfig = getRestConfigClientForGuestCluster(guestClusterRestConfig) + snapc, err = snapclient.NewForConfig(guestClusterRestConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } if os.Getenv(envPandoraSyncWaitTime) != "" { pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) @@ -105,54 +115,81 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) c = remoteC } - nimbusGeneratedK8sVmPwd = GetAndExpectStringEnvVar(nimbusK8sVmPwd) + + if guestCluster { + storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) + svcClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, rqLimit) + } + + if !guestCluster { + restConfig = getRestConfigClient() + snapc, err = snapclient.NewForConfig(restConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + guestClusterRestConfig = getRestConfigClientForGuestCluster(guestClusterRestConfig) + snapc, err = snapclient.NewForConfig(guestClusterRestConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + 
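The guest-cluster branch above ends up being written twice in this BeforeEach. A small helper along the following lines would keep the snapshot-client selection in one place; this is only a sketch, assuming the suite's existing guestCluster flag and the getRestConfigClient / getRestConfigClientForGuestCluster helpers shown in the hunk, and the getSnapshotClient name itself is illustrative rather than part of this change.

// getSnapshotClient (illustrative, not part of this change) returns a snapshot
// client for the cluster flavor under test.
func getSnapshotClient(guestClusterRestConfig *restclient.Config) (*snapclient.Clientset, error) {
	if !guestCluster {
		// Vanilla/supervisor runs talk to the cluster the test config points at.
		return snapclient.NewForConfig(getRestConfigClient())
	}
	// Guest cluster runs manage snapshots through the guest cluster's API server.
	return snapclient.NewForConfig(getRestConfigClientForGuestCluster(guestClusterRestConfig))
}

With such a helper the duplicated if/else blocks in the BeforeEach would collapse into a single snapc assignment.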
}) + + ginkgo.AfterEach(func() { + if guestCluster { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + framework.Logf("Collecting supervisor PVC events before performing PV/PVC cleanup") + eventList, err := svcClient.CoreV1().Events(svcNamespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, item := range eventList.Items { + framework.Logf(fmt.Sprintf(item.Message)) + } + } }) /* - Create/Delete snapshot via k8s API using PVC (Dynamic Provisioning) - - 1. Create a storage class (eg: vsan default) and create a pvc using this sc - 2. Create a VolumeSnapshot class with snapshotter as vsphere-csi-driver and set deletionPolicy to Delete - 3. Create a volume-snapshot with labels, using the above snapshot-class and pvc (from step-1) as source - 4. Ensure the snapshot is created, verify using get VolumeSnapshot - 5. Also verify that VolumeSnapshotContent is auto-created - 6. Verify the references to pvc and volume-snapshot on this object - 7. Verify that the VolumeSnapshot has ready-to-use set to True - 8. Verify that the Restore Size set on the snapshot is same as that of the source volume size - 9. Query the snapshot from CNS side using volume id - should pass and return the snapshot entry - 10. Delete the above snapshot from k8s side using kubectl delete, run a get and ensure it is removed - 11. Also ensure that the VolumeSnapshotContent is deleted along with the - volume snapshot as the policy is delete - 12. Query the snapshot from CNS side - should return 0 entries - 13. Cleanup: Delete PVC, SC (validate they are removed) + Create/Delete snapshot via k8s API using PVC (Dynamic Provisioning) + + 1. Create a storage class (eg: vsan default) and create a pvc using this sc + 2. Create a VolumeSnapshot class with snapshotter as vsphere-csi-driver and set deletionPolicy to Delete + 3. Create a volume-snapshot with labels, using the above snapshot-class and pvc (from step-1) as source + 4. Ensure the snapshot is created, verify using get VolumeSnapshot + 5. Also verify that VolumeSnapshotContent is auto-created + 6. Verify the references to pvc and volume-snapshot on this object + 7. Verify that the VolumeSnapshot has ready-to-use set to True + 8. Verify that the Restore Size set on the snapshot is same as that of the source volume size + 9. Query the snapshot from CNS side using volume id - should pass and return the snapshot entry + 10. Delete the above snapshot from k8s side using kubectl delete, run a get and ensure it is removed + 11. Also ensure that the VolumeSnapshotContent is deleted along with the + volume snapshot as the policy is delete + 12. Query the snapshot from CNS side - should return 0 entries + 13. 
Cleanup: Delete PVC, SC (validate they are removed) */ - ginkgo.It("Verify snapshot dynamic provisioning workflow", func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Verify snapshot dynamic provisioning workflow", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var err error - var snapshotContentCreated = false + var volHandle string + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, pvclaim, err = createPVCAndStorageClass(client, - namespace, nil, scParameters, diskSize, nil, "", false, "") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Expect claim to provision volume successfully") - pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", + diskSize, storageclass, true) + volHandle = persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -160,84 +197,43 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - // Verify using CNS Query API if VolumeID retrieved from PV is present. 
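The numbered flow above maps onto the external-snapshotter v6 client roughly as follows. This is a minimal sketch of steps 2-3 only: snapc, ctx and namespace come from the surrounding test, while vscName and pvcName are placeholder names for an already-created VolumeSnapshotClass and a bound PVC.

// Create a VolumeSnapshot from an existing PVC using the v6 snapshot client.
vs := &snapV1.VolumeSnapshot{
	ObjectMeta: metav1.ObjectMeta{GenerateName: "e2e-vs-", Namespace: namespace},
	Spec: snapV1.VolumeSnapshotSpec{
		VolumeSnapshotClassName: &vscName,
		Source: snapV1.VolumeSnapshotSource{
			PersistentVolumeClaimName: &pvcName,
		},
	},
}
vs, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, vs, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())

The createDynamicVolumeSnapshot helper used in the updated test body below effectively bundles this call with the ready-to-use and CNS verification steps that the older inline version performed by hand.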
- ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) - queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) - gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) - ginkgo.By("Create volume snapshot class") - volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, - getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{}) + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - ginkgo.By("Create a volume snapshot") - volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - snapshotCreated := true - defer func() { if snapshotContentCreated { - framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() - ginkgo.By("Verify volume snapshot is created") - volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero()) - - ginkgo.By("Verify volume snapshot content is created") - snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = true - gomega.Expect(*snapshotContent.Status.ReadyToUse).To(gomega.BeTrue()) - - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshothandle := *snapshotContent.Status.SnapshotHandle - snapshotId := strings.Split(snapshothandle, "+")[1] - - ginkgo.By("Query CNS and check the volume snapshot 
entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.By("Delete volume snapshot and verify the snapshot content is deleted") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = false - - framework.Logf("Wait till the volume snapshot is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = false - - ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - framework.Logf("Deleting volume snapshot Again to check Not found error") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }) /* @@ -245,19 +241,19 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f 1. Create a storage class (eg: vsan default) and create a pvc using this sc 2. The volumesnapshotclass is set to delete 3. Create a VolumeSnapshotContent using snapshot-handle - a. get snapshotHandle by referring to an existing volume snapshot - b. this snapshot will be created dynamically, and the snapshot-content that is - created by that will be referred to get the snapshotHandle + a. get snapshotHandle by referring to an existing volume snapshot + b. this snapshot will be created dynamically, and the snapshot-content that is + created by that will be referred to get the snapshotHandle 4. Create a volume snapshot using source set to volumeSnapshotContentName above 5. Ensure the snapshot is created, verify using get VolumeSnapshot 6. Verify the restoreSize on the snapshot and the snapshotcontent is set to same as that of the pvcSize 7. Delete the above snapshot, run a get from k8s side and ensure its removed 8. Run QuerySnapshot from CNS side, the backend snapshot should be deleted 9. Also ensure that the VolumeSnapshotContent is deleted along with the - volume snapshot as the policy is delete + volume snapshot as the policy is delete 10. 
Cleanup the pvc */ - ginkgo.It("Verify snapshot static provisioning through K8s API workflow", func() { + ginkgo.It("[block-vanilla-snapshot] Verify snapshot static provisioning through K8s API workflow", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass @@ -318,22 +314,16 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotCreated := true defer func() { - if snapshotContentCreated { - framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) } - ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime)) - time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + if snapshotContentCreated { + framework.Logf("Deleting volume snapshot content") + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + } }() ginkgo.By("Verify volume snapshot is created") @@ -353,7 +343,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotId := strings.Split(snapshothandle, "+")[1] ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Creating volume snapshot content by snapshotHandle %s", snapshothandle)) @@ -371,9 +361,8 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotContentCreated2 { - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - snapshotContentNew.ObjectMeta.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + snapshotContentNew.ObjectMeta.Name, pandoraSyncWaitTime) } }() @@ -387,8 +376,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotCreated2 { - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, "static-vs", metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, "static-vs", pandoraSyncWaitTime) } }() @@ -399,8 +387,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f framework.Logf("Snapshot details is %+v", staticSnapshot) ginkgo.By("Deleted volume snapshot is created above") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, staticSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, staticSnapshot.Name, pandoraSyncWaitTime) snapshotCreated2 = false framework.Logf("Wait till the volume snapshot is deleted") @@ -421,11 +408,11 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f 8. 
Delete the above snapshot, run a get from k8s side and ensure its removed 9. Run QuerySnapshot from CNS side, the backend snapshot should be deleted 10. Also ensure that the VolumeSnapshotContent is deleted along with the - volume snapshot as the policy is delete + volume snapshot as the policy is delete 11. The snapshot that was created via CNS in step-2 should be deleted as part of k8s snapshot delete 12. Delete the pvc */ - ginkgo.It("Verify snapshot static provisioning via CNS", func() { + ginkgo.It("[block-vanilla-snapshot] Verify snapshot static provisioning via CNS", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass @@ -490,7 +477,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f }() ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) snapshotHandle := volHandle + "+" + snapshotId @@ -509,9 +496,8 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotContentCreated2 { - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - snapshotContentNew.ObjectMeta.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + snapshotContentNew.ObjectMeta.Name, pandoraSyncWaitTime) } }() @@ -537,8 +523,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f framework.Logf("Snapshot details is %+v", staticSnapshot) ginkgo.By("Deleted volume snapshot is created above") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, staticSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, staticSnapshot.Name, pandoraSyncWaitTime) snapshotCreated1 = false snapshotCreated2 = false @@ -553,9 +538,9 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f 1. Create a storage class (eg: vsan default) and create a pvc using this sc 2. The volumesnapshotclass is set to delete 3. Create a VolumeSnapshotContent using snapshot-handle with deletion policy Retain - a. get snapshotHandle by referring to an existing volume snapshot - b. this snapshot will be created dynamically, and the snapshot-content that is - created by that will be referred to get the snapshotHandle + a. get snapshotHandle by referring to an existing volume snapshot + b. this snapshot will be created dynamically, and the snapshot-content that is + created by that will be referred to get the snapshotHandle 4. Create a volume snapshot using source set to volumeSnapshotContentName above 5. Ensure the snapshot is created, verify using get VolumeSnapshot 6. Verify the restoreSize on the snapshot and the snapshotcontent is set to same as that of the pvcSize @@ -567,7 +552,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f 12. Delete volume snapshot content 2 13. 
Cleanup the pvc, volume snapshot class and storage class */ - ginkgo.It("Verify snapshot static provisioning with deletion policy Retain", func() { + ginkgo.It("[block-vanilla-snapshot] Verify snapshot static provisioning with deletion policy Retain", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass @@ -629,16 +614,17 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) } }() @@ -659,7 +645,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotId := strings.Split(snapshothandle, "+")[1] ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ns, err := framework.CreateTestingNS(f.BaseName, client, nil) @@ -680,9 +666,8 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotContentCreated2 { - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - snapshotContentNew.ObjectMeta.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + snapshotContentNew.ObjectMeta.Name, pandoraSyncWaitTime) } }() @@ -719,9 +704,8 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(snapshotContentGetResult.Name).Should(gomega.Equal(snapshotContent2.Name)) framework.Logf("Snapshotcontent name is %s", snapshotContentGetResult.ObjectMeta.Name) - framework.Logf("Deleting volume snapshot 1 ") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Deleting volume snapshot 1") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) snapshotCreated = false framework.Logf("Wait till the volume snapshot is deleted") @@ -730,9 +714,8 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotContentCreated = false framework.Logf("Delete volume snapshot content 2") - err = snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - snapshotContentGetResult.ObjectMeta.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + snapshotContentGetResult.ObjectMeta.Name, pandoraSyncWaitTime) snapshotContentCreated2 = false }) @@ 
-749,7 +732,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f 9. Query the Snasphot from CNS side using the volumeId 10. Cleanup the snapshot and delete the volume */ - ginkgo.It("Verify snapshot static provisioning with deletion policy Retain - test2", func() { + ginkgo.It("[block-vanilla-snapshot] Verify snapshot static provisioning with deletion policy Retain - test2", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass @@ -812,15 +795,16 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, snapshot1.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot1.Name, pandoraSyncWaitTime) } if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - contentName, metav1.DeleteOptions{}) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + contentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, contentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -843,15 +827,12 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotId := strings.Split(snapshothandle, "+")[1] ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Deleting volume snapshot 1 " + snapshot1.Name) - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, snapshot1.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot1.Name, pandoraSyncWaitTime) snapshotCreated = false - time.Sleep(kubeAPIRecoveryTime) _, err = snapc.SnapshotV1().VolumeSnapshots(namespace).Get(ctx, snapshot1.Name, metav1.GetOptions{}) @@ -862,7 +843,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) + err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify volume snapshot content is not deleted") @@ -873,9 +854,8 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f framework.Logf("Snapshotcontent name is %s", snapshotContentGetResult.Name) framework.Logf("Delete volume snapshot content") - err = snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - snapshotContentGetResult.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + snapshotContentGetResult.Name, pandoraSyncWaitTime) snapshotContentCreated = false framework.Logf("Wait till the volume snapshot content is deleted") @@ -891,14 +871,16 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshots as source, use the same sc 4. Ensure the pvc gets provisioned and is Bound 5. 
Attach the pvc to a pod and ensure data from snapshot is available - (file that was written in step.1 should be available) + (file that was written in step.1 should be available) 6. And also write new data to the restored volumes and it should succeed 7. Delete the snapshots and pvcs/pods created in steps 1,2,3 8. Continue to write new data to the restore volumes and it should succeed 9. Create new snapshots on restore volume and verify it succeeds 10. Run cleanup: Delete snapshots, restored-volumes, pods */ - ginkgo.It("Volume restore using snapshot a dynamic snapshot b pre-provisioned snapshot", func() { + + ginkgo.It("[block-vanilla-snapshot] Volume restore using snapshot a dynamic snapshot b "+ + "pre-provisioned snapshot", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass @@ -960,16 +942,17 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) } }() @@ -990,7 +973,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotId := strings.Split(snapshothandle, "+")[1] ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Creating volume snapshot content by snapshotHandle %s", snapshothandle)) @@ -1008,9 +991,8 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotContentCreated2 { - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - snapshotContentNew.ObjectMeta.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + snapshotContentNew.ObjectMeta.Name, pandoraSyncWaitTime) } }() @@ -1024,8 +1006,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotCreated2 { - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, "static-vs", metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, "static-vs", pandoraSyncWaitTime) } }() @@ -1126,25 +1107,25 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := 
framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, wrtiecmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output2 := framework.RunKubectlOrDie(namespace, cmd2...) + output2 := e2ekubectl.RunKubectlOrDie(namespace, cmd2...) gomega.Expect(strings.Contains(output2, "Hello message from Pod1")).NotTo(gomega.BeFalse()) wrtiecmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, wrtiecmd2...) - output2 = framework.RunKubectlOrDie(namespace, cmd2...) + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd2...) + output2 = e2ekubectl.RunKubectlOrDie(namespace, cmd2...) gomega.Expect(strings.Contains(output2, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) ginkgo.By("Create a volume snapshot") @@ -1158,16 +1139,17 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotContentCreated3 { framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated3 { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) } }() @@ -1189,12 +1171,11 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotId3 := strings.Split(snapshothandle3, "+")[1] ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle2, snapshotId3) + err = verifySnapshotIsCreatedInCNS(volHandle2, snapshotId3, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Deleted volume snapshot is created above") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot3.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot3.Name, pandoraSyncWaitTime) snapshotCreated3 = false framework.Logf("Wait till the volume snapshot is deleted") @@ -1203,8 +1184,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f 
snapshotContentCreated3 = false ginkgo.By("Deleted volume snapshot is created above") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, staticSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, staticSnapshot.Name, pandoraSyncWaitTime) snapshotCreated2 = false framework.Logf("Wait till the volume snapshot is deleted") @@ -1213,8 +1193,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotContentCreated2 = false ginkgo.By("Delete volume snapshot and verify the snapshot content is deleted") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) snapshotCreated = false framework.Logf("Wait till the volume snapshot is deleted") @@ -1224,48 +1203,61 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotContentCreated = false ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) + err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* - Volume restore using snapshot on a different storageclass - 1. Create a sc with thin-provisioned spbm policy, create a pvc and attach the pvc to a pod - 2. Create a dynamically provisioned snapshots using this pvc - 3. create another sc pointing to a different spbm policy (say thick) - 4. Run a restore workflow by giving a different storageclass in the pvc spec - 5. the new storageclass would point to a thick provisioned spbm plocy, - while the source pvc was created usig thin provisioned psp-operatorlicy - 6. cleanup spbm policies, sc's, pvc's + + Snapshot creation and restore workflow verification with xfs filesystem + 1. Create a storage class with fstype set to XFS and create a pvc using this sc + 2. Create a pod which uses above PVC + 3. Create file1.txt data at mountpath + 4. Create a VolumeSnapshotClass with snapshotter as vsphere-csi-driver and set deletionPolicy to Delete + 5. Create a VolumeSnapshot with labels, using the above snapshot-class and pvc (from step-1) as source + 6. Ensure the snapshot is created, verify using get VolumeSnapshot + 7. Also verify that VolumeSnapshotContent is auto created + 8. Verify that the VolumeSnapshot has ReadyToUse set to True + 9. Query the snapshot from CNS side using volume id to ensure that snapshot is created + 10. Create new PVC using above snapshot as source (restore operation) + 11. Ensure the PVC gets provisioned and is Bound + 12. Attach this PVC to a pod on the same node where source volume is mounted + 13. Ensure that file1.txt from snapshot is available + 14. And write new file file2.txt to the restored volume and it should succeed + 15. Delete the VolumeSnapshot, PVCs and pods created in above steps and ensure it is removed + 16. Query the snapshot from CNS side - it shouldn't be available + 17. 
Delete SC and VolumeSnapshotClass */ - ginkgo.It("Volume restore using snapshot on a different storageclass", func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Volume snapshot creation and restoration workflow "+ + "with xfs filesystem", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var pvclaims []*v1.PersistentVolumeClaim - var err error - var snapshotContentCreated = false - var snapshotCreated = false - ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, pvclaim, err = createPVCAndStorageClass(client, - namespace, nil, scParameters, diskSize, nil, "", false, "") + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } + + scParameters[scParamFsType] = xfsFSType + + ginkgo.By("Create storage class with xfs filesystem and create PVC") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, + *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Expect claim to provision volume successfully") - pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1280,83 +1272,85 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) - ginkgo.By("Create volume snapshot class") - volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, - getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{}) + // Create a Pod to use this PVC, and verify volume has been attached + ginkgo.By("Creating pod to attach PV to the node") + pod, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, + execCommand) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, - metav1.DeleteOptions{}) + // Delete POD + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Create a volume snapshot") - volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), 
metav1.CreateOptions{}) + // Verify volume is attached to the node + var vmUUID string + nodeName := pod.Spec.NodeName + + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, nodeName)) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + // Verify filesystem used to mount volume inside pod is xfs + ginkgo.By("Verify that filesystem type is xfs as expected") + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, + xfsFSType, time.Minute) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Create file1.txt at mountpath inside pod + ginkgo.By(fmt.Sprintf("Creating file file1.txt at mountpath inside pod: %v", pod.Name)) + data1 := "This file file1.txt is written by Pod1" + filePath1 := "/mnt/volume1/file1.txt" + writeDataOnFileFromPod(namespace, pod.Name, filePath1, data1) + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if snapshotContentCreated { - framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, - metav1.DeleteOptions{}) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() - ginkgo.By("Verify volume snapshot is created") - volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = true - gomega.Expect(volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero()) - - ginkgo.By("Verify volume snapshot content is created") - snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = true - gomega.Expect(*snapshotContent.Status.ReadyToUse).To(gomega.BeTrue()) - - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshothandle := *snapshotContent.Status.SnapshotHandle - snapshotId := strings.Split(snapshothandle, "+")[1] - - ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - scParameters1 := make(map[string]string) - - scParameters1[scParamStoragePolicyName] = "Management Storage Policy - Regular" - - curtime := time.Now().Unix() - randomValue := rand.Int() - val := strconv.FormatInt(int64(randomValue), 10) - val = string(val[1:3]) - curtimestring := strconv.FormatInt(curtime, 10) - scName := "snapshot" + curtimestring + val - storageclass1, err := createStorageClass(client, scParameters1, nil, "", "", false, scName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - pvcSpec2 := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass1, nil, - v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) - - pvclaim2, err := fpv.CreatePVC(client, namespace, pvcSpec2) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - persistentvolume2, err := fpv.WaitForPVClaimBoundPhase(client, []*v1.PersistentVolumeClaim{pvclaim2}, - framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle2 := persistentvolume2[0].Spec.CSI.VolumeHandle + ginkgo.By("Restore snapshot to new PVC") + pvclaim2, persistentVolumes2, _ := verifyVolumeRestoreOperation(ctx, client, + namespace, storageclass, volumeSnapshot, false) + volHandle2 := persistentVolumes2[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) + } gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) defer func() { @@ -1366,40 +1360,198 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Delete volume snapshot and verify the snapshot content is deleted") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) + ginkgo.By("Creating a pod to attach restored PV on the same node where earlier pod is running") + nodeSelector := make(map[string]string) + nodeSelector["kubernetes.io/hostname"] = nodeName + pod2, err := createPod(client, namespace, nodeSelector, []*v1.PersistentVolumeClaim{pvclaim2}, false, + execCommand) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = false - framework.Logf("Wait till the volume snapshot is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName) + defer func() { + // Delete POD + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) + err = fpod.DeletePodWithWait(client, pod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Verify that new pod is scheduled on same node where earlier pod is running") + nodeName2 := pod2.Spec.NodeName + gomega.Expect(nodeName == nodeName2).To(gomega.BeTrue(), "Pod is not scheduled on expected node") + + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod2.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod2.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to 
the node: %s", volHandle2, nodeName2)) + isDiskAttached, err = e2eVSphere.isVolumeAttachedToVM(client, volHandle2, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = false + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") - ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) + // Verify filesystem used to mount volume inside pod is xfs + ginkgo.By("Verify that filesystem type is xfs inside pod which is using restored PVC") + _, err = e2eoutput.LookForStringInPodExec(namespace, pod2.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, + xfsFSType, time.Minute) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Ensure that file1.txt is available as expected on the restored PVC + ginkgo.By("Verify that file1.txt data is available as part of snapshot") + output := readFileFromPod(namespace, pod2.Name, filePath1) + gomega.Expect(output == data1+"\n").To(gomega.BeTrue(), + "Pod2 is not able to read file1.txt written before snapshot creation") + + // Create new file file2.txt at mountpath inside pod + ginkgo.By(fmt.Sprintf("Creating file file2.txt at mountpath inside pod: %v", pod2.Name)) + data2 := "This file file2.txt is written by Pod2" + filePath2 := "/mnt/volume1/file2.txt" + writeDataOnFileFromPod(namespace, pod2.Name, filePath2, data2) + + ginkgo.By("Verify that file2.txt data can be successfully read") + output = readFileFromPod(namespace, pod2.Name, filePath2) + gomega.Expect(output == data2+"\n").To(gomega.BeTrue(), "Pod2 is not able to read file2.txt") + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Volume restore using snapshot on a different storageclass + 1. Create a sc with thin-provisioned spbm policy, create a pvc and attach the pvc to a pod + 2. Create a dynamically provisioned snapshot using this pvc + 3. create another sc pointing to a different spbm policy (say thick) + 4. Run a restore workflow by giving a different storageclass in the pvc spec + 5. the new storageclass would point to a thick provisioned spbm policy, + while the source pvc was created using a thin provisioned policy + 6. 
cleanup spbm policies, sc's, pvc's + */ + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Volume restore using snapshot on a different storageclass", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } + + ginkgo.By("Create storage class and PVC") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", + diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + scParameters1 := make(map[string]string) + scParameters1[scParamStoragePolicyName] = "Management Storage Policy - Regular" + + curtime := time.Now().Unix() + randomValue := rand.Int() + val := strconv.FormatInt(int64(randomValue), 10) + val = string(val[1:3]) + curtimestring := strconv.FormatInt(curtime, 10) + scName := "snapshot" + curtimestring + val + var storageclass1 *storagev1.StorageClass + + if vanillaCluster { + storageclass1, err = createStorageClass(client, scParameters1, nil, "", "", false, scName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if guestCluster { + scName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores2) + storageclass1, err = client.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{}) + } + + pvclaim2, persistentVolumes2, _ := verifyVolumeRestoreOperation(ctx, client, + namespace, 
storageclass1, volumeSnapshot, false) + volHandle2 := persistentVolumes2[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) + } + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* - Delete the namespace hosting the pvcs and volume-snapshots and - recover the data using snapshot-content - 1. Create a sc, create a pvc using this sc on a non-default namesapce - 2. create a dynamic snapshot using the pvc as source - 3. verify volume-snapshot is ready-to-use and volumesnapshotcontent is auto-created - 4. Delete the non-default namespace which should delete all namespaced objects such as pvc, volume-snapshot - 5. Ensure the volumesnapshotcontent object which is cluster-scoped does not get deleted - 6. Also verify we can re-provision a snapshot and restore a volume using - this object on another namespace (could be default too) - 7. This VolumeSnapshotContent is dynamically created. we can't use it for pre-provisioned snapshot. - we would be creating a new VolumeSnapshotContent pointing to the same snapshotHandle - and then create a new VolumeSnapshot to bind with it - 8. Ensure the pvc with source as snapshot creates successfully and is bound - 9. Cleanup the snapshot, pvcs and ns + Delete the namespace hosting the pvcs and volume-snapshots and + recover the data using snapshot-content + 1. Create a sc, create a pvc using this sc on a non-default namespace + 2. create a dynamic snapshot using the pvc as source + 3. verify volume-snapshot is ready-to-use and volumesnapshotcontent is auto-created + 4. Delete the non-default namespace which should delete all namespaced objects such as pvc, volume-snapshot + 5. Ensure the volumesnapshotcontent object which is cluster-scoped does not get deleted + 6. Also verify we can re-provision a snapshot and restore a volume using + this object on another namespace (could be default too) + 7. This VolumeSnapshotContent is dynamically created. we can't use it for pre-provisioned snapshot. + we would be creating a new VolumeSnapshotContent pointing to the same snapshotHandle + and then create a new VolumeSnapshot to bind with it + 8. Ensure the pvc with source as snapshot creates successfully and is bound + 9. 
Cleanup the snapshot, pvcs and ns */ - ginkgo.It("Delete the namespace hosting the pvcs and volume-snapshots and "+ - "recover the data using snapshot-content", func() { + ginkgo.It("[block-vanilla-snapshot] Delete the namespace hosting the pvcs and "+ + "volume-snapshots and recover the data using snapshot-content", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass @@ -1407,7 +1559,6 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f var pvclaims []*v1.PersistentVolumeClaim var err error var snapshotContentCreated = false - var snapshotCreated = false ginkgo.By("Creating new namespace for the test") namespace1, err := framework.CreateTestingNS(f.BaseName, client, nil) @@ -1464,6 +1615,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { + ginkgo.By("Delete volume snapshot class") err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, metav1.DeleteOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1474,29 +1626,38 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f getVolumeSnapshotSpec(newNamespaceName, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Volume snapshot name is : %s", snapshot1.Name) - + snapshotCreated := true + var snapshotContent1Name = "" defer func() { if !isNamespaceDeleted { + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot1.Name, pandoraSyncWaitTime) + } if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *snapshot1.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *snapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) - if snapshotCreated { - framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(newNamespaceName).Delete(ctx, - snapshot1.Name, metav1.DeleteOptions{}) + framework.Logf("Wait till the volume snapshot content is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *snapshot1.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } + if snapshotContentCreated { + framework.Logf("Deleting volume snapshot content") + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + snapshotContent1Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot content is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *snapshot1.Status.BoundVolumeSnapshotContentName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() ginkgo.By("Verify volume snapshot is Ready to use") snapshot1_updated, err := waitForVolumeSnapshotReadyToUse(*snapc, ctx, newNamespaceName, snapshot1.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = true gomega.Expect(snapshot1_updated.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero()) ginkgo.By("Verify volume snapshot content is created") @@ -1504,6 +1665,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f *snapshot1_updated.Status.BoundVolumeSnapshotContentName, 
metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) snapshotContentCreated = true + snapshotContent1Name = snapshotContent1.Name gomega.Expect(*snapshotContent1.Status.ReadyToUse).To(gomega.BeTrue()) @@ -1512,7 +1674,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotId := strings.Split(snapshothandle, "+")[1] ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Delete namespace") @@ -1527,6 +1689,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f _, err = snapc.SnapshotV1().VolumeSnapshots(newNamespaceName).Get(ctx, snapshot1.Name, metav1.GetOptions{}) gomega.Expect(err).To(gomega.HaveOccurred()) + snapshotCreated = false _, err = snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, snapshotContent1.Name, metav1.GetOptions{}) @@ -1554,9 +1717,8 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotContentCreated2 { - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - snapshotContent2_updated.ObjectMeta.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + snapshotContent2_updated.ObjectMeta.Name, pandoraSyncWaitTime) } }() @@ -1570,6 +1732,9 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotCreated2 { + framework.Logf("Deleting volume snapshot2") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot2.Name, pandoraSyncWaitTime) + err = e2eVSphere.deleteVolumeSnapshotInCNS(volHandle, volumeSnapshot2.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1596,6 +1761,15 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) defer func() { + if snapshotCreated2 { + framework.Logf("Deleting volume snapshot2") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace2Name, volumeSnapshot2.Name, pandoraSyncWaitTime) + + err = e2eVSphere.deleteVolumeSnapshotInCNS(volHandle, volumeSnapshot2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + snapshotCreated2 = false + } + framework.Logf("Deleting restored PVC") err := fpv.DeletePersistentVolumeClaim(client, pvclaim2.Name, namespace2Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) @@ -1603,34 +1777,28 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f }() ginkgo.By("Deleted volume snapshot is created above") - err = snapc.SnapshotV1().VolumeSnapshots(namespace2Name).Delete(ctx, - volumeSnapshot2_updated.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace2Name, volumeSnapshot2.Name, pandoraSyncWaitTime) snapshotCreated2 = false - ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime)) - time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) - framework.Logf("Wait till the volume snapshot content is deleted") - err = snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - volumeSnapshot2_updated.ObjectMeta.Name, metav1.DeleteOptions{}) - gomega.Expect(err).To(gomega.HaveOccurred()) + 
deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + volumeSnapshot2_updated.ObjectMeta.Name, pandoraSyncWaitTime) err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, volumeSnapshot2_updated.ObjectMeta.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) snapshotContentCreated2 = false framework.Logf("Deleting volume snapshot content 1") - err = snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - snapshot1_updated.ObjectMeta.Name, metav1.DeleteOptions{}) - gomega.Expect(err).To(gomega.HaveOccurred()) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + snapshotContent1Name, pandoraSyncWaitTime) - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, snapshot1_updated.ObjectMeta.Name) + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + snapshot1_updated.ObjectMeta.Name, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated2 = false + snapshotContentCreated = false ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) + err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -1644,32 +1812,30 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f 6. Delete would return a pass from CSI side (this is expected because CSI is designed to return success even though it cannot find a snapshot in the backend) */ - ginkgo.It("Delete a non-existent snapshot", func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Delete a non-existent snapshot", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var pvclaims []*v1.PersistentVolumeClaim - var err error - var snapshotContentCreated = false - var snapshotCreated = false + + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, pvclaim, err = createPVCAndStorageClass(client, - namespace, nil, scParameters, diskSize, nil, "", false, "") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Expect claim to provision volume successfully") - pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", + diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) defer func() { @@ -1679,92 +1845,48 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - // Verify using CNS Query API if VolumeID retrieved from PV is present. 
- ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) - queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) - gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) - ginkgo.By("Create volume snapshot class") - volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, - getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{}) + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, - volumeSnapshotClass.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - ginkgo.By("Create a volume snapshot") - volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - defer func() { if snapshotContentCreated { - framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - volumeSnapshot.Name, metav1.DeleteOptions{}) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() - ginkgo.By("Verify volume snapshot is created") - volumeSnapshot_updated, err := waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = true - gomega.Expect(volumeSnapshot_updated.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero()) - - ginkgo.By("Verify volume snapshot content is created") - snapshotContent_updated, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *volumeSnapshot_updated.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = true - gomega.Expect(*snapshotContent_updated.Status.ReadyToUse).To(gomega.BeTrue()) - - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshothandle := *snapshotContent_updated.Status.SnapshotHandle - snapshotId := strings.Split(snapshothandle, "+")[1] - - 
ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Delete snapshot from CNS") err = e2eVSphere.deleteVolumeSnapshotInCNS(volHandle, snapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) snapshotCreated = false - ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime)) - time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) - - ginkgo.By("Delete volume snapshot") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - framework.Logf("Deleting volume snapshot content") - err = snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - snapshotContent_updated.Name, metav1.DeleteOptions{}) + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Wait till the volume snapshot content is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, snapshotContent_updated.ObjectMeta.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = false }) /* @@ -1774,32 +1896,33 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f ensure the default class is picked and honored for snapshot creation 3. Validate the fields after snapshot creation succeeds (snapshotClass, retentionPolicy) */ - ginkgo.It("Create snapshots using default VolumeSnapshotClass", func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Create snapshots using default "+ + "VolumeSnapshotClass", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var pvclaims []*v1.PersistentVolumeClaim - var err error - var snapshotContentCreated = false - var snapshotCreated = false + + var volumeSnapshotClass *snapV1.VolumeSnapshotClass + + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, pvclaim, err = createPVCAndStorageClass(client, - namespace, nil, scParameters, diskSize, nil, "", false, "") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Expect claim to provision volume successfully") - pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", + diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + 
if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) defer func() { @@ -1809,86 +1932,69 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - // Verify using CNS Query API if VolumeID retrieved from PV is present. - ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) - queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) - gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) - ginkgo.By("Create volume snapshot class") - vscSpec := getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil) - vscSpec.ObjectMeta.Annotations = map[string]string{ - "snapshot.storage.kubernetes.io/is-default-class": "true", - } - volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, - vscSpec, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + vscSpec := getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil) + vscSpec.ObjectMeta.Annotations = map[string]string{ + "snapshot.storage.kubernetes.io/is-default-class": "true", + } + volumeSnapshotClass, err = snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, + vscSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if guestCluster { + restConfig = getRestConfigClient() + snapc, err = snapclient.NewForConfig(restConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + vscSpec := getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil) + vscSpec.ObjectMeta.Annotations = map[string]string{ + "snapshot.storage.kubernetes.io/is-default-class": "true", + } + volumeSnapshotClass, err = snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, + vscSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } defer func() { + if guestCluster { + restConfig = getRestConfigClient() + snapc, err = snapclient.NewForConfig(restConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, metav1.DeleteOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Create a volume snapshot") - volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpecWithoutSC(namespace, pvclaim.Name), metav1.CreateOptions{}) + ginkgo.By("Create a dynamic volume snapshot") + if guestCluster { + guestClusterRestConfig = getRestConfigClientForGuestCluster(guestClusterRestConfig) + snapc, err = snapclient.NewForConfig(guestClusterRestConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - defer func() { if snapshotContentCreated { - framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - volumeSnapshot.Name, metav1.DeleteOptions{}) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() - ginkgo.By("Verify volume snapshot is created") - volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = true - gomega.Expect(volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero()) - - ginkgo.By("Verify volume snapshot content is created") - snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = true - gomega.Expect(*snapshotContent.Status.ReadyToUse).To(gomega.BeTrue()) - gomega.Expect(*snapshotContent.Spec.VolumeSnapshotClassName).To(gomega.Equal(volumeSnapshotClass.Name)) - - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshothandle := *snapshotContent.Status.SnapshotHandle - snapshotId := strings.Split(snapshothandle, "+")[1] - - ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.By("Delete volume snapshot and verify the snapshot content is deleted") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - volumeSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = false - - framework.Logf("Wait till the volume snapshot is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = false - - ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -1900,34 +2006,31 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f 3. Verify the error 4. 
Create with exact size and ensure it succeeds */ - ginkgo.It("Create Volume from snapshot with different size", func() { + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] Create Volume from snapshot with different size", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var pvclaims []*v1.PersistentVolumeClaim - var err error - var snapshotContentCreated = false - var snapshotCreated = false + + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, pvclaim, err = createPVCAndStorageClass(client, - namespace, nil, scParameters, diskSize, nil, "", false, "") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Expect claim to provision volume successfully") - pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", + diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - defer func() { err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1935,67 +2038,39 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - // Verify using CNS Query API if VolumeID retrieved from PV is present. 
- ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) - queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) - gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) - ginkgo.By("Create volume snapshot class") - volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, - getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{}) + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - ginkgo.By("Create a volume snapshot") - volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - defer func() { if snapshotContentCreated { - framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, - metav1.DeleteOptions{}) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() - ginkgo.By("Verify volume snapshot is created") - volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = true - gomega.Expect(volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero()) - - ginkgo.By("Verify volume snapshot content is created") - snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = true - gomega.Expect(*snapshotContent.Status.ReadyToUse).To(gomega.BeTrue()) - - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshothandle := *snapshotContent.Status.SnapshotHandle - snapshotId := strings.Split(snapshothandle, "+")[1] - - ginkgo.By("Query CNS and check the volume snapshot 
entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Create PVC using the higher size") pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, defaultrqLimit, storageclass, nil, v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) @@ -2020,40 +2095,41 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvc2Deleted = true + if guestCluster { + framework.Logf("Deleting pending PVCs from SVC namespace") + pvcList := getAllPVCFromNamespace(svcClient, svcNamespace) + for _, pvc := range pvcList.Items { + if pvc.Status.Phase == v1.ClaimPending { + framework.ExpectNoError(fpv.DeletePersistentVolumeClaim(svcClient, pvc.Name, svcNamespace), + "Failed to delete PVC", pvc.Name) + } + } + } + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime)) time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) - ginkgo.By("Delete volume snapshot and verify the snapshot content is deleted") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = false - - framework.Logf("Wait till the volume snapshot is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = false - - ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* - Snapshot workflow for statefulsets - 1. Create a statefulset with 3 replicas using a storageclass with volumeBindingMode set to Immediate - 2. Wait for pvcs to be in Bound state - 3. Wait for pods to be in Running state - 4. Create snapshot on 3rd replica's pvc (pvc as source) - 5. Scale down the statefulset to 2 - 6. Delete the pvc on which snapshot was created - 7. PVC delete succeeds but PV delete will fail as there is snapshot - expected - 8. Create a new PVC with same name (using the snapshot from step-4 as source) - verify a new PV is created - 9. Scale up the statefulset to 3 - 10. Verify if the new pod attaches to the PV created in step-8 - 11. Cleanup the sts and the snapshot + pv that was left behind in step-7 + Snapshot workflow for statefulsets + 1. Create a statefulset with 3 replicas using a storageclass with volumeBindingMode set to Immediate + 2. Wait for pvcs to be in Bound state + 3. Wait for pods to be in Running state + 4. Create snapshot on 3rd replica's pvc (pvc as source) + 5. Scale down the statefulset to 2 + 6. Delete the pvc on which snapshot was created + 7. PVC delete succeeds but PV delete will fail as there is snapshot - expected + 8. Create a new PVC with same name (using the snapshot from step-4 as source) - verify a new PV is created + 9. Scale up the statefulset to 3 + 10. Verify if the new pod attaches to the PV created in step-8 + 11. 
Cleanup the sts and the snapshot + pv that was left behind in step-7 */ - ginkgo.It("Snapshot workflow for statefulsets", func() { + ginkgo.It("[block-vanilla-snapshot] Snapshot workflow for statefulsets", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var err error @@ -2086,7 +2162,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f statefulset := GetStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = scName + Spec.StorageClassName = &scName *statefulset.Spec.Replicas = 2 CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) @@ -2196,7 +2272,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotId1 := strings.Split(snapshothandle1, "+")[1] ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle1, snapshotId1) + err = verifySnapshotIsCreatedInCNS(volHandle1, snapshotId1, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create a volume snapshot - 2") @@ -2241,7 +2317,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotId2 := strings.Split(snapshothandle2, "+")[1] ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle2, snapshotId2) + err = verifySnapshotIsCreatedInCNS(volHandle2, snapshotId2, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1)) @@ -2277,7 +2353,9 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f framework.Logf("Wait till the volume snapshot content is deleted") err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *snapshotToBeDeleted.Status.BoundVolumeSnapshotContentName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if err != nil { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } err = fpv.DeletePersistentVolumeClaim(client, pvcToDelete.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2349,7 +2427,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotContentCreated2 = false ginkgo.By("Verify snapshot 1 entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle1, snapshotId1) + err = verifySnapshotIsDeletedInCNS(volHandle1, snapshotId1, false) if err != nil { if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2357,7 +2435,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f } ginkgo.By("Verify snapshot 2 entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle2, snapshotId2) + err = verifySnapshotIsDeletedInCNS(volHandle2, snapshotId2, false) if err != nil { if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2371,36 +2449,35 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f 2. Create a dynamic snapshot using above pvc as source 3. Delete this pvc, expect the pvc to be deleted successfully 4. Underlying pv should not be deleted and should have a valid error - calling out that the volume has active snapshots + calling out that the volume has active snapshots (note: the storageclass here is set to Delete retentionPolicy) 5. 
Expect VolumeFailedDelete error with an appropriate err-msg 6. Run cleanup - delete the snapshots and then delete pv */ - ginkgo.It("Volume deletion with existing snapshots", func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Volume deletion with existing snapshots", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var pvclaims []*v1.PersistentVolumeClaim - var err error - var snapshotContentCreated = false + + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, pvclaim, err = createPVCAndStorageClass(client, - namespace, nil, scParameters, diskSize, nil, "", false, "") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Expect claim to provision volume successfully") - pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvclaim, persistentvolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", + diskSize, storageclass, true) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) defer func() { @@ -2412,67 +2489,39 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - // Verify using CNS Query API if VolumeID retrieved from PV is present. 
- ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) - queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) - gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) - ginkgo.By("Create volume snapshot class") - volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, - getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{}) + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - ginkgo.By("Create a volume snapshot") - volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - snapshotCreated := true - defer func() { if snapshotContentCreated { - framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - volumeSnapshot.Name, metav1.DeleteOptions{}) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() - ginkgo.By("Verify volume snapshot is created") - volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero()) - - ginkgo.By("Verify volume snapshot content is created") - snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = true - gomega.Expect(*snapshotContent.Status.ReadyToUse).To(gomega.BeTrue()) - - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshothandle := *snapshotContent.Status.SnapshotHandle - snapshotId := strings.Split(snapshothandle, "+")[1] - - ginkgo.By("Query CNS and check the volume snapshot 
entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Delete PVC before deleting the snapshot") err = fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) gomega.Expect(err).To(gomega.HaveOccurred()) @@ -2481,19 +2530,9 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f _, err = client.CoreV1().PersistentVolumes().Get(ctx, persistentvolumes[0].Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Delete volume snapshot and verify the snapshot content is deleted") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = false - - framework.Logf("Wait till the volume snapshot is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = false - - ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Delete PV") @@ -2509,32 +2548,30 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } - }) /* Create a pre-provisioned snapshot using VolumeSnapshotContent as source - (use VSC which is auto-created by a dynamic provisioned snapshot) + (use VSC which is auto-created by a dynamic provisioned snapshot) 1. create a sc, and pvc using this sc 2. create a dynamic snapshot using above pvc as source 3. verify that it auto-created a VolumeSnapshotContent object 4. create a pre-provisioned snapshot (which uses VolumeSnapshotContent as source) using the VSC from step(3) 5. 
Ensure this provisioning fails with appropriate error: SnapshotContentMismatch error */ - ginkgo.It("Create a pre-provisioned snapshot using VolumeSnapshotContent as source", func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Create a pre-provisioned snapshot using "+ + "VolumeSnapshotContent as source", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var pvclaims []*v1.PersistentVolumeClaim - var err error - var snapshotContentCreated = false - var snapshotCreated = false + + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, pvclaim, err = createPVCAndStorageClass(client, - namespace, nil, scParameters, diskSize, nil, "", false, "") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -2542,11 +2579,11 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Expect claim to provision volume successfully") - pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) defer func() { @@ -2556,78 +2593,47 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - // Verify using CNS Query API if VolumeID retrieved from PV is present. 
- ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) - queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) - gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) - ginkgo.By("Create volume snapshot class") - volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, - getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{}) + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - ginkgo.By("Create a volume snapshot") - volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - defer func() { if snapshotContentCreated { - framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, - metav1.DeleteOptions{}) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() - ginkgo.By("Verify volume snapshot is created") - volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = true - gomega.Expect(volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero()) - - ginkgo.By("Verify volume snapshot content is created") - snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = true - gomega.Expect(*snapshotContent.Status.ReadyToUse).To(gomega.BeTrue()) - - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshothandle := *snapshotContent.Status.SnapshotHandle - snapshotId := strings.Split(snapshothandle, "+")[1] - - ginkgo.By("Query CNS and check the volume snapshot 
entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Create a volume snapshot2") volumeSnapshot2, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, getVolumeSnapshotSpecByName(namespace, "static-vs", snapshotContent.ObjectMeta.Name), metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) snapshotCreated2 := true - defer func() { if snapshotCreated2 { - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, "static-vs", metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, "static-vs", pandoraSyncWaitTime) } }() @@ -2636,36 +2642,26 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).To(gomega.HaveOccurred()) framework.Logf("Snapshot details is %+v", staticSnapshot) - ginkgo.By("Delete volume snapshot and verify the snapshot content is deleted") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = false - - framework.Logf("Wait till the volume snapshot is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = false - - ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* - Pre-provisioned snapshot using incorrect/non-existing static snapshot - 1. Create a sc, and pvc using this sc - 2. Create a snapshot for this pvc (use CreateSnapshot CNS API) - 3. Create a VolumeSnapshotContent CR using above snapshot-id, by passing the snapshotHandle - 4. Create a VolumeSnapshot using above content as source - 5. VolumeSnapshot and VolumeSnapshotContent should be created successfully and readToUse set to True - 6. Delete the snapshot created in step-2 (use deleteSnapshots CNS API) - 7. VolumeSnapshot and VolumeSnapshotContent will still have readyToUse set to True - 8. Restore: Create a volume using above pre-provisioned snapshot k8s object - (note the snapshotHandle its pointing to has been deleted) - 9. Volume Create should fail with an appropriate error on k8s side + Pre-provisioned snapshot using incorrect/non-existing static snapshot + 1. Create a sc, and pvc using this sc + 2. Create a snapshot for this pvc (use CreateSnapshot CNS API) + 3. Create a VolumeSnapshotContent CR using above snapshot-id, by passing the snapshotHandle + 4. Create a VolumeSnapshot using above content as source + 5. VolumeSnapshot and VolumeSnapshotContent should be created successfully and readToUse set to True + 6. Delete the snapshot created in step-2 (use deleteSnapshots CNS API) + 7. VolumeSnapshot and VolumeSnapshotContent will still have readyToUse set to True + 8. Restore: Create a volume using above pre-provisioned snapshot k8s object + (note the snapshotHandle its pointing to has been deleted) + 9. 
Volume Create should fail with an appropriate error on k8s side */ - ginkgo.It("Pre-provisioned snapshot using incorrect/non-existing static snapshot", func() { + ginkgo.It("[block-vanilla-snapshot] Pre-provisioned snapshot using incorrect/non-existing static snapshot", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass @@ -2730,7 +2726,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f }() ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) snapshotHandle := volHandle + "+" + snapshotId @@ -2749,9 +2745,8 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotContentCreated2 { - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - snapshotContentNew.ObjectMeta.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + snapshotContentNew.ObjectMeta.Name, pandoraSyncWaitTime) } }() @@ -2777,8 +2772,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f framework.Logf("Snapshot details is %+v", staticSnapshot) ginkgo.By("Deleted volume snapshot is created above") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, staticSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, staticSnapshot.Name, pandoraSyncWaitTime) snapshotCreated1 = false snapshotCreated2 = false @@ -2824,7 +2818,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f 7. Validate the pvc is Bound 8. 
Cleanup the snapshot and pvc */ - ginkgo.It("Create a volume from a snapshot that is still not ready-to-use", func() { + ginkgo.It("[block-vanilla-snapshot] Create a volume from a snapshot that is still not ready-to-use", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass @@ -2889,7 +2883,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f }() ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) snapshotHandle := volHandle + "+" + snapshotId @@ -2908,9 +2902,8 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotContentCreated2 { - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - snapshotContentNew.ObjectMeta.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + snapshotContentNew.ObjectMeta.Name, pandoraSyncWaitTime) } }() @@ -2956,8 +2949,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f framework.Logf("Snapshot details is %+v", staticSnapshot) ginkgo.By("Deleted volume snapshot is created above") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, staticSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, staticSnapshot.Name, pandoraSyncWaitTime) snapshotCreated1 = false snapshotCreated2 = false @@ -2978,18 +2970,18 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f 6. The deployment should succeed and should have the file that was created in step.2 7. 
Cleanup dep-1 pv snapshots and pvs, delete dep-2 */ - ginkgo.It("Snapshot workflow for deployments", func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Snapshot workflow for deployments", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var pvclaims []*v1.PersistentVolumeClaim - var err error + + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, pvclaim, err = createPVCAndStorageClass(client, - namespace, nil, scParameters, diskSize, nil, "", false, "") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -2997,11 +2989,11 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Expect claim to provision volume successfully") - pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) defer func() { @@ -3011,21 +3003,13 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - // Verify using CNS Query API if VolumeID retrieved from PV is present. 
- ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) - queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) - gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) - labelsMap := make(map[string]string) labelsMap["app"] = "test" - ginkgo.By("Creating a Deployment using pvc1") + ginkgo.By("Creating a Deployment using pvc1") dep, err := createDeployment(ctx, client, 1, labelsMap, nil, namespace, []*v1.PersistentVolumeClaim{pvclaim}, execRWXCommandPod1, false, busyBoxImageOnGcr) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { ginkgo.By("Delete Deployment") err := client.AppsV1().Deployments(namespace).Delete(ctx, dep.Name, metav1.DeleteOptions{}) @@ -3040,63 +3024,37 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create volume snapshot class") - volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, - getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{}) + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, - volumeSnapshotClass.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - ginkgo.By("Create a volume snapshot") - volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - snapshotCreated := true - snapshotContentCreated := false - defer func() { if snapshotCreated { - framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - volumeSnapshot.Name, metav1.DeleteOptions{}) + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - - ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime)) - time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) }() - ginkgo.By("Verify volume snapshot is created") - 
volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero()) - - ginkgo.By("Verify volume snapshot content is created") - snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = true - gomega.Expect(*snapshotContent.Status.ReadyToUse).To(gomega.BeTrue()) - - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshothandle := *snapshotContent.Status.SnapshotHandle - snapshotId := strings.Split(snapshothandle, "+")[1] - - ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Create a PVC using the snapshot created above") pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) @@ -3108,7 +3066,9 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolume2[0].Spec.CSI.VolumeHandle gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) - + if guestCluster { + volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) + } defer func() { err := fpv.DeletePersistentVolumeClaim(client, pvclaim2.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3119,10 +3079,10 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f labelsMap2 := make(map[string]string) labelsMap2["app2"] = "test2" + ginkgo.By("Creating a new deployment from the restored pvc") dep2, err := createDeployment(ctx, client, 1, labelsMap2, nil, namespace, []*v1.PersistentVolumeClaim{pvclaim2}, "", false, busyBoxImageOnGcr) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { ginkgo.By("Delete Deployment-2") err := client.AppsV1().Deployments(namespace).Delete(ctx, dep2.Name, metav1.DeleteOptions{}) @@ -3139,8 +3099,13 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -3153,31 +3118,31 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f 6. Run resize and it should succeed 7. 
Cleanup the pvc */ - ginkgo.It("Volume offline resize of a volume having snapshots", func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Volume offline resize of a volume having snapshots", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var err error + + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, pvclaim, err = createPVCAndStorageClass(client, - namespace, nil, scParameters, diskSize, nil, "", true, "") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Waiting for claim to be in bound phase") - pvc, err := fpv.WaitForPVClaimBoundPhase(client, - []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(pvc).NotTo(gomega.BeEmpty()) - pv := getPvFromClaim(client, pvclaim.Namespace, pvclaim.Name) - volHandle := pv.Spec.CSI.VolumeHandle + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", + diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) defer func() { err = fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) @@ -3192,6 +3157,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage] newSize := currentPvcSize.DeepCopy() newSize.Add(resource.MustParse("4Gi")) + newDiskSize := "6Gi" framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize) pvclaim, err = expandPVCSize(pvclaim, newSize, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3229,64 +3195,38 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create volume snapshot class") - volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, - getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{}) + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, - volumeSnapshotClass.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - ginkgo.By("Create a volume snapshot") - volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - snapshotCreated := true - 
snapshotContentCreated := false - + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, newDiskSize) defer func() { if snapshotContentCreated { - framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - volumeSnapshot.Name, metav1.DeleteOptions{}) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() - ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime)) - time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) - - ginkgo.By("Verify volume snapshot is created") - volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse("6Gi"))).To(gomega.BeZero()) - - ginkgo.By("Verify volume snapshot content is created") - snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = true - gomega.Expect(*snapshotContent.Status.ReadyToUse).To(gomega.BeTrue()) - - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshothandle := *snapshotContent.Status.SnapshotHandle - snapshotId := strings.Split(snapshothandle, "+")[1] - - ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.By("Expanding current pvc") + ginkgo.By("Expanding current pvc before deleting volume snapshot") currentPvcSize = pvclaim.Spec.Resources.Requests[v1.ResourceStorage] newSize = currentPvcSize.DeepCopy() newSize.Add(resource.MustParse("6Gi")) @@ -3295,21 +3235,12 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f ginkgo.By("Snapshot webhook does not allow volume expansion on PVC") gomega.Expect(err).To(gomega.HaveOccurred()) - ginkgo.By("Deleted volume snapshot is created above") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = false - - framework.Logf("Wait till the volume snapshot is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, snapshotContent.ObjectMeta.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = false - - ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) + ginkgo.By("Delete dynamic volume snapshot") + 
snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Expanding current pvc") + ginkgo.By("Expanding current pvc after deleting volume snapshot") currentPvcSize = pvclaim.Spec.Resources.Requests[v1.ResourceStorage] newSize = currentPvcSize.DeepCopy() newSize.Add(resource.MustParse("6Gi")) @@ -3360,17 +3291,18 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f 6. Run resize and it should succeed 7. Cleanup the pvc */ - ginkgo.It("Volume online resize of a volume having snapshots", func() { + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Volume online resize of a volume having snapshots", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim - var err error + + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, pvclaim, err = createPVCAndStorageClass(client, - namespace, nil, scParameters, diskSize, nil, "", true, "") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -3378,13 +3310,12 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Waiting for claim to be in bound phase") - pvc, err := fpv.WaitForPVClaimBoundPhase(client, - []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(pvc).NotTo(gomega.BeEmpty()) - pv := getPvFromClaim(client, pvclaim.Namespace, pvclaim.Name) - volHandle := pv.Spec.CSI.VolumeHandle + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) defer func() { err = fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) @@ -3409,7 +3340,13 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f var vmUUID string nodeName := pod.Spec.NodeName - vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, nodeName)) isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) @@ -3420,6 +3357,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage] newSize := currentPvcSize.DeepCopy() newSize.Add(resource.MustParse("4Gi")) + newDiskSize := "6Gi" framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize) claims, err := expandPVCSize(pvclaim, newSize, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3449,63 +3387,36 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] 
Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create volume snapshot class") - volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, - getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{}) + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, - volumeSnapshotClass.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - ginkgo.By("Create a volume snapshot") - volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - snapshotCreated := true - snapshotContentCreated := false - + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, newDiskSize) defer func() { if snapshotContentCreated { - framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - volumeSnapshot.Name, metav1.DeleteOptions{}) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() - ginkgo.By("Verify volume snapshot is created") - volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse("6Gi"))).To(gomega.BeZero()) - - ginkgo.By("Verify volume snapshot content is created") - snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = true - gomega.Expect(*snapshotContent.Status.ReadyToUse).To(gomega.BeTrue()) - - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshothandle := *snapshotContent.Status.SnapshotHandle - snapshotId := strings.Split(snapshothandle, "+")[1] - - ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime)) - time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) - - ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) - 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // Modify PVC spec to trigger volume expansion currentPvcSize = claims.Spec.Resources.Requests[v1.ResourceStorage] newSize = currentPvcSize.DeepCopy() @@ -3514,18 +3425,9 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f _, err = expandPVCSize(pvclaim, newSize, client) gomega.Expect(err).To(gomega.HaveOccurred()) - ginkgo.By("Deleted volume snapshot is created above") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = false - - framework.Logf("Wait till the volume snapshot is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, snapshotContent.ObjectMeta.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotContentCreated = false - - ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Waiting for file system resize to finish") @@ -3605,7 +3507,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f 7. bring the host back up 8. cleanup the snapshots, restore-pvc and source-pvc */ - ginkgo.It("Snapshot restore while the Host is Down", func() { + ginkgo.It("[block-vanilla-snapshot] Snapshot restore while the Host is Down", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass @@ -3666,9 +3568,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - snapshot1.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot1.Name, pandoraSyncWaitTime) } }() @@ -3690,7 +3590,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotId := strings.Split(snapshothandle, "+")[1] ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Identify the host on which the PV resides") @@ -3740,18 +3640,18 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f }) /* - VC reboot with deployment pvcs having snapshot - 1. Create a sc and create 30 pvc's using this sc - 2. Create a deployment using 3 replicas and pvc's pointing to above - 3. Write some files to these PVCs - 4. Create snapshots on all the replica PVCs - 5. Reboot the VC - 6. Ensure the deployment comes up fine and data is available and we can write more data - 7. Create a new deployment, by creating new volumes using the snapshots cut prior to reboot - 8. Ensure the data written in step-4 is intanct - 9. Delete both deployments and. the pvcs + VC reboot with deployment pvcs having snapshot + 1. Create a sc and create 30 pvc's using this sc + 2. Create a deployment using 3 replicas and pvc's pointing to above + 3. Write some files to these PVCs + 4. 
Create snapshots on all the replica PVCs + 5. Reboot the VC + 6. Ensure the deployment comes up fine and data is available and we can write more data + 7. Create a new deployment, by creating new volumes using the snapshots cut prior to reboot + 8. Ensure the data written in step-4 is intact + 9. Delete both deployments and the pvcs */ - ginkgo.It("VC reboot with deployment pvcs having snapshot", func() { + ginkgo.It("[block-vanilla-snapshot] VC reboot with deployment pvcs having snapshot", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass @@ -3860,12 +3760,8 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f framework.Logf("Deleting volume snapshot") for _, snapshot := range volumesnapshots { - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - snapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot.Name, pandoraSyncWaitTime) } - ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime)) - time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) }() ginkgo.By("Rebooting VC") @@ -3881,6 +3777,23 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f // After reboot. bootstrap() + fullSyncWaitTime := 0 + + if os.Getenv(envFullSyncWaitTime) != "" { + fullSyncWaitTime, err = strconv.Atoi(os.Getenv(envFullSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // Full sync interval can be 1 min at minimum so full sync wait time + // has to be more than 120s. + if fullSyncWaitTime < 120 || fullSyncWaitTime > defaultFullSyncWaitTime { + framework.Failf("The FullSync Wait time %v is not set correctly", fullSyncWaitTime) + } + } else { + fullSyncWaitTime = defaultFullSyncWaitTime + } + + ginkgo.By(fmt.Sprintf("Double Sleeping for %v seconds to allow full sync finish", fullSyncWaitTime)) + time.Sleep(time.Duration(2*fullSyncWaitTime) * time.Second) + ginkgo.By("Verify volume snapshot is created") for _, snapshot := range volumesnapshots { snapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, snapshot.Name) @@ -3907,7 +3820,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f restoredpvclaims = append(restoredpvclaims, pvclaim2) persistentvolume2, err := fpv.WaitForPVClaimBoundPhase(client, []*v1.PersistentVolumeClaim{pvclaim2}, - framework.ClaimProvisionTimeout) + framework.ClaimProvisionTimeout*2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle2 := persistentvolume2[0].Spec.CSI.VolumeHandle gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) @@ -3954,23 +3867,23 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) }) /* - VC password reset during snapshot creation - 1. Create a sc and pvc using this sc - 2. Create a volume snapshot using pvc as source - 3. Verify snapshot created successfully - 4. Change the VC administrator account password - 5. 
Create another snapshot - creation succeeds with previous csi session - 6. Update the vsphere.conf and the secret under vmware-system-csi ns and wait for 1-2 mins - 7. Create snapshot should succeed - 8. Delete snapshot - 9. Cleanup pvc/sc + VC password reset during snapshot creation + 1. Create a sc and pvc using this sc + 2. Create a volume snapshot using pvc as source + 3. Verify snapshot created successfully + 4. Change the VC administrator account password + 5. Create another snapshot - creation succeeds with previous csi session + 6. Update the vsphere.conf and the secret under vmware-system-csi ns and wait for 1-2 mins + 7. Create snapshot should succeed + 8. Delete snapshot + 9. Cleanup pvc/sc */ - ginkgo.It("VC password reset during snapshot creation", func() { + ginkgo.It("[block-vanilla-snapshot] VC password reset during snapshot creation", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass @@ -3979,6 +3892,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f var err error var snapshotCreated = false var snapshot3Created = false + nimbusGeneratedVcPwd := GetAndExpectStringEnvVar(nimbusVcPwd) ginkgo.By("Create storage class and PVC") scParameters[scParamDatastoreURL] = datastoreURL @@ -3993,7 +3907,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f ginkgo.By("Expect claim to provision volume successfully") pvclaims = append(pvclaims, pvclaim) - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, (2 * framework.ClaimProvisionTimeout)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) @@ -4032,9 +3946,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - snapshot1.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot1.Name, pandoraSyncWaitTime) } }() @@ -4055,7 +3967,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotId := strings.Split(snapshothandle, "+")[1] ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Fetching the username and password of the current vcenter session from secret") @@ -4071,14 +3983,17 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f username := vsphereCfg.Global.User originalPassword := vsphereCfg.Global.Password newPassword := e2eTestPassword - err = invokeVCenterChangePassword(username, originalPassword, newPassword, vcAddress) + ginkgo.By(fmt.Sprintf("Original password %s, new password %s", originalPassword, newPassword)) + err = invokeVCenterChangePassword(username, nimbusGeneratedVcPwd, newPassword, vcAddress, + false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) originalVCPasswordChanged := true defer func() { if originalVCPasswordChanged { ginkgo.By("Reverting the password change") - err = invokeVCenterChangePassword(username, 
newPassword, originalPassword, vcAddress) + err = invokeVCenterChangePassword(username, nimbusGeneratedVcPwd, originalPassword, + vcAddress, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -4091,8 +4006,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshot2Created { - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, snapshot2.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot2.Name, pandoraSyncWaitTime) } }() @@ -4130,9 +4044,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshot3Created { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - snapshot3.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot3.Name, pandoraSyncWaitTime) } }() @@ -4153,11 +4065,11 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotId3 := strings.Split(snapshothandle3, "+")[1] ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId3) + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId3, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Reverting the password change") - err = invokeVCenterChangePassword(username, newPassword, originalPassword, vcAddress) + err = invokeVCenterChangePassword(username, nimbusGeneratedVcPwd, originalPassword, vcAddress, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) originalVCPasswordChanged = false @@ -4191,42 +4103,55 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f }) /* - Multi-master and snapshot workflow - 1. Create a multi-master k8s setup (3 masters) - 2. Create a sc and a pvc using this sc - 3. Create a snapshot on the above pvc (dynamic) and also create a volume using this snaphot as source - 4. Immediately, Bring down the master vm where csi controller pod is running - 5. Alternateively we can also stop the kubelet on this node - 6. Verify the snapshot and restore workflow succeeds - 7. validate from k8s side and CNS side - 8. Bring up the node and cleanup restore-pvc, snapshot and source-pvc + Multi-master and snapshot workflow + 1. Create a PVC. + 2. Create some dynamic volume snapshots. + 3. Kill csi-snapshotter container when creation of volumesnapshot is going on. + 4. Check if the snapshots go to Bound state. + 5. Create a volume using each of the snapshot. + 6. Kill csi-snapshotter container when restore operation is going on. + 7. Verify pvcs all are in Bound state. + 8. Cleanup all the snapshots and the pvc. 
*/ - ginkgo.It("Multi-master and snapshot workflow", func() { + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] Multi-master and snapshot workflow", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass var pvclaim *v1.PersistentVolumeClaim - var pvclaims []*v1.PersistentVolumeClaim var err error - container_name := "csi-snapshotter" + var snapshotContentCreated = false + var sshClientConfig, sshWcpConfig *ssh.ClientConfig + var csiControllerPod, k8sMasterIP, svcMasterIp, svcMasterPwd string + var volumeSnapshotNames []string + var volumeSnapshotContents []*snapV1.VolumeSnapshotContent + var snapshotOpsScale = 3 + if guestCluster { + snapshotOpsScale = 5 + } ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, pvclaim, err = createPVCAndStorageClass(client, - namespace, nil, scParameters, diskSize, nil, "", false, "") - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if guestCluster { + storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Expect claim to provision volume successfully") - pvclaims = append(pvclaims, pvclaim) - - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", + diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) defer func() { @@ -4237,16 +4162,8 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - // Verify using CNS Query API if VolumeID retrieved from PV is present. 
- ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) - queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) - gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) - - ginkgo.By("Create volume snapshot class") - volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, - getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{}) + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -4256,121 +4173,207 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - sshClientConfig := &ssh.ClientConfig{ - User: "root", - Auth: []ssh.AuthMethod{ - ssh.Password(nimbusGeneratedK8sVmPwd), - }, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), + if vanillaCluster { + nimbusGeneratedK8sVmPwd := GetAndExpectStringEnvVar(nimbusK8sVmPwd) + + sshClientConfig = &ssh.ClientConfig{ + User: rootUser, + Auth: []ssh.AuthMethod{ + ssh.Password(nimbusGeneratedK8sVmPwd), + }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + + /* Get current leader Csi-Controller-Pod where CSI Snapshotter is running and " + + find the master node IP where this Csi-Controller-Pod is running */ + ginkgo.By("Get current leader Csi-Controller-Pod name where csi-snapshotter is running and " + + "find the master node IP where this Csi-Controller-Pod is running") + csiControllerPod, k8sMasterIP, err = getK8sMasterNodeIPWhereContainerLeaderIsRunning(ctx, + c, sshClientConfig, snapshotterContainerName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("csi-snapshotter leader is in Pod %s "+ + "which is running on master node %s", csiControllerPod, k8sMasterIP) + } else if guestCluster { + svcMasterIp = GetAndExpectStringEnvVar(svcMasterIP) + svcMasterPwd = GetAndExpectStringEnvVar(svcMasterPassword) + framework.Logf("svc master ip: %s", svcMasterIp) + sshWcpConfig = &ssh.ClientConfig{ + User: rootUser, + Auth: []ssh.AuthMethod{ + ssh.Password(svcMasterPwd), + }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + framework.Logf("sshwcpConfig: %v", sshWcpConfig) + csiControllerPod, k8sMasterIP, err = getK8sMasterNodeIPWhereContainerLeaderIsRunning(ctx, + client, sshWcpConfig, snapshotterContainerName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("%s leader is running on pod %s "+ + "which is running on master node %s", snapshotterContainerName, csiControllerPod, k8sMasterIP) } - /* Get current leader Csi-Controller-Pod where CSI Provisioner is running and " + - find the master node IP where this Csi-Controller-Pod is running */ - ginkgo.By("Get current leader Csi-Controller-Pod name where csi-snapshotter is running and " + - "find the master node IP where this Csi-Controller-Pod is running") - csi_controller_pod, k8sMasterIP, err := getK8sMasterNodeIPWhereContainerLeaderIsRunning(ctx, - c, sshClientConfig, container_name) - framework.Logf("csi-snapshotter leader is in Pod %s "+ - "which is running on master node %s", csi_controller_pod, k8sMasterIP) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i := 0; i < snapshotOpsScale; i++ { + ginkgo.By("Create a volume snapshot") + framework.Logf("Creating snapshot no: %d", i+1) + 
volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, + getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) + snapshotCreated := true + volumeSnapshotNames = append(volumeSnapshotNames, volumeSnapshot.Name) - ginkgo.By("Create a volume snapshot") - snapshot1, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", snapshot1.Name) - snapshotCreated := true - defer func() { - if snapshotCreated { - framework.Logf("In defer function, Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - snapshot1.Name, metav1.DeleteOptions{}) - if err != nil { - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } + defer func() { + if snapshotContentCreated { + framework.Logf("Deleting volume snapshot content") + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + } + }() + + if i == 1 { + ginkgo.By("Kill container CSI-Snapshotter on the master node where elected leader " + + "csi controller pod is running") + + if vanillaCluster { + /* Delete elected leader CSI-Controller-Pod where csi-snapshotter is running */ + csipods, err := client.CoreV1().Pods(csiSystemNamespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Delete elected leader CSi-Controller-Pod where csi-snapshotter is running") + err = deleteCsiControllerPodWhereLeaderIsRunning(ctx, client, snapshotterContainerName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpod.WaitForPodsRunningReady(c, csiSystemNamespace, int32(csipods.Size()), + 0, pollTimeoutShort*2, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if guestCluster { + err = execStopContainerOnGc(sshWcpConfig, svcMasterIp, + snapshotterContainerName, k8sMasterIP, svcNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } - }() + } - /* Delete elected leader CSI-Controller-Pod where csi-snapshotter is running */ - csipods, err := client.CoreV1().Pods(csiSystemNamespace).List(ctx, metav1.ListOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Delete elected leader CSi-Controller-Pod where csi-snapshotter is running") - err = deleteCsiControllerPodWhereLeaderIsRunning(ctx, client, csi_controller_pod) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fpod.WaitForPodsRunningReady(c, csiSystemNamespace, int32(csipods.Size()), - 0, pollTimeoutShort*2, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for i := 0; i < snapshotOpsScale; i++ { + ginkgo.By("Verify volume snapshot is created") + framework.Logf("snapshot name: %s", volumeSnapshotNames[i]) + volumeSnapshot, err := waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, 
volumeSnapshotNames[i]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero()) + framework.Logf("VolumeSnapshot Name: %s", volumeSnapshot.Name) - ginkgo.By("Verify volume snapshot is Ready to use") - snapshot1_updated, err := waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, snapshot1.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(snapshot1_updated.Status.RestoreSize.Cmp(resource.MustParse(diskSize))).To(gomega.BeZero()) + ginkgo.By("Verify volume snapshot content is created") + snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + snapshotContentCreated = true + gomega.Expect(*snapshotContent.Status.ReadyToUse).To(gomega.BeTrue()) + framework.Logf("VolumeSnapshotContent Name: %s", snapshotContent.Name) + volumeSnapshotContents = append(volumeSnapshotContents, snapshotContent) - ginkgo.By("Verify volume snapshot content is created") - snapshotContent1, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *snapshot1_updated.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(*snapshotContent1.Status.ReadyToUse).To(gomega.BeTrue()) + framework.Logf("Get volume snapshot ID from snapshot handle") + snapshotId, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContent, volumeSnapshotClass, + volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("snapshot Id: %s", snapshotId) - framework.Logf("Get volume snapshot ID from snapshot handle") - snapshothandle := *snapshotContent1.Status.SnapshotHandle - snapshotId := strings.Split(snapshothandle, "+")[1] + ginkgo.By("Query CNS and check the volume snapshot entry") + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } - ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + /* Get current leader Csi-Controller-Pod where CSI Snapshotter is running and " + + find the master node IP where this Csi-Controller-Pod is running */ + ginkgo.By("Get current leader Csi-Controller-Pod name where csi-snapshotter is running and " + + "find the master node IP where this Csi-Controller-Pod is running") + csiControllerPod, k8sMasterIP, err = getK8sMasterNodeIPWhereContainerLeaderIsRunning(ctx, + c, sshClientConfig, snapshotterContainerName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("csi-snapshotter leader is in Pod %s "+ + "which is running on master node %s", csiControllerPod, k8sMasterIP) + } else if guestCluster { + framework.Logf("sshwcpConfig: %v", sshWcpConfig) + csiControllerPod, k8sMasterIP, err = getK8sMasterNodeIPWhereContainerLeaderIsRunning(ctx, + client, sshWcpConfig, snapshotterContainerName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("%s leader is running on pod %s "+ + "which is running on master node %s", snapshotterContainerName, csiControllerPod, k8sMasterIP) + } - ginkgo.By("Create PVC from snapshot") - pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, - v1.ReadWriteOnce, snapshot1.Name, snapshotapigroup) + for i := 0; i < snapshotOpsScale; i++ { + 
ginkgo.By("Create PVC from snapshot") + pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, + v1.ReadWriteOnce, volumeSnapshotNames[i], snapshotapigroup) - pvclaim2, err := fpv.CreatePVC(client, namespace, pvcSpec) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvclaim2, err := fpv.CreatePVC(client, namespace, pvcSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - csi_controller_pod, _, err = getK8sMasterNodeIPWhereContainerLeaderIsRunning(ctx, - c, sshClientConfig, container_name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - /* Delete elected leader CSI-Controller-Pod where csi-snapshotter is running */ - ginkgo.By("Delete elected leader CSi-Controller-Pod where csi-snapshotter is running") - err = deleteCsiControllerPodWhereLeaderIsRunning(ctx, client, csi_controller_pod) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if i == 1 { + if vanillaCluster { + /* Delete elected leader CSI-Controller-Pod where csi-snapshotter is running */ + csipods, err := client.CoreV1().Pods(csiSystemNamespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Delete elected leader CSi-Controller-Pod where csi-snapshotter is running") + err = deleteCsiControllerPodWhereLeaderIsRunning(ctx, client, snapshotterContainerName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpod.WaitForPodsRunningReady(c, csiSystemNamespace, int32(csipods.Size()), + 0, pollTimeoutShort*2, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if guestCluster { + err = execStopContainerOnGc(sshWcpConfig, svcMasterIp, + snapshotterContainerName, k8sMasterIP, svcNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } - persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(client, - []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle - gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + framework.Logf("Waiting for PVCs to come to bound state") + persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(client, + []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) - defer func() { - ginkgo.By("In defer function deleting PVC2") - err := fpv.DeletePersistentVolumeClaim(client, pvclaim2.Name, namespace) + ginkgo.By("Deleting PVC2") + err = fpv.DeletePersistentVolumeClaim(client, pvclaim2.Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() + } - framework.Logf("Deleting volume snapshot") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - snapshot1.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - snapshotCreated = false + for i := 0; i < snapshotOpsScale; i++ { + framework.Logf("Get volume snapshot ID from snapshot handle") + snapshotId, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, volumeSnapshotContents[i], volumeSnapshotClass, + volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime)) - time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + volumeSnapshot, err := 
snapc.SnapshotV1().VolumeSnapshots(namespace).Get(ctx, + volumeSnapshotNames[i], metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Delete volume snapshot") + _, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }) /* - Max Snapshots per volume test - 1. Check the default configuration: - 2. Modify global-max-snapshots-per-block-volume field in vsphere-csi.conf - 3. Ensure this can be set to different values and it honors this configuration during snap create - 4. Check behavior when it is set to 0 and 5 as well - 5. Validate creation of additional snapshots beyond the configured - max-snapshots per volume fails - check error returned + Max Snapshots per volume test + 1. Check the default configuration: + 2. Modify global-max-snapshots-per-block-volume field in vsphere-csi.conf + 3. Ensure this can be set to different values and it honors this configuration during snap create + 4. Check behavior when it is set to 0 and 5 as well + 5. Validate creation of additional snapshots beyond the configured + max-snapshots per volume fails - check error returned */ - ginkgo.It("Max Snapshots per volume test", func() { + ginkgo.It("[block-vanilla-snapshot] Max Snapshots per volume test", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass @@ -4458,16 +4461,14 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotId := strings.Split(snapshothandle, "+")[1] ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } defer func() { for _, snapName := range snapshotNames { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - snapName, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapName, pandoraSyncWaitTime) } }() @@ -4479,8 +4480,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshot2Created { - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, snapshot2.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot2.Name, pandoraSyncWaitTime) } }() @@ -4490,12 +4490,11 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f framework.Logf("Snapshot details is %+v", snapshot2) ginkgo.By("Delete failed snapshot") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, snapshot2.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot2.Name, pandoraSyncWaitTime) snapshot2Created = false - ginkgo.By("Modifying the default max snapshots per volume in the secret to 5") - vsphereCfg.Snapshot.GlobalMaxSnapshotsPerBlockVolume = 5 + ginkgo.By("Modifying the default max snapshots per volume in the secret to 6") + vsphereCfg.Snapshot.GlobalMaxSnapshotsPerBlockVolume = 6 modifiedConf, err := writeConfigToSecretString(vsphereCfg) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -4515,6 +4514,24 @@ var _ = 
ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() + // Get CSI Controller's replica count from the setup + deployment, err := client.AppsV1().Deployments(csiSystemNamespace).Get(ctx, + vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + csiReplicaCount := *deployment.Spec.Replicas + + ginkgo.By("Bring down csi-controller pod") + bringDownCsiController(client) + isCSIDown := true + defer func() { + if !isCSIDown { + bringUpCsiController(client, csiReplicaCount) + } + }() + + bringUpCsiController(client, csiReplicaCount) + isCSIDown = false + for j := 4; j <= 5; j++ { ginkgo.By(fmt.Sprintf("Create a volume snapshot - %d", j)) snapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, @@ -4539,7 +4556,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotId := strings.Split(snapshothandle, "+")[1] ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -4565,54 +4582,55 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f }) /* - Volume snapshot creation when resize is in progress - 1. Create a pvc and resize the pvc (say from 2GB to 4GB) - 2. While the resize operation is in progress, create a snapshot on this volume - 3. Expected behavior: resize operation should succeed and the - snapshot creation should succeed after resize completes + Volume snapshot creation when resize is in progress + 1. Create a pvc and resize the pvc (say from 2GB to 4GB) + 2. While the resize operation is in progress, create a snapshot on this volume + 3. 
Expected behavior: resize operation should succeed and the + snapshot creation should succeed after resize completes */ - ginkgo.It("Volume snapshot creation when resize is in progress", func() { + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] Volume snapshot creation when resize is in progress", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var storageclass *storagev1.StorageClass - var pvclaim *v1.PersistentVolumeClaim + + var volumeSnapshot *snapV1.VolumeSnapshot + var snapshotContent *snapV1.VolumeSnapshotContent + var snapshotCreated, snapshotContentCreated bool + var snapshotId string var err error + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } + ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, pvclaim, err = createPVCAndStorageClass(client, - namespace, nil, scParameters, diskSize, nil, "", true, "") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Waiting for claim to be in bound phase") - pvc, err := fpv.WaitForPVClaimBoundPhase(client, - []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(pvc).NotTo(gomega.BeEmpty()) - pv := getPvFromClaim(client, pvclaim.Namespace, pvclaim.Name) - volHandle := pv.Spec.CSI.VolumeHandle - + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", + diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) defer func() { - err = fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - if volHandle != "" { - err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } }() ginkgo.By("Creating pod to attach PV to the node") pod, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execRWXCommandPod1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - // Delete POD ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) err = fpod.DeletePodWithWait(client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -4620,7 +4638,13 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f var vmUUID string nodeName := pod.Spec.NodeName - vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, nodeName)) isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) @@ -4631,48 +4655,54 @@ var _ = 
ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage] newSize := currentPvcSize.DeepCopy() newSize.Add(resource.MustParse("4Gi")) + newDiskSize := "6Gi" framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize) claims, err := expandPVCSize(pvclaim, newSize, client) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(claims).NotTo(gomega.BeNil()) ginkgo.By("Create volume snapshot class") - volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, - getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{}) + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, - volumeSnapshotClass.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() - ginkgo.By("Create a volume snapshot") - volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, - getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) - snapshotCreated := true - + ginkgo.By("Create a dynamic volume snapshot") + if vanillaCluster { + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err = createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if guestCluster { + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err = createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, newDiskSize) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - volumeSnapshot.Name, metav1.DeleteOptions{}) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() - ginkgo.By("Verify volume snapshot is created") - volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse("2Gi"))).To(gomega.BeZero()) - - ginkgo.By("Verify volume snapshot content is created") - snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(*snapshotContent.Status.ReadyToUse).To(gomega.BeTrue()) 
+ ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime)) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) ginkgo.By("Waiting for file system resize to finish") claims, err = waitForFSResize(pvclaim, client) @@ -4696,6 +4726,11 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).CapacityInMb) } gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -4704,7 +4739,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f 2. Create Snapshot class and take a snapshot of the volume 3. Cleanup of snapshot, pvc and sc */ - ginkgo.It("Volume provision and snapshot creation/restore on VVOL Datastore", func() { + ginkgo.It("[block-vanilla-snapshot] Volume provision and snapshot creation/restore on VVOL Datastore", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass @@ -4773,16 +4808,17 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) } }() @@ -4804,7 +4840,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotId := strings.Split(snapshothandle, "+")[1] ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC from snapshot") @@ -4828,9 +4864,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f }() framework.Logf("Deleting volume snapshot") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) snapshotCreated = false err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, snapshotContent.ObjectMeta.Name) @@ -4838,7 +4872,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotContentCreated = false ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) + err = 
verifySnapshotIsDeletedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -4848,7 +4882,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f 2. Create Snapshot class and take a snapshot of the volume 3. Cleanup of snapshot, pvc and sc */ - ginkgo.It("Volume provision and snapshot creation/restore on VMFS Datastore", func() { + ginkgo.It("[block-vanilla-snapshot] Volume provision and snapshot creation/restore on VMFS Datastore", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass @@ -4917,16 +4951,17 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f defer func() { if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) } }() @@ -4948,7 +4983,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotId := strings.Split(snapshothandle, "+")[1] ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create PVC from snapshot") @@ -4972,9 +5007,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f }() framework.Logf("Deleting volume snapshot") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) snapshotCreated = false err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, snapshotContent.ObjectMeta.Name) @@ -4982,20 +5015,20 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f snapshotContentCreated = false ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) + err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* - Scale-up creation of snapshots across multiple volumes - - 1. Create a few pvcs (around 25) - 2. Trigger parallel snapshot create calls on all pvcs - 3. Trigger parallel snapshot delete calls on all pvcs - 4. All calls in (2) and (3) should succeed since these are - triggered via k8s API (might take longer time) - 5. Trigger create/delete calls and ensure there are no stale entries left behind - 6. Create multiple volumes from the same snapshot + Scale-up creation of snapshots across multiple volumes + + 1. Create a few pvcs (around 25) + 2. 
Trigger parallel snapshot create calls on all pvcs + 3. Trigger parallel snapshot delete calls on all pvcs + 4. All calls in (2) and (3) should succeed since these are + triggered via k8s API (might take longer time) + 5. Trigger create/delete calls and ensure there are no stale entries left behind + 6. Create multiple volumes from the same snapshot */ /* @@ -5008,21 +5041,24 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f 4. Volume restore 5. snapshot create/delete workflow */ - ginkgo.It("Scale-up creation of snapshots across multiple volumes", func() { + ginkgo.It("[block-vanilla-snapshot][tkg-snapshot] Scale-up creation of snapshots across multiple volumes", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + var storageclass *storagev1.StorageClass volumesnapshots := make([]*snapV1.VolumeSnapshot, volumeOpsScale) snapshotContents := make([]*snapV1.VolumeSnapshotContent, volumeOpsScale) pvclaims := make([]*v1.PersistentVolumeClaim, volumeOpsScale) pvclaims2 := make([]*v1.PersistentVolumeClaim, volumeOpsScale) - var persistentvolumes []*v1.PersistentVolume - var err error ginkgo.By("Create storage class and PVC") - scParameters[scParamDatastoreURL] = datastoreURL + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } curtime := time.Now().Unix() randomValue := rand.Int() @@ -5032,21 +5068,20 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f scName := "snapshot-scale" + curtimestring + val storageclass, err = createStorageClass(client, scParameters, nil, "", "", false, scName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Create volume snapshot class") - volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, - getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{}) + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } }() ginkgo.By("Creating PVCs using the Storage Class") @@ -5100,9 +5135,7 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f for i := 0; i < volumeOpsScale; i++ { framework.Logf("Deleting volume snapshot") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumesnapshots[i].Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumesnapshots[i].Name, pandoraSyncWaitTime) err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, snapshotContents[i].ObjectMeta.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -5127,4 +5160,1497 @@ var _ = ginkgo.Describe("[block-vanilla-snapshot] Volume Snapshot Basic Test", f } }() }) + + /* Create/Delete snapshot via k8s API using VolumeSnapshotContent (Pre-Provisioned Snapshots) + + //Steps to create 
pre-provisioned snapshot in Guest Cluster + + 1. In this approach create a dynamic VolumeSnapshot in Guest with a VolumeSnapshotClass with “Delete” + deletion policy. + 2. Note the VolumeSnapshot name created on the Supervisor. + 3. Explicitly change the deletionPolicy of VolumeSnapshotContent on Guest to “Retain”. + 4. Delete the VolumeSnapshot. This will leave the VolumeSnapshotContent on the Guest as is, + since deletionPolicy was “Retain” + 5. Explicitly delete the VolumeSnapshotContent. + 6. In this approach, we now have Supervisor VolumeSnapshot that doesn’t have a corresponding + VolumeSnapshot-VolumeSnapshotContent on Guest. + 7. Create a VolumeSnapshotContent that points to the Supervisor VolumeSnapshot, and create a + VolumeSnapshot on Guest that points to the VolumeSnapshotContent. + + // TestCase Steps + 1. Create a storage class and create a pvc using this SC + 2. The volumesnapshotclass is set to delete + 3. Create a dynamic volume snapshot + 4. Create a pre-provisioned snapshot following the steps mentioned above + 5. Perform cleanup + */ + + ginkgo.It("[tkg-snapshot] Verify pre-provisioned static snapshot workflow", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } + + ginkgo.By("Create storage class and PVC") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, dynamicSnapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim, volHandle, diskSize) + defer func() { + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + framework.Logf("Get volume snapshot handle from Supervisor Cluster") + snapshotId, _, svcVolumeSnapshotName, err := 
getSnapshotHandleFromSupervisorCluster(ctx, + volumeSnapshotClass, *snapshotContent.Status.SnapshotHandle) + + ginkgo.By("Create pre-provisioned snapshot") + _, staticSnapshot, staticSnapshotContentCreated, + staticSnapshotCreated, err := createPreProvisionedSnapshotInGuestCluster(ctx, volumeSnapshot, snapshotContent, + snapc, namespace, pandoraSyncWaitTime, svcVolumeSnapshotName, diskSize) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + if staticSnapshotCreated { + framework.Logf("Deleting static volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, staticSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *staticSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if staticSnapshotContentCreated { + framework.Logf("Deleting static volume snapshot content") + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *staticSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *staticSnapshot.Status.BoundVolumeSnapshotContentName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Delete pre-provisioned snapshot") + staticSnapshotCreated, staticSnapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + staticSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, dynamicSnapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Volume restore using snapshot (a) dynamic snapshot (b) pre-provisioned snapshot + 1. Create a sc, a pvc and attach the pvc to a pod, write a file + 2. Create pre-provisioned and dynamically provisioned snapshots using this pvc + 3. Create new volumes (pvcFromPreProvSS and pvcFromDynamicSS) using these + snapshots as source, use the same sc + 4. Ensure the pvc gets provisioned and is Bound + 5. Attach the pvc to a pod and ensure data from snapshot is available + (file that was written in step.1 should be available) + 6. And also write new data to the restored volumes and it should succeed + 7. Delete the snapshots and pvcs/pods created in steps 1,2,3 + 8. Continue to write new data to the restore volumes and it should succeed + 9. Create new snapshots on restore volume and verify it succeeds + 10. 
Run cleanup: Delete snapshots, restored-volumes, pods + */ + + ginkgo.It("[tkg-snapshot] Volume restore using dynamic and pre-provisioned snapshot on guest cluster", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var staticSnapshotCreated, staticSnapshotContentCreated bool + + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } + + ginkgo.By("Create storage class and PVC") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Restore PVC using dynamic volume snapshot") + pvclaim2, persistentVolumes2, pod := verifyVolumeRestoreOperation(ctx, client, + namespace, storageclass, volumeSnapshot, true) + volHandle2 := persistentVolumes2[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) + } + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + framework.Logf("Get volume snapshot handle from Supervisor Cluster") + _, _, svcVolumeSnapshotName, err := getSnapshotHandleFromSupervisorCluster(ctx, 
volumeSnapshotClass, + *snapshotContent.Status.SnapshotHandle) + + ginkgo.By("Create pre-provisioned snapshot in Guest Cluster") + _, staticSnapshot, staticSnapshotContentCreated, + staticSnapshotCreated, err := createPreProvisionedSnapshotInGuestCluster(ctx, volumeSnapshot, snapshotContent, + snapc, namespace, pandoraSyncWaitTime, svcVolumeSnapshotName, diskSize) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if staticSnapshotCreated { + framework.Logf("Deleting static volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, staticSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *staticSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if staticSnapshotContentCreated { + framework.Logf("Deleting static volume snapshot content") + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *staticSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *staticSnapshot.Status.BoundVolumeSnapshotContentName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Restore PVC using pre-provisioned snapshot") + pvclaim3, persistentVolumes3, pod2 := verifyVolumeRestoreOperation(ctx, client, + namespace, storageclass, staticSnapshot, true) + volHandle3 := persistentVolumes3[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle3 = getVolumeIDFromSupervisorCluster(volHandle3) + } + gomega.Expect(volHandle3).NotTo(gomega.BeEmpty()) + + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim3.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) + err = fpod.DeletePodWithWait(client, pod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Take a snapshot of restored PVC created from dynamic snapshot") + volumeSnapshot3, _, snapshotCreated3, + snapshotContentCreated3, snapshotId3, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim2, volHandle, diskSize) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated3 { + framework.Logf("Deleting volume snapshot content") + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *volumeSnapshot3.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot3.Status.BoundVolumeSnapshotContentName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated3 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot3.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot3.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = 
deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot3, pandoraSyncWaitTime, volHandle2, snapshotId3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete pre-provisioned snapshot") + staticSnapshotCreated, staticSnapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + staticSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Pre-provisioned snapshot using incorrect/non-existing static snapshot + 1. Create a sc, and pvc using this sc + 2. Create a snapshot for this pvc + 3. Create a VolumeSnapshotContent CR using above snapshot-id, by passing the snapshotHandle + 4. Create a VolumeSnapshot using above content as source + 5. VolumeSnapshot and VolumeSnapshotContent should be created successfully and readToUse set to True + 6. Delete the snapshot created in step-4 + 7. Restore: Create a volume using above pre-provisioned snapshot k8s object + (note the snapshotHandle its pointing to has been deleted) + 8. Volume Create should fail with an appropriate error on k8s side + */ + ginkgo.It("[tkg-snapshot] Restore volume using non-existing static snapshot", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var staticSnapshotCreated, staticSnapshotContentCreated bool + + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } + + ginkgo.By("Create storage class and PVC") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the 
volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + framework.Logf("Get volume snapshot handle from Supervisor Cluster") + staticSnapshotId, _, svcVolumeSnapshotName, err := getSnapshotHandleFromSupervisorCluster(ctx, + volumeSnapshotClass, *snapshotContent.Status.SnapshotHandle) + + ginkgo.By("Create pre-provisioned snapshot") + _, staticSnapshot, staticSnapshotContentCreated, + staticSnapshotCreated, err := createPreProvisionedSnapshotInGuestCluster(ctx, volumeSnapshot, snapshotContent, + snapc, namespace, pandoraSyncWaitTime, svcVolumeSnapshotName, diskSize) + defer func() { + if staticSnapshotCreated { + framework.Logf("Deleting static volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, staticSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *staticSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if staticSnapshotContentCreated { + framework.Logf("Deleting static volume snapshot content") + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *staticSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *staticSnapshot.Status.BoundVolumeSnapshotContentName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Delete static volume snapshot") + staticSnapshotCreated, staticSnapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + staticSnapshot, pandoraSyncWaitTime, volHandle, staticSnapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC using the snapshot deleted") + pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, + v1.ReadWriteOnce, staticSnapshot.Name, snapshotapigroup) + + pvclaim2, err := fpv.CreatePVC(client, namespace, pvcSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + _, err = fpv.WaitForPVClaimBoundPhase(client, []*v1.PersistentVolumeClaim{pvclaim2}, + framework.ClaimProvisionShortTimeout) + gomega.Expect(err).To(gomega.HaveOccurred()) + + expectedErrMsg := "error getting handle for DataSource Type VolumeSnapshot by Name" + ginkgo.By(fmt.Sprintf("Expected failure message: %+q", expectedErrMsg)) + isFailureFound := checkEventsforError(client, namespace, + metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s", pvclaim2.Name)}, + expectedErrMsg) + gomega.Expect(isFailureFound).To(gomega.BeTrue(), + fmt.Sprintf("Expected pvc creation failure with error message: %s", expectedErrMsg)) + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + }) + + /* + Create a volume from a snapshot that is still not ready-to-use + 1. Create a pre-provisioned snapshot pointing to a VolumeSnapshotContent + which is still not provisioned (or does not exist) + 2. The snapshot will have status.readyToUse: false and snapshot is in Pending state + 3. Create a volume using the above snapshot as source and ensure the provisioning fails with error: + ProvisioningFailed | snapshot <> not bound + 4. 
pvc is stuck in Pending + 5. Once the VolumeSnapshotContent is created, snapshot should have status.readyToUse: true + 6. The volume should now get provisioned successfully + 7. Validate the pvc is Bound + 8. Cleanup the snapshot and pvc + */ + ginkgo.It("[tkg-snapshot] Restore volume from a static snapshot that is still not ready-to-use", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } + + ginkgo.By("Create storage class and PVC") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", true, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, _, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + framework.Logf("Get volume snapshot handle from Supervisor Cluster") + snapshotId, _, svcVolumeSnapshotName, err := getSnapshotHandleFromSupervisorCluster(ctx, volumeSnapshotClass, + *snapshotContent.Status.SnapshotHandle) + + ginkgo.By("Create Pre-provisioned snapshot in Guest Cluster") + framework.Logf("Change the deletion policy of VolumeSnapshotContent from Delete to Retain " + + "in Guest Cluster") + updatedSnapshotContent, err := changeDeletionPolicyOfVolumeSnapshotContent(ctx, + snapshotContent, snapc, namespace, snapV1.VolumeSnapshotContentRetain) + + framework.Logf("Delete dynamic volume snapshot from Guest Cluster") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Delete VolumeSnapshotContent from Guest Cluster explicitly") + err = deleteVolumeSnapshotContent(ctx, updatedSnapshotContent, snapc, namespace, 
pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + framework.Logf("Creating static VolumeSnapshotContent in Guest Cluster using "+ + "supervisor VolumeSnapshotName %s", svcVolumeSnapshotName) + staticSnapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Create(ctx, + getVolumeSnapshotContentSpec(snapV1.DeletionPolicy("Delete"), svcVolumeSnapshotName, + "static-vs", namespace), metav1.CreateOptions{}) + + framework.Logf("Verify VolumeSnapshotContent is created or not in Guest Cluster") + staticSnapshotContent, err = snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, + staticSnapshotContent.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Snapshotcontent name is %s", staticSnapshotContent.ObjectMeta.Name) + if !*staticSnapshotContent.Status.ReadyToUse { + framework.Logf("VolumeSnapshotContent is not ready to use") + } + + ginkgo.By("Create a static volume snapshot using static snapshotcontent") + staticVolumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, + getVolumeSnapshotSpecByName(namespace, "static-vs", staticSnapshotContent.ObjectMeta.Name), + metav1.CreateOptions{}) + if err != nil { + framework.Logf("failed to create static volume snapshot: %v", err) + } + framework.Logf("Volume snapshot name is : %s", staticVolumeSnapshot.Name) + + ginkgo.By("Create PVC while snapshot is still provisioning") + pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, + v1.ReadWriteOnce, "static-vs", snapshotapigroup) + pvclaim2, err := fpv.CreatePVC(client, namespace, pvcSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(client, + []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + if guestCluster { + volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) + } + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Verify static volume snapshot is created") + staticSnapshot, err := waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, staticVolumeSnapshot.Name) + if err != nil { + framework.Logf("failed to wait for volume snapshot: %v", err) + } + if staticSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize)) != 0 { + framework.Logf("expected RestoreSize does not match") + } + framework.Logf("Snapshot details is %+v", staticSnapshot) + + ginkgo.By("Delete pre-provisioned snapshot") + staticSnapshotCreated, staticSnapshotContentCreated, err := deleteVolumeSnapshot(ctx, snapc, namespace, + staticSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if staticSnapshotCreated { + framework.Logf("Deleting static volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, staticSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *staticSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if 
staticSnapshotContentCreated { + framework.Logf("Deleting static volume snapshot content") + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *staticSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *staticSnapshot.Status.BoundVolumeSnapshotContentName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Create snapshot on Supervisor cluster + + Create a Storage Class, and a PVC using this SC in Supervisor cluster. + Create a dynamic snapshot in supervisor using above PVC as source + Snapshot creation should fail with appropriate error message. + Cleanup the snapshots, PVC and SC + */ + + ginkgo.It("[tkg-snapshot] Verify Snapshot creation should fail on supervisor cluster", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + storageclass, err := svcClient.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pvclaim, err := fpv.CreatePVC(svcClient, svcNamespace, + getPersistentVolumeClaimSpecWithStorageClass(svcNamespace, diskSize, storageclass, nil, v1.ReadWriteOnce)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Expect claim to provision volume successfully") + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(svcClient, + []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") + volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err = fpv.DeletePersistentVolumeClaim(svcClient, pvclaim.Name, pvclaim.Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) + gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Expected snapshot creation failure on supervisor cluster") + _, err = snapc.SnapshotV1().VolumeSnapshots(svcNamespace).Create(ctx, + getVolumeSnapshotSpec(svcNamespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) + if err != nil { + framework.Logf(err.Error()) + } + }) + + /* + Perform online resize on restored volume + 1. Create a Storage Class, a PVC and attach the PVC to a Pod, write a file + 2. Create dynamically provisioned snapshots using this PVC + 3. Create new volume using this snapshots as source, use the same SC and attach it to a Pod. + 4. Ensure the PVC gets provisioned and is Bound. + 5. Verify the previous snapshot data is intact and write new data to restored volume + 6. 
Perform online resize on the restored volume and make sure resize should go fine. + 7. Create dynamically provisioned snapshots using the PVC created in step #4 + 8. Verify snapshot size. It should be same as that of restored volume size. + 9. Run cleanup: Delete snapshots, restored-volumes, pods. + */ + ginkgo.It("[tkg-snapshot] Perform online resize on restored volume", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var storageclass *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var err error + var snapshotContentCreated = false + var snapshotCreated = false + + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } + + ginkgo.By("Create storage class and PVC") + storageclass, err = createStorageClass(client, scParameters, nil, "", "", true, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pvc %s in namespace %s", pvclaim.Name, namespace)) + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create/Get volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + if vanillaCluster { + err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a volume snapshot") + volumeSnapshot, _, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) + + defer func() { + if snapshotContentCreated { + framework.Logf("Deleting volume snapshot content") + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + } + }() + + ginkgo.By("Create PVC from Snapshot and verify restore volume operations") + pvclaim2, persistentVolumes2, pod := verifyVolumeRestoreOperation(ctx, client, + namespace, storageclass, volumeSnapshot, true) + volHandle2 := persistentVolumes2[0].Spec.CSI.VolumeHandle + svcPVCName2 := persistentVolumes2[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) + } + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) 
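Aside (not part of the patch): helpers such as getPersistentVolumeClaimSpecWithDatasource and verifyVolumeRestoreOperation presumably build a PVC whose spec.dataSource names the VolumeSnapshot to restore from, which is what makes the CSI driver provision a new volume populated from that snapshot. A minimal standalone sketch of such a spec follows; the storage class, namespace, snapshot name and request size are placeholder assumptions and may not match the suite's helpers exactly.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	snapshotAPIGroup := "snapshot.storage.k8s.io"
	storageClassName := "example-sc" // illustrative; the tests reuse the source PVC's storage class

	// A PVC restored from an existing VolumeSnapshot: spec.dataSource points at the snapshot.
	restoredPVC := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "restored-pvc-", Namespace: "test-ns"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			StorageClassName: &storageClassName,
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("2Gi")},
			},
			DataSource: &v1.TypedLocalObjectReference{
				APIGroup: &snapshotAPIGroup,
				Kind:     "VolumeSnapshot",
				Name:     "example-snapshot", // name of the VolumeSnapshot to restore from (placeholder)
			},
		},
	}
	fmt.Printf("restore PVC dataSource: %+v\n", restoredPVC.Spec.DataSource)
}
```

Submitting a claim like this (for example via fpv.CreatePVC, as the tests do) and then waiting for it to reach Bound is the restore path the scenarios above exercise.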
+ + defer func() { + // Delete POD + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Deleting the pvc %s in namespace %s", pvclaim2.Name, namespace)) + err := fpv.DeletePersistentVolumeClaim(client, pvclaim2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Perform online resize on the restored volume and make sure resize should go fine") + verifyOnlineVolumeExpansionOnGc(client, namespace, svcPVCName2, volHandle, pvclaim2, pod, f) + + ginkgo.By("Create a volume snapshot from restored volume") + volumeSnapshotFromRestoreVol, snapshotContentFromRestoreVol, snapshotCreated, + snapshotContentCreated, snapshotIdFromRestoreVol, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, + volumeSnapshotClass, pvclaim2, volHandle2, "3Gi") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + framework.Logf("Volume snapshot name is : %s", volumeSnapshotFromRestoreVol.Name) + snapshotCreated = true + + defer func() { + if snapshotContentCreated { + framework.Logf("Deleting volume snapshot content") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = deleteVolumeSnapshotContent(ctx, snapshotContentFromRestoreVol, + snapc, namespace, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshotFromRestoreVol.Name, pandoraSyncWaitTime) + } + }() + + framework.Logf("Deleting volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshotFromRestoreVol, pandoraSyncWaitTime, volHandle2, snapshotIdFromRestoreVol) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }) + + /* + Offline relocation of FCD with snapshots + 1. Create a Storage Class, and a PVC. + 2. Ensure the Volume-snapshot and VolumeSnapshotContent is created and Bound + 3. Run FCD relocate on this volume using CNS side APIs + 4. If relocate is supported, create new snapshots after relocate is successful + 5. Verify snapshot status which we took before relocating FCD. + 6. Create new volume using this snapshot as source, use the same SC and attach it to a Pod. + 7. Run cleanup: Delete snapshots, restored-volumes, pods. 
+ */ + ginkgo.It("[tkg-snapshot] Offline relocation of FCD with snapshots", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var storageclass *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var err error + var snapshotContentCreated, snapshotCreated bool + var datastoreUrls []string + + sharedvmfsURL := os.Getenv(envSharedVMFSDatastoreURL) + if sharedvmfsURL == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastoreURL)) + } + + sharedVsanDatastoreURL := os.Getenv(envSharedDatastoreURL) + if sharedVsanDatastoreURL == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedDatastoreURL)) + } + datastoreUrls = append(datastoreUrls, sharedvmfsURL, sharedVsanDatastoreURL) + + storagePolicyName = os.Getenv(envStoragePolicyNameForVsanVmfsDatastores) + if storagePolicyName == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envStoragePolicyNameForVsanVmfsDatastores)) + } + + ginkgo.By("Create storage class and PVC") + storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", + diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pvc %s in namespace %s", pvclaim.Name, namespace)) + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create/Get volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + if vanillaCluster { + err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) + framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) + + defer func() { + if snapshotContentCreated { + framework.Logf("Deleting volume snapshot content") + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + } + }() + + ginkgo.By("Verify if VolumeID is created on the given datastores") + dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volHandle) + framework.Logf("Volume: %s is present on %s", volHandle, dsUrlWhereVolumeIsPresent) + e2eVSphere.verifyDatastoreMatch(volHandle, datastoreUrls) + + // Get the destination ds url where the volume will get relocated + destDsUrl := "" + for _, dsurl := range datastoreUrls { + if dsurl != dsUrlWhereVolumeIsPresent { + destDsUrl = dsurl + break + } + } + + ginkgo.By("Relocate FCD to another 
datastore") + dsRefDest := getDsMoRefFromURL(ctx, destDsUrl) + _, err = e2eVSphere.cnsRelocateVolume(e2eVSphere, ctx, volHandle, dsRefDest, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create PVC from snapshot") + pvclaim2, persistentVolumes2, pod := verifyVolumeRestoreOperation(ctx, client, + namespace, storageclass, volumeSnapshot, true) + volHandle2 := persistentVolumes2[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) + } + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + + defer func() { + // Delete POD + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Deleting the pvc %s in namespace %s", pvclaim2.Name, namespace)) + err := fpv.DeletePersistentVolumeClaim(client, pvclaim2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + framework.Logf("Deleting volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }) + + /* + Dynamic snapshot created in one guest cluster and restore it on another guest cluster + 1. Create a SC and PVC using this SC and attach it to Pod. Write some data on it. + 2. Create a volume snapshot using this PVC as source in Guest Cluster GC-1 and bound. + 3. Restore volume snapshot created in step #2 in another Guest Cluster GC-2 + 4. Verify restore volume creation status in another GC fails with appropriate error. + 5. Run cleanup: Delete snapshots, restored-volumes, pods. 
+ */ + ginkgo.It("[tkg-snapshot] Dynamic snapshot created in one guest cluster "+ + "and restore it on another guest cluster", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var storageclass *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var err error + var snapshotContentCreated, snapshotCreated bool + + newGcKubconfigPath := os.Getenv("NEW_GUEST_CLUSTER_KUBE_CONFIG") + if newGcKubconfigPath == "" { + ginkgo.Skip("Env NEW_GUEST_CLUSTER_KUBE_CONFIG is missing") + } + clientNewGc, err = createKubernetesClientFromConfig(newGcKubconfigPath) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Error creating k8s client with %v: %v", newGcKubconfigPath, err)) + + ginkgo.By("Create storage class and PVC") + storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", + diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pvc %s in namespace %s", pvclaim.Name, namespace)) + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create/Get volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + if vanillaCluster { + err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a volume snapshot") + volumeSnapshot, _, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) + + defer func() { + if snapshotContentCreated { + framework.Logf("Deleting volume snapshot content") + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + } + }() + + ginkgo.By("Creating namespace on second GC") + ns, err := framework.CreateTestingNS(f.BaseName, clientNewGc, map[string]string{ + "e2e-framework": f.BaseName, + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Error creating namespace on second GC") + + namespaceNewGC := ns.Name + framework.Logf("Created namespace on second GC %v", namespaceNewGC) + defer func() { + err := clientNewGc.CoreV1().Namespaces().Delete(ctx, namespaceNewGC, *metav1.NewDeleteOptions(0)) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create PVC from snapshot") + pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespaceNewGC, diskSize, storageclass, nil, + v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) + + pvclaim2, err := fpv.CreatePVC(clientNewGc, namespaceNewGC, pvcSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pvc %s in namespace %s", pvclaim2.Name, namespace)) + err = fpv.DeletePersistentVolumeClaim(clientNewGc, pvclaim2.Name, namespaceNewGC) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + _, err = fpv.WaitForPVClaimBoundPhase(clientNewGc, + []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) + gomega.Expect(err).To(gomega.HaveOccurred()) + expectedErrMsg := "error getting handle for DataSource Type VolumeSnapshot by Name " + volumeSnapshot.Name + framework.Logf("Expected failure message: %+q", expectedErrMsg) + err = waitForEvent(ctx, clientNewGc, namespaceNewGC, expectedErrMsg, pvclaim2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) + + ginkgo.By("Delete PVC created from snapshot") + err = fpv.DeletePersistentVolumeClaim(clientNewGc, pvclaim2.Name, namespaceNewGC) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Volume mode conversion + 1. Create a Storage Class, PVC. + 2. Create Dynamic Provisioned snapshot on above PVC. + 3. Verify VolumeSnapshot and VolumeSnapshotContent status. + 4. Create new volume using snapshot created in step #4, but this time + give access mode like ReadWriteMany or ReadOnlymany or ReadOnlyOncePod) + 5. Restore PVC creation should fail and be stuck in Pending state with appropriate error message. + 6. Perform Cleanup. 
+ */ + ginkgo.It("[tkg-snapshot] Volume mode conversion", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var storageclass *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var err error + var snapshotContentCreated, snapshotCreated bool + + ginkgo.By("Create storage class and PVC") + storageclass, err = client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", + diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pvc %s in namespace %s", pvclaim.Name, namespace)) + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create/Get volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + if vanillaCluster { + err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) + + defer func() { + if snapshotContentCreated { + framework.Logf("Deleting volume snapshot content") + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + } + }() + + accessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteMany, v1.ReadOnlyMany} + + for _, accessMode := range accessModes { + ginkgo.By(fmt.Sprintf("Create PVC from snapshot with %s access mode", accessMode)) + pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, + accessMode, volumeSnapshot.Name, snapshotapigroup) + + pvclaim2, err := fpv.CreatePVC(client, namespace, pvcSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + _, err = fpv.WaitForPVClaimBoundPhase(client, + []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) + framework.Logf("Error from creating pvc with %s accessmode is : %s", accessMode, err.Error()) + gomega.Expect(err).To(gomega.HaveOccurred()) + + expectedErrMsg := "no datastores found to create file volume" + framework.Logf("Expected failure message: %+q", expectedErrMsg) + err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) + + ginkgo.By(fmt.Sprintf("Deleting the pvc %s in namespace %s", pvclaim2.Name, namespace)) + err = 
fpv.DeletePersistentVolumeClaim(client, pvclaim2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = waitForPvcToBeDeleted(ctx, client, pvclaim2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } + + framework.Logf("Deleting pending PVCs from SVC namespace") + pvcList := getAllPVCFromNamespace(svcClient, svcNamespace) + for _, pvc := range pvcList.Items { + if pvc.Status.Phase == v1.ClaimPending { + framework.ExpectNoError(fpv.DeletePersistentVolumeClaim(svcClient, pvc.Name, svcNamespace), + "Failed to delete PVC", pvc.Name) + } + } + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Volume snapshot creation on a file-share volume + Create a file-share pvc + Try creating a snapshot on this pvc + Should fail with an appropriate error + */ + + ginkgo.It("[tkg-snapshot] Volume snapshot creation on a file-share volume on a guest cluster", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + scParameters[svStorageClassName] = storagePolicyName + + ginkgo.By("Create storage class and PVC") + storageclass, pvclaim, err := createPVCAndStorageClass(client, + namespace, nil, scParameters, diskSize, nil, "", false, v1.ReadWriteMany) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Expect claim to fail as invalid storage policy is specified in Storage Class") + framework.ExpectError(fpv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, + client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)) + expectedErrMsg := "no datastores found to create file volume, vsan file service may be disabled" + err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) + }) + + /* + PVC/Pod → Snapshot → RestoreVolume/Pod → Snapshot → Restore Vol again/Pod + + Create a Storage Class, a PVC and attach the PVC to a Pod, write a file + Create dynamically provisioned snapshots using this PVC + Create new volume using this snapshots as source, use the same SC + Ensure the PVC gets provisioned and is Bound + Attach the PVC to a Pod and ensure data from snapshot is available (file that was written in step.1 + should be available) + And also write new data to the restored volumes and it should succeed + Take a snapshot of restore volume created in step #3. + Create new volume using the snapshot as source use the same SC created in step #7 + Ensure the PVC gets provisioned and is Bound + Attach the PVC to a Pod and ensure data from snapshot is available (file that was written in + step.1 and step 5 should be available) + And also write new data to the restored volumes and it should succeed + Run cleanup: Delete snapshots, restored-volumes, pods. 
+ */ + + ginkgo.It("[tkg-snapshot] Create restore volume snapshot in consistent order", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } + + ginkgo.By("Create storage class and PVC") + storageclass, err := createStorageClass(client, scParameters, nil, "", "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + pvclaim, persistentVolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil, "", + diskSize, storageclass, true) + volHandle := persistentVolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create Pod") + pod, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, + execRWXCommandPod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + var vmUUID string + nodeName := pod.Spec.NodeName + + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, nodeName)) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "cat /mnt/volume1/Pod.html "} + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) + gomega.Expect(strings.Contains(output, "Hello message from Pod")).NotTo(gomega.BeFalse()) + + wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "echo 'Hello message from test into Pod' > /mnt/volume1/Pod.html"} + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
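Aside (not part of the patch): the dynamic snapshots taken throughout these tests, wrapped by createDynamicVolumeSnapshot, come down to creating a VolumeSnapshot that names a source PVC and a VolumeSnapshotClass with the external-snapshotter v6 client this patch switches to. A minimal sketch under assumed placeholder names (kubeconfig path, namespace, PVC and class names) is below; the suite's helper additionally returns a snapshot ID and readiness/cleanup bookkeeping that this sketch omits.

```go
package main

import (
	"context"
	"fmt"

	snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
	snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder kubeconfig path; the e2e suite builds its rest.Config via getRestConfigClient instead.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	snapc, err := snapclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	pvcName := "source-pvc"        // PVC to snapshot (placeholder)
	className := "example-vsclass" // VolumeSnapshotClass name (placeholder)

	// A dynamic snapshot only names the source PVC and a snapshot class; the
	// external-snapshotter controller then creates and binds the VolumeSnapshotContent.
	snap := &snapV1.VolumeSnapshot{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "dyn-snap-", Namespace: "test-ns"},
		Spec: snapV1.VolumeSnapshotSpec{
			Source:                  snapV1.VolumeSnapshotSource{PersistentVolumeClaimName: &pvcName},
			VolumeSnapshotClassName: &className,
		},
	}
	created, err := snapc.SnapshotV1().VolumeSnapshots("test-ns").Create(context.TODO(), snap, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("created VolumeSnapshot:", created.Name)
}
```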
+ gomega.Expect(strings.Contains(output, "Hello message from test into Pod")).NotTo(gomega.BeFalse()) + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim, volHandle, diskSize) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Restore volume from snapshot created above") + pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, + v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) + pvclaim1, err := fpv.CreatePVC(client, namespace, pvcSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + persistentvolume1, err := fpv.WaitForPVClaimBoundPhase(client, []*v1.PersistentVolumeClaim{pvclaim1}, + framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle1 := persistentvolume1[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle1).NotTo(gomega.BeEmpty()) + if guestCluster { + volHandle1 = getVolumeIDFromSupervisorCluster(volHandle1) + } + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim1.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create Pod") + pod1, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim1}, false, + execRWXCommandPod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod1.Name, namespace)) + err = fpod.DeletePodWithWait(client, pod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod1.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod1.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle1, pod1.Spec.NodeName)) + isDiskAttached, err = e2eVSphere.isVolumeAttachedToVM(client, volHandle1, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + cmd = []string{"exec", pod1.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "cat /mnt/volume1/Pod1.html 
"} + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) + gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) + + wrtiecmd = []string{"exec", pod1.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot1, snapshotContent1, snapshotCreated1, + snapshotContentCreated1, snapshotId1, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvclaim1, volHandle1, diskSize) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated1 { + err = deleteVolumeSnapshotContent(ctx, snapshotContent1, snapc, namespace, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated1 { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot1.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot1.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Restore volume from snapshot created above") + pvcSpec = getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, + v1.ReadWriteOnce, volumeSnapshot1.Name, snapshotapigroup) + pvclaim2, err := fpv.CreatePVC(client, namespace, pvcSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + persistentvolume2, err := fpv.WaitForPVClaimBoundPhase(client, []*v1.PersistentVolumeClaim{pvclaim2}, + framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle2 := persistentvolume2[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + if guestCluster { + volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) + } + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create Pod") + pod2, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim2}, false, + execRWXCommandPod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) + err = fpod.DeletePodWithWait(client, pod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod2.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod2.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle2, pod2.Spec.NodeName)) + isDiskAttached, err = e2eVSphere.isVolumeAttachedToVM(client, volHandle2, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + cmd = []string{"exec", pod2.Name, "--namespace=" + namespace, "--", 
"/bin/sh", "-c", + "cat /mnt/volume1/Pod2.html "} + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) + gomega.Expect(strings.Contains(output, "Hello message from Pod2")).NotTo(gomega.BeFalse()) + + wrtiecmd = []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "echo 'Hello message from test into Pod2' > /mnt/volume1/Pod2.html"} + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod2")).NotTo(gomega.BeFalse()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volHandle, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete dynamic volume snapshot") + snapshotCreated1, snapshotContentCreated1, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot1, pandoraSyncWaitTime, volHandle1, snapshotId1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) }) diff --git a/tests/e2e/csi_snapshot_file_volume.go b/tests/e2e/csi_snapshot_file_volume.go index f3468467cc..7b6a0f1e11 100644 --- a/tests/e2e/csi_snapshot_file_volume.go +++ b/tests/e2e/csi_snapshot_file_volume.go @@ -16,6 +16,8 @@ package e2e import ( "context" "fmt" + "os" + "strconv" ginkgo "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" @@ -29,20 +31,21 @@ import ( fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpv "k8s.io/kubernetes/test/e2e/framework/pv" - snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - snapclient "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" + snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" ) var _ = ginkgo.Describe("[file-vanilla-snapshot] Volume Snapshot file volume Test", func() { f := framework.NewDefaultFramework("file-snapshot") var ( - client clientset.Interface - namespace string - scParameters map[string]string - datastoreURL string - pvclaims []*v1.PersistentVolumeClaim - restConfig *restclient.Config - snapc *snapclient.Clientset + client clientset.Interface + namespace string + scParameters map[string]string + datastoreURL string + pvclaims []*v1.PersistentVolumeClaim + restConfig *restclient.Config + snapc *snapclient.Clientset + pandoraSyncWaitTime int ) ginkgo.BeforeEach(func() { @@ -61,6 +64,13 @@ var _ = ginkgo.Describe("[file-vanilla-snapshot] Volume Snapshot file volume Tes restConfig = getRestConfigClient() snapc, err = snapclient.NewForConfig(restConfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if os.Getenv(envPandoraSyncWaitTime) != "" { + pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + pandoraSyncWaitTime = defaultPandoraSyncWaitTime + } }) /* @@ -127,8 +137,7 @@ var _ = ginkgo.Describe("[file-vanilla-snapshot] Volume Snapshot file volume Tes defer func() { if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) } }() diff --git a/tests/e2e/csi_snapshot_negative.go 
b/tests/e2e/csi_snapshot_negative.go index c72fc72fcb..55d844b596 100644 --- a/tests/e2e/csi_snapshot_negative.go +++ b/tests/e2e/csi_snapshot_negative.go @@ -36,26 +36,27 @@ import ( fpv "k8s.io/kubernetes/test/e2e/framework/pv" admissionapi "k8s.io/pod-security-admission/api" - snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - snapclient "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" + snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" ) var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injection Test", func() { f := framework.NewDefaultFramework("file-snapshot") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var ( - client clientset.Interface - csiNamespace string - csiReplicas int32 - isServiceStopped bool - namespace string - scParameters map[string]string - datastoreURL string - fullSyncWaitTime int - pvclaims []*v1.PersistentVolumeClaim - restConfig *restclient.Config - snapc *snapclient.Clientset - serviceName string + client clientset.Interface + csiNamespace string + csiReplicas int32 + isServiceStopped bool + namespace string + scParameters map[string]string + datastoreURL string + fullSyncWaitTime int + pvclaims []*v1.PersistentVolumeClaim + restConfig *restclient.Config + snapc *snapclient.Clientset + serviceName string + pandoraSyncWaitTime int ) ginkgo.BeforeEach(func() { @@ -93,6 +94,13 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti ctx, vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) csiReplicas = *csiDeployment.Spec.Replicas + + if os.Getenv(envPandoraSyncWaitTime) != "" { + pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + pandoraSyncWaitTime = defaultPandoraSyncWaitTime + } }) ginkgo.AfterEach(func() { @@ -199,8 +207,7 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti defer func() { if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) } }() @@ -222,38 +229,38 @@ var _ = ginkgo.Describe("[block-snapshot-negative] Volume Snapshot Fault-Injecti ginkgo.It("create volume snapshot when hostd goes down", func() { serviceName = hostdServiceName snapshotOperationWhileServiceDown(serviceName, namespace, client, snapc, datastoreURL, - fullSyncWaitTime, isServiceStopped, true, csiReplicas) + fullSyncWaitTime, isServiceStopped, true, csiReplicas, pandoraSyncWaitTime) }) ginkgo.It("create volume snapshot when CSI restarts", func() { serviceName = "CSI" snapshotOperationWhileServiceDown(serviceName, namespace, client, snapc, datastoreURL, - fullSyncWaitTime, isServiceStopped, true, csiReplicas) + fullSyncWaitTime, isServiceStopped, true, csiReplicas, pandoraSyncWaitTime) }) ginkgo.It("create volume snapshot when VPXD goes down", func() { serviceName = vpxdServiceName snapshotOperationWhileServiceDownNegative(serviceName, namespace, client, snapc, datastoreURL, - fullSyncWaitTime, isServiceStopped, csiReplicas) + fullSyncWaitTime, isServiceStopped, csiReplicas, 
pandoraSyncWaitTime) }) ginkgo.It("create volume snapshot when CNS goes down", func() { serviceName = vsanhealthServiceName snapshotOperationWhileServiceDown(serviceName, namespace, client, snapc, datastoreURL, - fullSyncWaitTime, isServiceStopped, false, csiReplicas) + fullSyncWaitTime, isServiceStopped, false, csiReplicas, pandoraSyncWaitTime) }) ginkgo.It("create volume snapshot when SPS goes down", func() { serviceName = spsServiceName snapshotOperationWhileServiceDown(serviceName, namespace, client, snapc, datastoreURL, - fullSyncWaitTime, isServiceStopped, true, csiReplicas) + fullSyncWaitTime, isServiceStopped, true, csiReplicas, pandoraSyncWaitTime) }) }) // snapshotOperationWhileServiceDown creates the volumesnapshot while the services is down func snapshotOperationWhileServiceDown(serviceName string, namespace string, client clientset.Interface, snapc *snapclient.Clientset, datastoreURL string, - fullSyncWaitTime int, isServiceStopped bool, isSnapshotCreated bool, csiReplicas int32) { + fullSyncWaitTime int, isServiceStopped bool, isSnapshotCreated bool, csiReplicas int32, pandoraSyncWaitTime int) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass @@ -318,9 +325,7 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, defer func() { if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - snapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot.Name, pandoraSyncWaitTime) framework.Logf("Wait till the volume snapshot is deleted") err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, snapshot.ObjectMeta.Name) @@ -330,26 +335,29 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *snapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *snapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *snapshot.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() if serviceName == "CSI" { ginkgo.By("Stopping CSI driver") - isServiceStopped, err = stopCSIPods(ctx, client) + isServiceStopped, err = stopCSIPods(ctx, client, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if isServiceStopped { framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, client, csiReplicas) + isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, client, csiReplicas) + isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow full sync finish", fullSyncWaitTime)) @@ -443,17 +451,17 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, if serviceName == "CSI" { ginkgo.By("Stopping CSI driver") - isServiceStopped, err = stopCSIPods(ctx, client) + isServiceStopped, 
err = stopCSIPods(ctx, client, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if isServiceStopped { framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, client, csiReplicas) + isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, client, csiReplicas) + isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow full sync finish", fullSyncWaitTime)) @@ -539,25 +547,24 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, } ginkgo.By("Deleted volume snapshot is created above") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, snapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot.Name, pandoraSyncWaitTime) snapshotCreated = false if isSnapshotCreated { if serviceName == "CSI" { ginkgo.By("Stopping CSI driver") - isServiceStopped, err = stopCSIPods(ctx, client) + isServiceStopped, err = stopCSIPods(ctx, client, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if isServiceStopped { framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, client, csiReplicas) + isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, client, csiReplicas) + isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow full sync finish", fullSyncWaitTime)) @@ -639,7 +646,7 @@ func snapshotOperationWhileServiceDown(serviceName string, namespace string, // snapshotOperationWhileServiceDownNegative creates the volumesnapshot while the services is down func snapshotOperationWhileServiceDownNegative(serviceName string, namespace string, client clientset.Interface, snapc *snapclient.Clientset, datastoreURL string, - fullSyncWaitTime int, isServiceStopped bool, csiReplicas int32) { + fullSyncWaitTime int, isServiceStopped bool, csiReplicas int32, pandoraSyncWaitTime int) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var storageclass *storagev1.StorageClass @@ -702,9 +709,7 @@ func snapshotOperationWhileServiceDownNegative(serviceName string, namespace str defer func() { if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - snapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snapshot.Name, pandoraSyncWaitTime) framework.Logf("Wait till the volume snapshot is deleted") err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, snapshot.ObjectMeta.Name) @@ -714,19 +719,19 @@ func snapshotOperationWhileServiceDownNegative(serviceName string, namespace str if serviceName == "CSI" { ginkgo.By("Stopping CSI driver") - isServiceStopped, err = stopCSIPods(ctx, client) + isServiceStopped, err = stopCSIPods(ctx, client, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer 
func() { if isServiceStopped { framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, client, csiReplicas) + isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, client, csiReplicas) + isServiceStopped, err = startCSIPods(ctx, client, csiReplicas, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow full sync finish", fullSyncWaitTime)) diff --git a/tests/e2e/csi_snapshot_utils.go b/tests/e2e/csi_snapshot_utils.go new file mode 100644 index 0000000000..387a892aff --- /dev/null +++ b/tests/e2e/csi_snapshot_utils.go @@ -0,0 +1,726 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" + fpv "k8s.io/kubernetes/test/e2e/framework/pv" +) + +// getVolumeSnapshotClassSpec returns a spec for the volume snapshot class +func getVolumeSnapshotClassSpec(deletionPolicy snapV1.DeletionPolicy, + parameters map[string]string) *snapV1.VolumeSnapshotClass { + var volumesnapshotclass = &snapV1.VolumeSnapshotClass{ + TypeMeta: metav1.TypeMeta{ + Kind: "VolumeSnapshotClass", + }, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "volumesnapshot-", + }, + Driver: e2evSphereCSIDriverName, + DeletionPolicy: deletionPolicy, + } + + volumesnapshotclass.Parameters = parameters + return volumesnapshotclass +} + +// getVolumeSnapshotSpec returns a spec for the volume snapshot +func getVolumeSnapshotSpec(namespace string, snapshotclassname string, pvcName string) *snapV1.VolumeSnapshot { + var volumesnapshotSpec = &snapV1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "snapshot-", + Namespace: namespace, + }, + Spec: snapV1.VolumeSnapshotSpec{ + VolumeSnapshotClassName: &snapshotclassname, + Source: snapV1.VolumeSnapshotSource{ + PersistentVolumeClaimName: &pvcName, + }, + }, + } + return volumesnapshotSpec +} + +// waitForVolumeSnapshotReadyToUse waits for the volume's snapshot to be in ReadyToUse +func waitForVolumeSnapshotReadyToUse(client snapclient.Clientset, ctx context.Context, namespace string, + name 
string) (*snapV1.VolumeSnapshot, error) { + var volumeSnapshot *snapV1.VolumeSnapshot + var err error + waitErr := wait.PollImmediate(poll, pollTimeout*2, func() (bool, error) { + volumeSnapshot, err = client.SnapshotV1().VolumeSnapshots(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, fmt.Errorf("error fetching volumesnapshot details : %v", err) + } + if volumeSnapshot.Status != nil && *volumeSnapshot.Status.ReadyToUse { + return true, nil + } + return false, nil + }) + return volumeSnapshot, waitErr +} + +// waitForVolumeSnapshotContentToBeDeleted wait till the volume snapshot content is deleted +func waitForVolumeSnapshotContentToBeDeleted(client snapclient.Clientset, ctx context.Context, + name string) error { + var err error + waitErr := wait.PollImmediate(poll, 2*pollTimeout, func() (bool, error) { + _, err = client.SnapshotV1().VolumeSnapshotContents().Get(ctx, name, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + return true, nil + } else { + return false, fmt.Errorf("error fetching volumesnapshotcontent details : %v", err) + } + } + return false, nil + }) + return waitErr +} + +// deleteVolumeSnapshotWithPandoraWait deletes Volume Snapshot with Pandora wait for CNS to sync +func deleteVolumeSnapshotWithPandoraWait(ctx context.Context, snapc *snapclient.Clientset, + namespace string, snapshotName string, pandoraSyncWaitTime int) { + err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, snapshotName, + metav1.DeleteOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime)) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) +} + +// deleteVolumeSnapshotContentWithPandoraWait deletes Volume Snapshot Content with Pandora wait for CNS to sync +func deleteVolumeSnapshotContentWithPandoraWait(ctx context.Context, snapc *snapclient.Clientset, + snapshotContentName string, pandoraSyncWaitTime int) { + err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, snapshotContentName, metav1.DeleteOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime)) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, snapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) +} + +// waitForVolumeSnapshotContentToBeDeletedWithPandoraWait wait till the volume snapshot content is deleted +func waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx context.Context, snapc *snapclient.Clientset, + name string, pandoraSyncWaitTime int) error { + var err error + waitErr := wait.PollImmediate(poll, 2*pollTimeout, func() (bool, error) { + _, err = snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, name, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime)) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + return true, nil + } else { + return false, fmt.Errorf("error fetching volumesnapshotcontent details : %v", err) + } + } + return false, err + }) + return waitErr +} + +// waitForCNSSnapshotToBeDeleted wait till the give snapshot is deleted from CNS +func 
waitForCNSSnapshotToBeDeleted(volumeId string, snapshotId string) error { + var err error + waitErr := wait.PollImmediate(poll, pollTimeout, func() (bool, error) { + err = verifySnapshotIsDeletedInCNS(volumeId, snapshotId, false) + if err != nil { + if strings.Contains(err.Error(), "snapshot entry is still present") { + return false, nil + } + return false, err + } + framework.Logf("Snapshot with ID: %v for volume with ID: %v is deleted from CNS now...", snapshotId, volumeId) + return true, nil + }) + return waitErr +} + +// getVolumeSnapshotContentSpec returns a spec for the volume snapshot content +func getVolumeSnapshotContentSpec(deletionPolicy snapV1.DeletionPolicy, snapshotHandle string, + futureSnapshotName string, namespace string) *snapV1.VolumeSnapshotContent { + var volumesnapshotContentSpec = &snapV1.VolumeSnapshotContent{ + TypeMeta: metav1.TypeMeta{ + Kind: "VolumeSnapshotContent", + }, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "snapshotcontent-", + }, + Spec: snapV1.VolumeSnapshotContentSpec{ + DeletionPolicy: deletionPolicy, + Driver: e2evSphereCSIDriverName, + Source: snapV1.VolumeSnapshotContentSource{ + SnapshotHandle: &snapshotHandle, + }, + VolumeSnapshotRef: v1.ObjectReference{ + Name: futureSnapshotName, + Namespace: namespace, + }, + }, + } + return volumesnapshotContentSpec +} + +// getVolumeSnapshotSpecByName returns a spec for the volume snapshot by name +func getVolumeSnapshotSpecByName(namespace string, snapshotName string, + snapshotcontentname string) *snapV1.VolumeSnapshot { + var volumesnapshotSpec = &snapV1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: snapshotName, + Namespace: namespace, + }, + Spec: snapV1.VolumeSnapshotSpec{ + Source: snapV1.VolumeSnapshotSource{ + VolumeSnapshotContentName: &snapshotcontentname, + }, + }, + } + return volumesnapshotSpec +} + +// createSnapshotInParallel creates snapshot for a given pvc +// in a given namespace +func createSnapshotInParallel(ctx context.Context, namespace string, + snapc *snapclient.Clientset, pvcName string, volumeSnapClassName string, + ch chan *snapV1.VolumeSnapshot, lock *sync.Mutex, wg *sync.WaitGroup) { + defer wg.Done() + framework.Logf("Waiting for a few seconds for IO to happen to pod") + time.Sleep(time.Duration(10) * time.Second) + volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, + getVolumeSnapshotSpec(namespace, volumeSnapClassName, pvcName), metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) + lock.Lock() + ch <- volumeSnapshot + lock.Unlock() +} + +// getSnapshotHandleFromSupervisorCluster fetches the SnapshotHandle from Supervisor Cluster +func getSnapshotHandleFromSupervisorCluster(ctx context.Context, + volumeSnapshotClass *snapV1.VolumeSnapshotClass, snapshothandle string) (string, string, string, error) { + var snapc *snapclient.Clientset + var err error + if k8senv := GetAndExpectStringEnvVar("SUPERVISOR_CLUSTER_KUBE_CONFIG"); k8senv != "" { + restConfig, err := clientcmd.BuildConfigFromFlags("", k8senv) + if err != nil { + return "", "", "", err + } + snapc, err = snapclient.NewForConfig(restConfig) + if err != nil { + return "", "", "", err + } + } + + svNamespace := GetAndExpectStringEnvVar(envSupervisorClusterNamespace) + + volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(svNamespace).Get(ctx, snapshothandle, + metav1.GetOptions{}) + if err != nil { + return "", "", "", 
err + } + + snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) + if err != nil { + return "", "", "", err + } + + svcSnapshotHandle := *snapshotContent.Status.SnapshotHandle + snapshotID := strings.Split(svcSnapshotHandle, "+")[1] + + svcVolumeSnapshotName := volumeSnapshot.Name + + return snapshotID, svcSnapshotHandle, svcVolumeSnapshotName, nil +} + +// getRestConfigClient returns rest config client for Guest Cluster +func getRestConfigClientForGuestCluster(guestClusterRestConfig *rest.Config) *rest.Config { + var err error + if guestClusterRestConfig == nil { + if k8senv := GetAndExpectStringEnvVar("KUBECONFIG"); k8senv != "" { + guestClusterRestConfig, err = clientcmd.BuildConfigFromFlags("", k8senv) + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } + return guestClusterRestConfig +} + +// deleteVolumeSnapshot deletes volume snapshot from K8s side and CNS side +func deleteVolumeSnapshot(ctx context.Context, snapc *snapclient.Clientset, namespace string, + volumeSnapshot *snapV1.VolumeSnapshot, pandoraSyncWaitTime int, + volHandle string, snapshotID string) (bool, bool, error) { + var err error + + framework.Logf("Delete volume snapshot and verify the snapshot content is deleted") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + snapshotCreated := false + + framework.Logf("Wait until the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) + if err != nil { + return snapshotCreated, false, err + } + snapshotContentCreated := false + + framework.Logf("Verify snapshot entry %v is deleted from CNS for volume %v", snapshotID, volHandle) + err = waitForCNSSnapshotToBeDeleted(volHandle, snapshotID) + if err != nil { + return snapshotCreated, snapshotContentCreated, err + } + + framework.Logf("Verify snapshot entry is deleted from CNS") + err = verifySnapshotIsDeletedInCNS(volHandle, snapshotID, false) + if err != nil { + return snapshotCreated, snapshotContentCreated, err + } + + framework.Logf("Deleting volume snapshot again to check 'Not found' error") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + return snapshotCreated, snapshotContentCreated, nil +} + +// getVolumeSnapshotIdFromSnapshotHandle fetches VolumeSnapshotId From SnapshotHandle +func getVolumeSnapshotIdFromSnapshotHandle(ctx context.Context, snapshotContent *snapV1.VolumeSnapshotContent, + volumeSnapshotClass *snapV1.VolumeSnapshotClass, volHandle string) (string, error) { + var snapshotID string + var err error + if vanillaCluster { + snapshotHandle := *snapshotContent.Status.SnapshotHandle + snapshotID = strings.Split(snapshotHandle, "+")[1] + } else if guestCluster { + snapshotHandle := *snapshotContent.Status.SnapshotHandle + snapshotID, _, _, err = getSnapshotHandleFromSupervisorCluster(ctx, volumeSnapshotClass, snapshotHandle) + if err != nil { + return "", err + } + } + return snapshotID, nil +} + +// createVolumeSnapshotClass creates VSC for a Vanilla cluster and +// fetches VSC for a Guest or Supervisor Cluster +func createVolumeSnapshotClass(ctx context.Context, snapc *snapclient.Clientset, + deletionPolicy string) (*snapV1.VolumeSnapshotClass, error) { + var volumeSnapshotClass *snapV1.VolumeSnapshotClass + var err error + if vanillaCluster { + volumeSnapshotClass, err = 
snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, + getVolumeSnapshotClassSpec(snapV1.DeletionPolicy(deletionPolicy), nil), metav1.CreateOptions{}) + if err != nil { + return nil, err + } + } else if guestCluster || supervisorCluster { + var volumeSnapshotClassName string + if deletionPolicy == "Delete" { + volumeSnapshotClassName = GetAndExpectStringEnvVar(envVolSnapClassDel) + } else { + framework.Failf("%s volume snapshotclass is not supported"+ + " in Supervisor or Guest Cluster", deletionPolicy) + } + waitErr := wait.PollImmediate(poll, pollTimeout, func() (bool, error) { + volumeSnapshotClass, err = snapc.SnapshotV1().VolumeSnapshotClasses().Get(ctx, + volumeSnapshotClassName, metav1.GetOptions{}) + framework.Logf("volumesnapshotclass %v, err:%v", volumeSnapshotClass, err) + if !apierrors.IsNotFound(err) && err != nil { + return false, fmt.Errorf("couldn't find "+ + "snapshotclass: %s due to error: %v", volumeSnapshotClassName, err) + } + if volumeSnapshotClass.Name != "" { + framework.Logf("Found volumesnapshotclass %s", volumeSnapshotClassName) + return true, nil + } + framework.Logf("waiting to get volumesnapshotclass %s", volumeSnapshotClassName) + return false, nil + }) + if waitErr == wait.ErrWaitTimeout { + return nil, fmt.Errorf("couldn't find volumesnapshotclass: %s in SVC", volumeSnapshotClassName) + } + } + return volumeSnapshotClass, nil +} + +// createDynamicVolumeSnapshot util creates dynamic volume snapshot for a volume +func createDynamicVolumeSnapshot(ctx context.Context, namespace string, + snapc *snapclient.Clientset, volumeSnapshotClass *snapV1.VolumeSnapshotClass, + pvclaim *v1.PersistentVolumeClaim, volHandle string, diskSize string) (*snapV1.VolumeSnapshot, + *snapV1.VolumeSnapshotContent, bool, bool, string, error) { + + volumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, + getVolumeSnapshotSpec(namespace, volumeSnapshotClass.Name, pvclaim.Name), metav1.CreateOptions{}) + if err != nil { + return nil, nil, false, false, "", err + } + framework.Logf("Volume snapshot name is : %s", volumeSnapshot.Name) + + ginkgo.By("Verify volume snapshot is created") + volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) + if err != nil { + return nil, nil, false, false, "", err + } + + snapshotCreated := true + if volumeSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize)) != 0 { + return nil, nil, false, false, "", fmt.Errorf("unexpected restore size") + } + + ginkgo.By("Verify volume snapshot content is created") + snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) + if err != nil { + return nil, nil, false, false, "", err + } + snapshotContentCreated := true + snapshotContent, err = waitForVolumeSnapshotContentReadyToUse(*snapc, ctx, snapshotContent.Name) + if err != nil { + return nil, nil, false, false, "", fmt.Errorf("volume snapshot content is not ready to use") + } + + framework.Logf("Get volume snapshot ID from snapshot handle") + snapshotId, err := getVolumeSnapshotIdFromSnapshotHandle(ctx, snapshotContent, volumeSnapshotClass, + volHandle) + if err != nil { + return nil, nil, false, false, "", err + } + + ginkgo.By("Query CNS and check the volume snapshot entry") + err = waitForCNSSnapshotToBeCreated(volHandle, snapshotId) + if err != nil { + return nil, nil, false, false, snapshotId, err + } + + return volumeSnapshot, snapshotContent, snapshotCreated, snapshotContentCreated, 
snapshotId, nil +} + +// getPersistentVolumeClaimSpecWithDatasource return the PersistentVolumeClaim +// spec with specified storage class. +func getPersistentVolumeClaimSpecWithDatasource(namespace string, ds string, storageclass *storagev1.StorageClass, + pvclaimlabels map[string]string, accessMode v1.PersistentVolumeAccessMode, + datasourceName string, snapshotapigroup string) *v1.PersistentVolumeClaim { + disksize := diskSize + if ds != "" { + disksize = ds + } + if accessMode == "" { + // If accessMode is not specified, set the default accessMode. + accessMode = v1.ReadWriteOnce + } + claim := &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "pvc-", + Namespace: namespace, + }, + Spec: v1.PersistentVolumeClaimSpec{ + AccessModes: []v1.PersistentVolumeAccessMode{ + accessMode, + }, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): resource.MustParse(disksize), + }, + }, + StorageClassName: &(storageclass.Name), + DataSource: &v1.TypedLocalObjectReference{ + APIGroup: &snapshotapigroup, + Kind: "VolumeSnapshot", + Name: datasourceName, + }, + }, + } + + if pvclaimlabels != nil { + claim.Labels = pvclaimlabels + } + + return claim +} + +/* +changeDeletionPolicyOfVolumeSnapshotContentOnGuest changes the deletion policy +of volume snapshot content from delete to retain in Guest Cluster +*/ +func changeDeletionPolicyOfVolumeSnapshotContent(ctx context.Context, + snapshotContent *snapV1.VolumeSnapshotContent, snapc *snapclient.Clientset, + namespace string, policyName snapV1.DeletionPolicy) (*snapV1.VolumeSnapshotContent, error) { + + // Retrieve the latest version of the object + latestSnapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, snapshotContent.Name, + metav1.GetOptions{}) + if err != nil { + return nil, err + } + + // Apply changes to the latest version + latestSnapshotContent.Spec.DeletionPolicy = policyName + + // Update the object + updatedSnapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Update(ctx, + latestSnapshotContent, metav1.UpdateOptions{}) + if err != nil { + return nil, err + } + + return updatedSnapshotContent, nil +} + +/* deleteVolumeSnapshotContent deletes volume snapshot content explicitly on Guest cluster */ +func deleteVolumeSnapshotContent(ctx context.Context, updatedSnapshotContent *snapV1.VolumeSnapshotContent, + snapc *snapclient.Clientset, namespace string, pandoraSyncWaitTime int) error { + + framework.Logf("Delete volume snapshot content") + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, updatedSnapshotContent.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot content is deleted") + err := waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, updatedSnapshotContent.Name) + if err != nil { + return err + } + return nil +} + +/* createPreProvisionedSnapshotInGuestCluster created pre-provisioned snaphot on Guest cluster */ +func createPreProvisionedSnapshotInGuestCluster(ctx context.Context, volumeSnapshot *snapV1.VolumeSnapshot, + updatedSnapshotContent *snapV1.VolumeSnapshotContent, + snapc *snapclient.Clientset, namespace string, pandoraSyncWaitTime int, + svcVolumeSnapshotName string, diskSize string) (*snapV1.VolumeSnapshotContent, + *snapV1.VolumeSnapshot, bool, bool, error) { + + framework.Logf("Change the deletion policy of VolumeSnapshotContent from Delete to Retain " + + "in Guest Cluster") + updatedSnapshotContent, err := changeDeletionPolicyOfVolumeSnapshotContent(ctx, 
updatedSnapshotContent, + snapc, namespace, snapV1.VolumeSnapshotContentRetain) + if err != nil { + return nil, nil, false, false, fmt.Errorf("failed to change deletion policy of VolumeSnapshotContent: %v", err) + } + + framework.Logf("Delete dynamic volume snapshot from Guest Cluster") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Delete VolumeSnapshotContent from Guest Cluster explicitly") + err = deleteVolumeSnapshotContent(ctx, updatedSnapshotContent, snapc, namespace, pandoraSyncWaitTime) + if err != nil { + return nil, nil, false, false, fmt.Errorf("failed to delete VolumeSnapshotContent: %v", err) + } + + framework.Logf(fmt.Sprintf("Creating static VolumeSnapshotContent in Guest Cluster using "+ + "supervisor VolumeSnapshotName %s", svcVolumeSnapshotName)) + staticSnapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Create(ctx, + getVolumeSnapshotContentSpec(snapV1.DeletionPolicy("Delete"), svcVolumeSnapshotName, + "static-vs", namespace), metav1.CreateOptions{}) + if err != nil { + return nil, nil, false, false, fmt.Errorf("failed to create static VolumeSnapshotContent: %v", err) + } + + framework.Logf("Verify VolumeSnapshotContent is created or not in Guest Cluster") + staticSnapshotContent, err = snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, + staticSnapshotContent.Name, metav1.GetOptions{}) + if err != nil { + return nil, nil, false, false, fmt.Errorf("failed to get static VolumeSnapshotContent: %v", err) + } + framework.Logf("Snapshotcontent name is %s", staticSnapshotContent.ObjectMeta.Name) + + staticSnapshotContent, err = waitForVolumeSnapshotContentReadyToUse(*snapc, ctx, staticSnapshotContent.Name) + if err != nil { + return nil, nil, false, false, fmt.Errorf("volume snapshot content is not ready to use") + } + staticSnapshotContentCreated := true + + ginkgo.By("Create a static volume snapshot by static snapshotcontent") + staticVolumeSnapshot, err := snapc.SnapshotV1().VolumeSnapshots(namespace).Create(ctx, + getVolumeSnapshotSpecByName(namespace, "static-vs", + staticSnapshotContent.ObjectMeta.Name), metav1.CreateOptions{}) + if err != nil { + return nil, nil, false, false, fmt.Errorf("failed to create static volume snapshot: %v", err) + } + framework.Logf("Volume snapshot name is : %s", staticVolumeSnapshot.Name) + + ginkgo.By("Verify static volume snapshot is created") + staticSnapshot, err := waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, staticVolumeSnapshot.Name) + if err != nil { + return nil, nil, false, false, fmt.Errorf("volumeSnapshot is still not ready to use: %v", err) + } + if staticSnapshot.Status.RestoreSize.Cmp(resource.MustParse(diskSize)) != 0 { + return nil, nil, false, false, fmt.Errorf("expected RestoreSize does not match") + } + framework.Logf("Snapshot details is %+v", staticSnapshot) + staticSnapshotCreated := true + + return staticSnapshotContent, staticSnapshot, staticSnapshotContentCreated, staticSnapshotCreated, nil +} + +// verifyVolumeRestoreOperation verifies if volume(PVC) restore from given snapshot +// and creates pod and checks attach volume operation if verifyPodCreation is set to true +func verifyVolumeRestoreOperation(ctx context.Context, client clientset.Interface, + namespace string, storageclass *storagev1.StorageClass, + volumeSnapshot *snapV1.VolumeSnapshot, + verifyPodCreation bool) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume, *v1.Pod) { + + ginkgo.By("Create PVC from snapshot") + pvcSpec := 
getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, + v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) + + pvclaim2, err := fpv.CreatePVC(client, namespace, pvcSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(client, + []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle2 := persistentvolumes2[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle2 = getVolumeIDFromSupervisorCluster(volHandle2) + } + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + + if verifyPodCreation { + // Create a Pod to use this PVC, and verify volume has been attached + ginkgo.By("Creating pod to attach PV to the node") + pod, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim2}, false, + execRWXCommandPod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + var vmUUID string + nodeName := pod.Spec.NodeName + + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle2, nodeName)) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle2, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + ginkgo.By("Verify the volume is accessible and Read/write is possible") + cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "cat /mnt/volume1/Pod1.html "} + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) + gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) + + wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) + gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) + return pvclaim2, persistentvolumes2, pod + } + return pvclaim2, persistentvolumes2, nil +} + +// createPVCAndQueryVolumeInCNS creates PVc with a given storage class on a given namespace +// and verifies cns metadata of that volume if verifyCNSVolume is set to true +func createPVCAndQueryVolumeInCNS(client clientset.Interface, namespace string, + pvclaimLabels map[string]string, accessMode v1.PersistentVolumeAccessMode, + ds string, storageclass *storagev1.StorageClass, + verifyCNSVolume bool) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) { + pvclaim, err := createPVC(client, namespace, pvclaimLabels, ds, storageclass, accessMode) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Expect claim to provision volume successfully") + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, + []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + if guestCluster { + volHandle = getVolumeIDFromSupervisorCluster(volHandle) + } + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + + if verifyCNSVolume { + // Verify using CNS Query API if VolumeID retrieved from PV is present. 
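For readers of this patch, the sketch below shows how the helpers introduced in csi_snapshot_utils.go are intended to compose inside a spec: provision a PVC, snapshot it, restore it, and clean up. It is illustrative only and not part of the patch; suite-level names such as client, namespace, snapc, storageclass, diskSize and pandoraSyncWaitTime are assumed to come from the suite's BeforeEach, and the spec title is hypothetical.

```go
ginkgo.It("snapshot and restore round trip (sketch)", func() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Provision a source volume and confirm CNS has an entry for it.
	pvclaim, persistentvolumes := createPVCAndQueryVolumeInCNS(client, namespace, nil,
		v1.ReadWriteOnce, diskSize, storageclass, true)
	volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle

	// Fetch or create a "Delete" policy snapshot class, then take a dynamic snapshot.
	volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	volumeSnapshot, _, _, _, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace,
		snapc, volumeSnapshotClass, pvclaim, volHandle, diskSize)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	// Restore the snapshot into a new PVC, attach it to a pod and verify read/write.
	restoredPvc, _, pod := verifyVolumeRestoreOperation(ctx, client, namespace, storageclass,
		volumeSnapshot, true)

	// Tear down in reverse order; deleteVolumeSnapshot also waits for the CNS entry to go away.
	// (Cleanup of the source PVC is omitted for brevity.)
	err = fpod.DeletePodWithWait(client, pod)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	err = fpv.DeletePersistentVolumeClaim(client, restoredPvc.Name, namespace)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	_, _, err = deleteVolumeSnapshot(ctx, snapc, namespace, volumeSnapshot,
		pandoraSyncWaitTime, volHandle, snapshotId)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
})
```

Note that the helpers return created/not-created flags and errors instead of asserting internally, which is what lets callers register deferred cleanup conditionally, as the tests earlier in this diff do.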
+ ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) + gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) + } + return pvclaim, persistentvolumes +} + +// waitForVolumeSnapshotContentReadyToUse waits for the volume's snapshot content to be in ReadyToUse +func waitForVolumeSnapshotContentReadyToUse(client snapclient.Clientset, ctx context.Context, + name string) (*snapV1.VolumeSnapshotContent, error) { + var volumeSnapshotContent *snapV1.VolumeSnapshotContent + var err error + + waitErr := wait.PollImmediate(poll, pollTimeout*2, func() (bool, error) { + volumeSnapshotContent, err = client.SnapshotV1().VolumeSnapshotContents().Get(ctx, name, metav1.GetOptions{}) + framework.Logf("volumesnapshotcontent details: %v", volumeSnapshotContent) + if err != nil { + return false, fmt.Errorf("error fetching volumesnapshotcontent details : %v", err) + } + if volumeSnapshotContent.Status != nil && *volumeSnapshotContent.Status.ReadyToUse { + framework.Logf("%s volume snapshotContent is in ready state", name) + return true, nil + } + return false, nil + }) + return volumeSnapshotContent, waitErr +} + +// waitForCNSSnapshotToBeCreated wait till the give snapshot is created in CNS +func waitForCNSSnapshotToBeCreated(volumeId string, snapshotId string) error { + var err error + waitErr := wait.PollImmediate(poll, pollTimeout*2, func() (bool, error) { + err = verifySnapshotIsCreatedInCNS(volumeId, snapshotId, false) + if err != nil { + if strings.Contains(err.Error(), "snapshot entry is not present in CNS") { + return false, nil + } + return false, err + } + framework.Logf("Snapshot with ID: %v for volume with ID: %v is created in CNS now...", snapshotId, volumeId) + return true, nil + }) + return waitErr +} diff --git a/tests/e2e/csi_static_provisioning_basic.go b/tests/e2e/csi_static_provisioning_basic.go index fc9c8b3a84..c115b2bfc5 100644 --- a/tests/e2e/csi_static_provisioning_basic.go +++ b/tests/e2e/csi_static_provisioning_basic.go @@ -34,8 +34,6 @@ import ( "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - admissionapi "k8s.io/pod-security-admission/api" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -44,7 +42,9 @@ import ( "k8s.io/kubernetes/test/e2e/framework" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" fpv "k8s.io/kubernetes/test/e2e/framework/pv" + admissionapi "k8s.io/pod-security-admission/api" cnsregistervolumev1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsregistervolume/v1alpha1" ) @@ -52,7 +52,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { f := framework.NewDefaultFramework("e2e-csistaticprovision") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - + framework.TestContext.DeleteNamespace = true var ( client clientset.Interface namespace string @@ -165,6 +165,10 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { if guestCluster { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + } + if supervisorCluster { + 
dumpSvcNsEventsOnTestFailure(client, namespace) } }) @@ -326,7 +330,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { ginkgo.By("Verify the volume is accessible and available to the pod by creating an empty file") filepath := filepath.Join("/mnt/volume1", "/emptyFile.txt") - _, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute) + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify container volume metadata is present in CNS cache") @@ -436,7 +440,7 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { // Verify that fstype used to mount volume inside pod is xfs. ginkgo.By("Verify that filesystem type used to mount volume is xfs as expected") - _, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, xfsFSType, time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/tests/e2e/csi_static_provisioning_file_basic.go b/tests/e2e/csi_static_provisioning_file_basic.go index dfa2c6dd57..862427ee99 100644 --- a/tests/e2e/csi_static_provisioning_file_basic.go +++ b/tests/e2e/csi_static_provisioning_file_basic.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" fpv "k8s.io/kubernetes/test/e2e/framework/pv" admissionapi "k8s.io/pod-security-admission/api" @@ -192,7 +193,7 @@ var _ = ginkgo.Describe("[csi-file-vanilla] Basic File Volume Static Provisionin ginkgo.By("Verify the volume is accessible and available to the pod by creating an empty file") filepath := filepath.Join("/mnt/volume1", "/emptyFile.txt") - _, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute) + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify container volume metadata is matching the one in CNS cache") diff --git a/tests/e2e/data_persistence.go b/tests/e2e/data_persistence.go index 15e4430d21..16843746ef 100644 --- a/tests/e2e/data_persistence.go +++ b/tests/e2e/data_persistence.go @@ -35,6 +35,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" fpv "k8s.io/kubernetes/test/e2e/framework/pv" admissionapi "k8s.io/pod-security-admission/api" ) @@ -110,11 +111,15 @@ var _ = ginkgo.Describe("Data Persistence", func() { if guestCluster { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + } + if supervisorCluster { + dumpSvcNsEventsOnTestFailure(client, namespace) } }) ginkgo.It("[csi-block-vanilla] [csi-supervisor] [csi-guest] [csi-block-vanilla-parallelized] "+ - "Should create and delete pod with the same volume source", func() { + "Should create and delete pod with the same volume source and data", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() var sc *storagev1.StorageClass @@ 
-343,7 +348,7 @@ var _ = ginkgo.Describe("Data Persistence", func() { gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node %s", vmUUID) ginkgo.By("Verify that filesystem type is xfs as expected") - _, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, xfsFSType, time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/tests/e2e/docs/supervisor_cluster_setup.md b/tests/e2e/docs/supervisor_cluster_setup.md index 8f2c5bc4c6..aae8ea62d6 100644 --- a/tests/e2e/docs/supervisor_cluster_setup.md +++ b/tests/e2e/docs/supervisor_cluster_setup.md @@ -98,6 +98,10 @@ datacenters should be comma separated if deployed on multi-datacenters export SHARED_NFS_DATASTORE_URL="" #shared VMFS datastore url export SHARED_VMFS_DATASTORE_URL="" + #For vsan direct tests for spbm policy driven allocation tests, set following variables + export USE_VSAN_DIRECT_DATASTORE_IN_WCP="VSAN_DIRECT" + export SHARED_VSAND_DATASTORE_URL="" + export SHARED_VSAND_DATASTORE2_URL="" export BUSYBOX_IMAGE="" ### To run full sync test, need do extra following steps diff --git a/tests/e2e/e2e_common.go b/tests/e2e/e2e_common.go index d8a58224f0..c06c1cd663 100644 --- a/tests/e2e/e2e_common.go +++ b/tests/e2e/e2e_common.go @@ -77,8 +77,12 @@ const ( envSharedNFSDatastoreURL = "SHARED_NFS_DATASTORE_URL" envSharedVMFSDatastoreURL = "SHARED_VMFS_DATASTORE_URL" envSharedVMFSDatastore2URL = "SHARED_VMFS_DATASTORE2_URL" + envVsanDirectSetup = "USE_VSAN_DIRECT_DATASTORE_IN_WCP" + envVsanDDatastoreURL = "SHARED_VSAND_DATASTORE_URL" + envVsanDDatastore2URL = "SHARED_VSAND_DATASTORE2_URL" envStoragePolicyNameForNonSharedDatastores = "STORAGE_POLICY_FOR_NONSHARED_DATASTORES" envStoragePolicyNameForSharedDatastores = "STORAGE_POLICY_FOR_SHARED_DATASTORES" + envStoragePolicyNameForVsanVmfsDatastores = "STORAGE_POLICY_FOR_VSAN_VMFS_DATASTORES" envStoragePolicyNameForSharedDatastores2 = "STORAGE_POLICY_FOR_SHARED_DATASTORES_2" envStoragePolicyNameFromInaccessibleZone = "STORAGE_POLICY_FROM_INACCESSIBLE_ZONE" envStoragePolicyNameWithThickProvision = "STORAGE_POLICY_WITH_THICK_PROVISIONING" @@ -92,8 +96,11 @@ const ( envVmdkDiskURL = "DISK_URL_PATH" envVolumeOperationsScale = "VOLUME_OPS_SCALE" envComputeClusterName = "COMPUTE_CLUSTER_NAME" + envTKGImage = "TKG_IMAGE_NAME" execCommand = "/bin/df -T /mnt/volume1 | " + "/bin/awk 'FNR == 2 {print $2}' > /mnt/volume1/fstype && while true ; do sleep 2 ; done" + execRWXCommandPod = "echo 'Hello message from Pod' > /mnt/volume1/Pod.html && " + + "chmod o+rX /mnt /mnt/volume1/Pod.html && while true ; do sleep 2 ; done" execRWXCommandPod1 = "echo 'Hello message from Pod1' > /mnt/volume1/Pod1.html && " + "chmod o+rX /mnt /mnt/volume1/Pod1.html && while true ; do sleep 2 ; done" execRWXCommandPod2 = "echo 'Hello message from Pod2' > /mnt/volume1/Pod2.html && " + @@ -192,8 +199,8 @@ const ( waitTimeForCNSNodeVMAttachmentReconciler = 30 * time.Second wcpServiceName = "wcp" vmcWcpHost = "10.2.224.24" //This is the LB IP of VMC WCP and its constant - devopsTKG = "test-cluster-e2e-script" - cloudadminTKG = "test-cluster-e2e-script-1" + devopsTKG = "test-cluster-e2e-script-2" + cloudadminTKG = "test-cluster-e2e-script-3" vmOperatorAPI = "/apis/vmoperator.vmware.com/v1alpha1/" devopsUser = "testuser" zoneKey = "failure-domain.beta.kubernetes.io/zone" @@ -214,17 +221,70 @@ const ( topologyLength = 5 
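The supervisor setup doc above tells testbed owners to export the new vSAN Direct variables; on the consuming side they surface through the env constants just added to e2e_common.go. The helper below is a hypothetical sketch of that lookup (the function name is made up, not something this patch adds), using only GetAndExpectStringEnvVar, which already exists in this file.

```go
// getVsanDirectDatastoreURLs is an illustrative helper (not part of this patch) showing how
// a spec could pick up the vSAN Direct datastore URLs documented in supervisor_cluster_setup.md.
func getVsanDirectDatastoreURLs() (string, string) {
	// Only meaningful when the WCP testbed was brought up for vSAN Direct policy tests.
	if os.Getenv(envVsanDirectSetup) != "VSAN_DIRECT" {
		return "", ""
	}
	return GetAndExpectStringEnvVar(envVsanDDatastoreURL),
		GetAndExpectStringEnvVar(envVsanDDatastore2URL)
}
```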
tkgshaTopologyLevels = 1 vmcPrdEndpoint = "https://vmc.vmware.com/vmc/api/orgs/" + vsphereClusterIdConfigMapName = "vsphere-csi-cluster-id" authAPI = "https://console.cloud.vmware.com/csp/gateway/am/api/auth" + "/api-tokens/authorize" ) +/* +// test suite labels + +flaky -> label include the testcases which fails intermittently +disruptive -> label include the testcases which are disruptive in nature +vanilla -> label include the testcases for block, file, configSecret, topology etc. +stable -> label include the testcases which do not fail +longRunning -> label include the testcases which takes longer time for completion +p0 -> label include the testcases which are P0 +p1 -> label include the testcases which are P1 +p2 -> label include the testcases which are P2 +semiAutomated -> label include the testcases which are semi-automated +newTests -> label include the testcases which are newly automated +core -> label include the testcases specific to block or file +level2 -> label include the level-2 topology testcases or pipeline specific +level5 -> label include the level-5 topology testcases +customPort -> label include the testcases running on vCenter custom port +deprecated ->label include the testcases which are no longer in execution +*/ +const ( + flaky = "flaky" + disruptive = "disruptive" + wcp = "wcp" + tkg = "tkg" + vanilla = "vanilla" + topology = "topology" + preferential = "preferential" + vsphereConfigSecret = "vsphereConfigSecret" + snapshot = "snapshot" + stable = "stable" + newTests = "newTests" + multiVc = "multiVc" + block = "block" + file = "file" + core = "core" + p0 = "p0" + p1 = "p1" + p2 = "p2" + vsanStretch = "vsanStretch" + longRunning = "longRunning" + deprecated = "deprecated" + vmc = "vmc" + tkgsHA = "tkgsHA" + thickThin = "thickThin" + customPort = "customPort" + windows = "windows" + semiAutomated = "semiAutomated" + level2 = "level2" + level5 = "level5" +) + // The following variables are required to know cluster type to run common e2e // tests. These variables will be set once during test suites initialization. 
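The label constants above are plain strings, so one natural way to consume them is through Ginkgo v2 label decorators. The snippet below is only an illustration of that wiring (the spec text and chosen labels are made up), not something this patch adds.

```go
// Hypothetical spec showing how the label constants could be attached with Ginkgo v2
// decorators and later selected with `ginkgo --label-filter="p0 && block && !flaky"`.
var _ = ginkgo.Describe("[csi-block-vanilla] Example labelled suite",
	ginkgo.Label(p0, block, vanilla, core), func() {

	ginkgo.It("provisions and deletes a volume", ginkgo.Label(stable), func() {
		// ... test body ...
	})
})
```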
var ( - vanillaCluster bool - supervisorCluster bool - guestCluster bool - rwxAccessMode bool + vanillaCluster bool + supervisorCluster bool + guestCluster bool + rwxAccessMode bool + wcpVsanDirectCluster bool ) // For busybox pod image @@ -243,7 +303,6 @@ var ( migratedPluginAnnotation = "storage.alpha.kubernetes.io/migrated-plugins" pvcAnnotationStorageProvisioner = "volume.beta.kubernetes.io/storage-provisioner" pvAnnotationProvisionedBy = "pv.kubernetes.io/provisioned-by" - scAnnotation4Statefulset = "volume.beta.kubernetes.io/storage-class" nodeMapper = &NodeMapper{} ) @@ -260,11 +319,6 @@ var ( configSecretTestUser2 = "testuser2" ) -// CSI Internal FSSs -var ( - useCsiNodeID = "use-csinode-id" -) - // Nimbus generated passwords var ( nimbusK8sVmPwd = "NIMBUS_K8S_VM_PWD" @@ -292,6 +346,24 @@ var ( datastoreClusterMap = "DATASTORE_CLUSTER_MAP" ) +// For multivc +var ( + envSharedDatastoreURLVC1 = "SHARED_VSPHERE_DATASTORE_URL_VC1" + envSharedDatastoreURLVC2 = "SHARED_VSPHERE_DATASTORE_URL_VC2" + envStoragePolicyNameToDeleteLater = "STORAGE_POLICY_TO_DELETE_LATER" + envMultiVCSetupType = "MULTI_VC_SETUP_TYPE" + envStoragePolicyNameVC1 = "STORAGE_POLICY_VC1" + envStoragePolicyNameInVC1VC2 = "STORAGE_POLICY_NAME_COMMON_IN_VC1_VC2" + envPreferredDatastoreUrlVC1 = "PREFERRED_DATASTORE_URL_VC1" + envPreferredDatastoreUrlVC2 = "PREFERRED_DATASTORE_URL_VC2" +) + +// VolumeSnapshotClass env variables for tkg-snapshot +var ( + envVolSnapClassDel = "VOLUME_SNAPSHOT_CLASS_DELETE" + deletionPolicy = "Delete" +) + // GetAndExpectStringEnvVar parses a string from env variable. func GetAndExpectStringEnvVar(varName string) string { varValue := os.Getenv(varName) diff --git a/tests/e2e/file_volume_statefulsets.go b/tests/e2e/file_volume_statefulsets.go index 1303c1cfb3..f73c5ef478 100644 --- a/tests/e2e/file_volume_statefulsets.go +++ b/tests/e2e/file_volume_statefulsets.go @@ -25,13 +25,16 @@ import ( ginkgo "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "golang.org/x/crypto/ssh" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fpod "k8s.io/kubernetes/test/e2e/framework/pod" + fpv "k8s.io/kubernetes/test/e2e/framework/pv" fss "k8s.io/kubernetes/test/e2e/framework/statefulset" admissionapi "k8s.io/pod-security-admission/api" ) @@ -116,7 +119,7 @@ var _ = ginkgo.Describe("[csi-file-vanilla] File Volume statefulset", func() { statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1].Spec.AccessModes[0] = v1.ReadWriteMany statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = scName + Spec.StorageClassName = &scName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) // Waiting for pods status to be Ready @@ -282,7 +285,7 @@ var _ = ginkgo.Describe("[csi-file-vanilla] File Volume statefulset", func() { statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1].Spec.AccessModes[0] = v1.ReadWriteMany statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. 
- Annotations["volume.beta.kubernetes.io/storage-class"] = scName + Spec.StorageClassName = &scName ginkgo.By("Creating statefulset") CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) @@ -441,7 +444,7 @@ var _ = ginkgo.Describe("[csi-file-vanilla] File Volume statefulset", func() { statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1].Spec.AccessModes[0] = v1.ReadWriteMany statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = scName + Spec.StorageClassName = &scName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) // Waiting for pods status to be Ready @@ -557,7 +560,7 @@ var _ = ginkgo.Describe("[csi-file-vanilla] File Volume statefulset", func() { statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1].Spec.AccessModes[0] = v1.ReadWriteMany statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = scName + Spec.StorageClassName = &scName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) // Waiting for pods status to be Ready @@ -580,11 +583,11 @@ var _ = ginkgo.Describe("[csi-file-vanilla] File Volume statefulset", func() { // Restart CSI daemonset ginkgo.By("Restart Daemonset") cmd := []string{"rollout", "restart", "daemonset/vsphere-csi-node", "--namespace=" + csiSystemNamespace} - framework.RunKubectlOrDie(csiSystemNamespace, cmd...) + e2ekubectl.RunKubectlOrDie(csiSystemNamespace, cmd...) ginkgo.By("Waiting for daemon set rollout status to finish") statusCheck := []string{"rollout", "status", "daemonset/vsphere-csi-node", "--namespace=" + csiSystemNamespace} - framework.RunKubectlOrDie(csiSystemNamespace, statusCheck...) + e2ekubectl.RunKubectlOrDie(csiSystemNamespace, statusCheck...) // wait for csi Pods to be in running ready state err = fpod.WaitForPodsRunningReady(client, csiSystemNamespace, int32(num_csi_pods), 0, pollTimeout, ignoreLabels) @@ -699,4 +702,118 @@ var _ = ginkgo.Describe("[csi-file-vanilla] File Volume statefulset", func() { gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(replicas)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") }) + + /* + Verify List volume Response on vsphere-csi-controller logs + Note: ist volume Threshold is set to 1 , and query limit set to 3 + 1. Create SC + 2. Create statefull set with 3 replica + 3. Bring down the CSI driver replica to 1 , so that it is easy to validate the List volume Response. + 4. Wait for all the PVC to reach bound and PODs to reach running state. + 5. Verify the Listvolume response in logs. It should contain all the 3 volumeID's noted in step 5 + 6. Scale up the Statefullset replica to 5 and validate the Pagination. + The 1st List volume Response will have the "token for next set:" + 7. Delete All the volumes + 8. Verify list volume response for 0 volume. + 9. Clean up the statefull set + 10. 
Increase the CSI driver replicas to 3 + + */ + ginkgo.It("List-volumeResponseFor-fileVolumes", func() { + curtime := time.Now().Unix() + randomValue := rand.Int() + val := strconv.FormatInt(int64(randomValue), 10) + val = string(val[1:3]) + curtimestring := strconv.FormatInt(curtime, 10) + scName := "nginx-sc-default-" + curtimestring + val + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var volumesBeforeScaleUp []string + containerName := "vsphere-csi-controller" + + ginkgo.By("Scale down CSI driver pods to 1, so that it will " + + "be easy to validate all ListVolume responses on one driver pod") + collectPodLogs(ctx, client, csiSystemNamespace) + scaledownCSIDriver, err := scaleCSIDriver(ctx, client, namespace, 1) + gomega.Expect(scaledownCSIDriver).To(gomega.BeTrue(), "csi driver scaledown is not successful") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By("Scale up the csi-driver replicas to 3") + success, err := scaleCSIDriver(ctx, client, namespace, 3) + gomega.Expect(success).To(gomega.BeTrue(), "csi driver scale up to 3 replicas not successful") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating StorageClass for Statefulset") + scParameters[scParamFsType] = nfs4FSType + scSpec := getVSphereStorageClassSpec(scName, scParameters, nil, "", "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + ginkgo.By("Creating statefulset with 3 replicas") + statefulset, _, volumesBeforeScaleUp := createStsDeployment(ctx, client, namespace, sc, false, + false, 0, "", v1.ReadWriteMany) + replicas := *(statefulset.Spec.Replicas) + + // List volume responses will show up in the logs at an interval of every 1 minute.
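For reference, the pagination this spec validates follows the CSI ListVolumes contract: each call returns at most MaxEntries entries plus a NextToken, and the driver logs that token as "token for next set". A minimal sketch of how a client pages through ListVolumes with the standard CSI Go bindings (illustrative only, not part of this suite; the gRPC connection setup is assumed):

```go
package sketch

import (
	"context"
	"fmt"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
)

// listAllVolumes pages through a CSI driver's volumes the way the
// "token for next set" log lines imply: ask for at most maxEntries per call
// and keep passing back the returned NextToken until it comes back empty.
func listAllVolumes(ctx context.Context, conn *grpc.ClientConn, maxEntries int32) ([]*csi.ListVolumesResponse_Entry, error) {
	controller := csi.NewControllerClient(conn)
	var entries []*csi.ListVolumesResponse_Entry
	token := ""
	for {
		resp, err := controller.ListVolumes(ctx, &csi.ListVolumesRequest{
			MaxEntries:    maxEntries, // e.g. 3, matching the query limit noted in the test plan
			StartingToken: token,
		})
		if err != nil {
			return nil, err
		}
		entries = append(entries, resp.Entries...)
		if resp.NextToken == "" { // an empty token means this was the last page
			return entries, nil
		}
		token = resp.NextToken
		fmt.Printf("token for next set: %s\n", token) // analogous to the driver log line being grepped
	}
}
```

With the list-volume threshold at 1 and the query limit at 3, scaling the StatefulSet to 5 replicas forces more than one page, which is why the test later greps for "token for next set: 3".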
+ time.Sleep(pollTimeoutShort) + nimbusGeneratedK8sVmPwd := GetAndExpectStringEnvVar(nimbusK8sVmPwd) + sshClientConfig := &ssh.ClientConfig{ + User: "root", + Auth: []ssh.AuthMethod{ + ssh.Password(nimbusGeneratedK8sVmPwd), + }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + + ginkgo.By("Validate ListVolume Response for all the volumes") + logMessage := "List volume response: entries:" + _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, + containerName, logMessage, volumesBeforeScaleUp) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + replicas = replicas + 2 + scaleUpStsAndVerifyPodMetadata(ctx, client, namespace, statefulset, + replicas, true, true) + + time.Sleep(pollTimeoutShort) + ginkgo.By("Validate pagination") + logMessage = "token for next set: 3" + _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, containerName, logMessage, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + replicas = 0 + ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas)) + _, scaledownErr := fss.Scale(client, statefulset, replicas) + gomega.Expect(scaledownErr).NotTo(gomega.HaveOccurred()) + fss.WaitForStatusReplicas(client, statefulset, replicas) + ssPodsAfterScaleDown := fss.GetPodList(client, statefulset) + gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(replicas)).To(gomega.BeTrue(), + "Number of Pods in the statefulset should match with number of replicas") + + pvcList := getAllPVCFromNamespace(client, namespace) + for _, pvc := range pvcList.Items { + framework.ExpectNoError(fpv.DeletePersistentVolumeClaim(client, pvc.Name, namespace), + "Failed to delete PVC", pvc.Name) + } + // List volume responses will show up in the logs at an interval of every 2 minutes.
+ // To see the empty response, it is required to wait for 2 minutes after deleting all the PVCs. + time.Sleep(time.Minute * 2) + + ginkgo.By("Validate ListVolume Response when no volumes are present") + logMessage = "ListVolumes served 0 results" + + _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, containerName, logMessage, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) }) diff --git a/tests/e2e/fullsync_test_for_block_volume.go b/tests/e2e/fullsync_test_for_block_volume.go index 0e8d65e8a4..d85b55252d 100644 --- a/tests/e2e/fullsync_test_for_block_volume.go +++ b/tests/e2e/fullsync_test_for_block_volume.go @@ -127,11 +127,16 @@ var _ bool = ginkgo.Describe("full-sync-test", func() { vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort ctx, cancel := context.WithCancel(context.Background()) defer cancel() + if isVsanHealthServiceStopped { + startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped) + } if supervisorCluster { deleteResourceQuota(client, namespace) + dumpSvcNsEventsOnTestFailure(client, namespace) } - if isVsanHealthServiceStopped { - startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped) + if guestCluster { + svcClient, svNamespace := getSvcClientAndNamespace() + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) } }) @@ -793,7 +798,7 @@ var _ bool = ginkgo.Describe("full-sync-test", func() { ginkgo.By("create a pvc pvc1, wait for pvc bound to pv") volHandle, pvc, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, "", storagePolicyName, namespace) + f, client, "", storagePolicyName, namespace, ext4FSType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -810,7 +815,7 @@ var _ bool = ginkgo.Describe("full-sync-test", func() { }() ginkgo.By("create a pod pod1, using pvc1") - pod, _ := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvc, volHandle) + pod, _ := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvc, volHandle, "") defer func() { err := fpod.DeletePodWithWait(client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/tests/e2e/gc_block_resize_retain_policy.go b/tests/e2e/gc_block_resize_retain_policy.go index 93866bbfbe..3183cd6fa6 100644 --- a/tests/e2e/gc_block_resize_retain_policy.go +++ b/tests/e2e/gc_block_resize_retain_policy.go @@ -37,6 +37,7 @@ import ( clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -161,6 +162,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po gomega.Expect(err).NotTo(gomega.HaveOccurred()) svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) }) // Combined: @@ -211,7 +213,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name - lastOutput := framework.RunKubectlOrDie(namespace, cmd...) + lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...)
gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") @@ -329,7 +331,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po ginkgo.By("Verify after expansion the filesystem type is as expected") cmd[1] = pod.Name - lastOutput = framework.RunKubectlOrDie(namespace, cmd...) + lastOutput = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) ginkgo.By("Waiting for file system resize to finish") @@ -554,7 +556,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po cmd2 = []string{"exec", pod.Name, fmt.Sprintf("--namespace=%v", namespaceNewGC), "--", "/bin/sh", "-c", "df -Tkm | grep /mnt/volume1"} - lastOutput := framework.RunKubectlOrDie(namespaceNewGC, cmd2...) + lastOutput := e2ekubectl.RunKubectlOrDie(namespaceNewGC, cmd2...) gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) ginkgo.By("Waiting for file system resize to finish") @@ -670,7 +672,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name - lastOutput := framework.RunKubectlOrDie(namespace, cmd...) + lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") diff --git a/tests/e2e/gc_block_volume_expansion.go b/tests/e2e/gc_block_volume_expansion.go index 780ec1e85c..b0c875e6f1 100644 --- a/tests/e2e/gc_block_volume_expansion.go +++ b/tests/e2e/gc_block_volume_expansion.go @@ -40,6 +40,7 @@ import ( clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -146,8 +147,9 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { setResourceQuota(svcClient, svNamespace, defaultrqLimit) if isGCCSIDeploymentPODdown { - _ = updateDeploymentReplica(client, 1, vSphereCSIControllerPodNamePrefix, csiSystemNamespace) + _ = updateDeploymentReplica(client, 3, vSphereCSIControllerPodNamePrefix, csiSystemNamespace) } + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) }) // Verify offline expansion triggers FS resize. @@ -185,7 +187,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name - lastOutput := framework.RunKubectlOrDie(namespace, cmd...) + lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") @@ -205,7 +207,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - _ = framework.RunKubectlOrDie(namespace, "cp", testdataFile, + _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", testdataFile, fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name)) // Delete POD. 
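The dumpSvcNsEventsOnTestFailure call added to the AfterEach blocks throughout these guest-cluster specs presumably dumps supervisor-namespace events when a spec fails, which makes post-mortem triage easier. A rough sketch of what such a helper can look like with client-go and Ginkgo v2 (assumed names and behavior, not the suite's actual implementation):

```go
package sketch

import (
	"context"
	"fmt"

	ginkgo "github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// dumpNamespaceEventsOnFailure logs all events in the given namespace, but only
// when the currently running Ginkgo spec has failed.
func dumpNamespaceEventsOnFailure(client clientset.Interface, namespace string) {
	if !ginkgo.CurrentSpecReport().Failed() {
		return
	}
	events, err := client.CoreV1().Events(namespace).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		fmt.Printf("failed to list events in namespace %s: %v\n", namespace, err)
		return
	}
	for _, e := range events.Items {
		fmt.Printf("%s %s/%s %s: %s\n", e.Type, namespace, e.InvolvedObject.Name, e.Reason, e.Message)
	}
}
```

Keeping the check inside the helper means it can be called unconditionally from AfterEach, as these diffs do, without cluttering passing runs.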
@@ -286,7 +288,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify after expansion the filesystem type is as expected") cmd[1] = pod.Name - lastOutput = framework.RunKubectlOrDie(namespace, cmd...) + lastOutput = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) ginkgo.By("Waiting for file system resize to finish") @@ -307,7 +309,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { } ginkgo.By("Checking data consistency after PVC resize") - _ = framework.RunKubectlOrDie(namespace, "cp", + _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name), testdataFile+"_pod") defer func() { op, err = exec.Command("rm", "-f", testdataFile+"_pod").Output() @@ -381,7 +383,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name - lastOutput := framework.RunKubectlOrDie(namespace, cmd...) + lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") @@ -493,7 +495,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify after expansion the filesystem type is as expected") cmd[1] = pod.Name - lastOutput = framework.RunKubectlOrDie(namespace, cmd...) + lastOutput = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) ginkgo.By("Waiting for file system resize to finish") @@ -905,7 +907,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name - lastOutput := framework.RunKubectlOrDie(namespace, cmd...) + lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") @@ -990,7 +992,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify after expansion the filesystem type is as expected") cmd[1] = pod.Name - lastOutput = framework.RunKubectlOrDie(namespace, cmd...) + lastOutput = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) ginkgo.By("Waiting for file system resize to finish") @@ -1185,7 +1187,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name - lastOutput := framework.RunKubectlOrDie(namespace, cmd...) + lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") @@ -1291,7 +1293,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify after expansion the filesystem type is as expected") cmd[1] = pod.Name - lastOutput = framework.RunKubectlOrDie(namespace, cmd...) + lastOutput = e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) ginkgo.By("Waiting for file system resize to finish") @@ -1433,7 +1435,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name - lastOutput := framework.RunKubectlOrDie(namespace, cmd...) + lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) rand.New(rand.NewSource(time.Now().Unix())) @@ -1447,13 +1449,13 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - _ = framework.RunKubectlOrDie(namespace, "cp", testdataFile, + _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", testdataFile, fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name)) onlineVolumeResizeCheck(f, client, namespace, svcPVCName, volHandle, pvclaim, pod) ginkgo.By("Checking data consistency after PVC resize") - _ = framework.RunKubectlOrDie(namespace, "cp", + _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name), testdataFile+"_pod") defer func() { op, err = exec.Command("rm", "-f", testdataFile+"_pod").Output() @@ -1514,7 +1516,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name - lastOutput := framework.RunKubectlOrDie(namespace, cmd...) + lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") @@ -2038,7 +2040,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name - lastOutput := framework.RunKubectlOrDie(namespace, cmd...) + lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") @@ -2126,7 +2128,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify after expansion the filesystem type is as expected") cmd[1] = pod.Name - lastOutput = framework.RunKubectlOrDie(namespace, cmd...) + lastOutput = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) ginkgo.By("Waiting for file system resize to finish") @@ -2192,7 +2194,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name - lastOutput := framework.RunKubectlOrDie(namespace, cmd...) + lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") @@ -2270,7 +2272,7 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify after expansion the filesystem type is as expected") cmd[1] = pod.Name - lastOutput = framework.RunKubectlOrDie(namespace, cmd...) + lastOutput = e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) ginkgo.By("Waiting for file system resize to finish") diff --git a/tests/e2e/gc_cns_nodevm_attachment.go b/tests/e2e/gc_cns_nodevm_attachment.go index 2a9caa272f..30fb071ad4 100644 --- a/tests/e2e/gc_cns_nodevm_attachment.go +++ b/tests/e2e/gc_cns_nodevm_attachment.go @@ -67,6 +67,7 @@ var _ = ginkgo.Describe("[csi-guest] CnsNodeVmAttachment persistence", func() { ginkgo.AfterEach(func() { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) }) // Test-1 (Attach/Detach) diff --git a/tests/e2e/gc_file_share_negative.go b/tests/e2e/gc_file_share_negative.go index f8f04fba82..77b26ad209 100644 --- a/tests/e2e/gc_file_share_negative.go +++ b/tests/e2e/gc_file_share_negative.go @@ -59,6 +59,7 @@ var _ = ginkgo.Describe("[csi-guest] File Share on Non File Service enabled setu ginkgo.AfterEach(func() { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) }) /* diff --git a/tests/e2e/gc_full_sync.go b/tests/e2e/gc_full_sync.go index 67bdff05eb..1292f04842 100644 --- a/tests/e2e/gc_full_sync.go +++ b/tests/e2e/gc_full_sync.go @@ -83,6 +83,7 @@ var _ = ginkgo.Describe("[csi-guest] Guest cluster fullsync tests", func() { ginkgo.AfterEach(func() { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) }) // Steps: diff --git a/tests/e2e/gc_metadata_syncer.go b/tests/e2e/gc_metadata_syncer.go index bd15153b82..8738a7f4ba 100644 --- a/tests/e2e/gc_metadata_syncer.go +++ b/tests/e2e/gc_metadata_syncer.go @@ -96,6 +96,7 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { ginkgo.By(fmt.Sprintln("Starting vsan-health on the vCenter host")) startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped) } + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) }) // Steps: diff --git a/tests/e2e/gc_rwx_basic.go b/tests/e2e/gc_rwx_basic.go index 3923f0559e..e68b2cb1ff 100644 --- a/tests/e2e/gc_rwx_basic.go +++ b/tests/e2e/gc_rwx_basic.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -44,6 +45,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Basic File Volume Provision Test", func() scParameters map[string]string storagePolicyName string ) + ginkgo.BeforeEach(func() { client = f.ClientSet namespace = getNamespaceToRunTests(f) @@ -57,6 +59,11 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Basic File Volume Provision Test", func() } }) + ginkgo.AfterEach(func() { + svcClient, svNamespace := getSvcClientAndNamespace() + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + }) + /* Test to verify file volume provision - basic tests. 
@@ -150,13 +157,13 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Basic File Volume Provision Test", func() ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, wrtiecmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) }) @@ -283,25 +290,25 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Basic File Volume Provision Test", func() ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, wrtiecmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) ginkgo.By("Verify the volume is accessible and Read/write is possible from pod2") cmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output = framework.RunKubectlOrDie(namespace, cmd2...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd2...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) wrtiecmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod2' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, wrtiecmd2...) - output = framework.RunKubectlOrDie(namespace, cmd2...) + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd2...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd2...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod2")).NotTo(gomega.BeFalse()) }) @@ -394,13 +401,13 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Basic File Volume Provision Test", func() ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, wrtiecmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) // Delete POD @@ -447,13 +454,13 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Basic File Volume Provision Test", func() ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd = []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output = framework.RunKubectlOrDie(namespace, cmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) wrtiecmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod2 file' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, wrtiecmd2...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd2...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod2 file")).NotTo(gomega.BeFalse()) }) }) diff --git a/tests/e2e/gc_rwx_deployments.go b/tests/e2e/gc_rwx_deployments.go index 0daef0b13b..16ed6c3672 100644 --- a/tests/e2e/gc_rwx_deployments.go +++ b/tests/e2e/gc_rwx_deployments.go @@ -67,6 +67,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Deployments", ginkgo.AfterEach(func() { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) }) /* diff --git a/tests/e2e/gc_rwx_destructive.go b/tests/e2e/gc_rwx_destructive.go index 77f039d72b..676e45f944 100644 --- a/tests/e2e/gc_rwx_destructive.go +++ b/tests/e2e/gc_rwx_destructive.go @@ -68,6 +68,7 @@ var _ = ginkgo.Describe("[rwm-csi-destructive-tkg] Statefulsets with File Volume ginkgo.AfterEach(func() { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) }) /* @@ -167,10 +168,11 @@ var _ = ginkgo.Describe("[rwm-csi-destructive-tkg] Statefulsets with File Volume statefulset := GetStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") + scName := defaultNginxStorageClassName statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1].Spec.AccessModes[0] = v1.ReadWriteMany statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. 
- Annotations["volume.beta.kubernetes.io/storage-class"] = defaultNginxStorageClassName + Spec.StorageClassName = &scName *statefulset.Spec.Replicas = 2 CreateStatefulSet(namespace, statefulset, clientNewGc) replicas := *(statefulset.Spec.Replicas) diff --git a/tests/e2e/gc_rwx_multi_gc.go b/tests/e2e/gc_rwx_multi_gc.go index fbb01ab52a..13726f8e5c 100644 --- a/tests/e2e/gc_rwx_multi_gc.go +++ b/tests/e2e/gc_rwx_multi_gc.go @@ -30,6 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -69,6 +70,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Volume Provision Across TKG clusters", fu ginkgo.AfterEach(func() { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) }) /* @@ -220,13 +222,13 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Volume Provision Across TKG clusters", fu ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) writeCmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, writeCmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, writeCmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) // Getting the client for the second GC @@ -326,13 +328,13 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Volume Provision Across TKG clusters", fu ginkgo.By("Verify the volume is accessible and Read/write is possible from pod2") cmd2 := []string{"exec", pod2.Name, "--namespace=" + namespaceNewGC, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output = framework.RunKubectlOrDie(namespaceNewGC, cmd2...) + output = e2ekubectl.RunKubectlOrDie(namespaceNewGC, cmd2...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) wrtiecmd2 := []string{"exec", pod2.Name, "--namespace=" + namespaceNewGC, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod2' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespaceNewGC, wrtiecmd2...) - output = framework.RunKubectlOrDie(namespaceNewGC, cmd2...) + e2ekubectl.RunKubectlOrDie(namespaceNewGC, wrtiecmd2...) + output = e2ekubectl.RunKubectlOrDie(namespaceNewGC, cmd2...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod2")).NotTo(gomega.BeFalse()) }) @@ -576,13 +578,13 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Volume Provision Across TKG clusters", fu ginkgo.By("Verify the volume is accessible and Read/write is possible from pod2") cmd2 := []string{"exec", pod2.Name, "--namespace=" + namespaceNewGC, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod2.html "} - output := framework.RunKubectlOrDie(namespaceNewGC, cmd2...) 
+ output := e2ekubectl.RunKubectlOrDie(namespaceNewGC, cmd2...) gomega.Expect(strings.Contains(output, "Hello message from Pod2")).NotTo(gomega.BeFalse()) wrtiecmd2 := []string{"exec", pod2.Name, "--namespace=" + namespaceNewGC, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod2' > /mnt/volume1/Pod2.html"} - framework.RunKubectlOrDie(namespaceNewGC, wrtiecmd2...) - output = framework.RunKubectlOrDie(namespaceNewGC, cmd2...) + e2ekubectl.RunKubectlOrDie(namespaceNewGC, wrtiecmd2...) + output = e2ekubectl.RunKubectlOrDie(namespaceNewGC, cmd2...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod2")).NotTo(gomega.BeFalse()) }) diff --git a/tests/e2e/gc_rwx_multi_ns_gc.go b/tests/e2e/gc_rwx_multi_ns_gc.go index 0fd13aa3c0..cc79a06276 100644 --- a/tests/e2e/gc_rwx_multi_ns_gc.go +++ b/tests/e2e/gc_rwx_multi_ns_gc.go @@ -30,6 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -66,6 +67,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Volume Provision Across Namespace", func( ginkgo.AfterEach(func() { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) }) /* @@ -327,8 +329,8 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] Volume Provision Across Namespace", func( writeCmd := []string{"exec", pod2.Name, "--namespace=" + pod2.Namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod2' > /mnt/volume1/Pod2.html"} - framework.RunKubectlOrDie(pod2.Namespace, writeCmd...) - output := framework.RunKubectlOrDie(pod2.Namespace, cmd...) + e2ekubectl.RunKubectlOrDie(pod2.Namespace, writeCmd...) + output := e2ekubectl.RunKubectlOrDie(pod2.Namespace, cmd...) 
gomega.Expect(strings.Contains(output, "Hello message from test into Pod2")).NotTo(gomega.BeFalse()) }) }) diff --git a/tests/e2e/gc_rwx_non_vsan_datastore.go b/tests/e2e/gc_rwx_non_vsan_datastore.go index 8550934fc4..10d57b1182 100644 --- a/tests/e2e/gc_rwx_non_vsan_datastore.go +++ b/tests/e2e/gc_rwx_non_vsan_datastore.go @@ -39,6 +39,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Non-VSAN datas scParameters map[string]string nonVsanStoragePolicyName string ) + ginkgo.BeforeEach(func() { client = f.ClientSet namespace = getNamespaceToRunTests(f) @@ -52,6 +53,11 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Non-VSAN datas } }) + ginkgo.AfterEach(func() { + svcClient, svNamespace := getSvcClientAndNamespace() + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + }) + /* Test to verify file volume provision on non-vsan datastore - ReadWriteMany diff --git a/tests/e2e/gc_rwx_operation_storm.go b/tests/e2e/gc_rwx_operation_storm.go index 69403a929e..29b90ad091 100644 --- a/tests/e2e/gc_rwx_operation_storm.go +++ b/tests/e2e/gc_rwx_operation_storm.go @@ -31,6 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -74,6 +75,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Operation storm Test", func() ginkgo.AfterEach(func() { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) }) /* @@ -205,7 +207,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Operation storm Test", func() ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) var pods []*v1.Pod @@ -274,13 +276,13 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Operation storm Test", func() message := "Hello message from Pod" + strconv.Itoa(i+2) cmd = []string{"exec", writepod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo '" + message + "'>> /mnt/volume1/Pod1.html"} - _, err = framework.RunKubectl(namespace, cmd...) + _, err = e2ekubectl.RunKubectl(namespace, cmd...) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } cmd = []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output, err = framework.RunKubectl(namespace, cmd...) + output, err = e2ekubectl.RunKubectl(namespace, cmd...) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Output from the Pod1.html is %s", output) @@ -470,7 +472,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Operation storm Test", func() volumePath := "'>> /mnt/volume" + strconv.Itoa(volIndex+1) + "/File.html" cmd := []string{"exec", podArray[index].Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo '" + message + volumePath} - _, err = framework.RunKubectl(namespace, cmd...) + _, err = e2ekubectl.RunKubectl(namespace, cmd...) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } @@ -480,7 +482,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Operation storm Test", func() for volIndex := range pvclaims { volumePath := "cat /mnt/volume" + strconv.Itoa(volIndex+1) + "/File.html" cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", volumePath} - output, err := framework.RunKubectl(namespace, cmd...) + output, err := e2ekubectl.RunKubectl(namespace, cmd...) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Output from the File.html is %s", output) gomega.Expect(strings.Contains(output, "Hello message from Pod")).NotTo(gomega.BeFalse()) diff --git a/tests/e2e/gc_rwx_parallel_claim.go b/tests/e2e/gc_rwx_parallel_claim.go index 6cf43919ba..cc7db8ba7d 100644 --- a/tests/e2e/gc_rwx_parallel_claim.go +++ b/tests/e2e/gc_rwx_parallel_claim.go @@ -65,6 +65,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] PVCs claiming the available resource in p ginkgo.AfterEach(func() { svcClient, svcNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svcNamespace, rqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svcNamespace) }) /* diff --git a/tests/e2e/gc_rwx_readonly.go b/tests/e2e/gc_rwx_readonly.go index c36457ffc6..4b1e996c6a 100644 --- a/tests/e2e/gc_rwx_readonly.go +++ b/tests/e2e/gc_rwx_readonly.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -57,6 +58,11 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for ReadOnlyMany", func( } }) + ginkgo.AfterEach(func() { + svcClient, svNamespace := getSvcClientAndNamespace() + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + }) + /* Test to verify Pod restricts write into PVC @@ -164,7 +170,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for ReadOnlyMany", func( crdCNSFileAccessConfig, crdVersion, crdGroup, true) ginkgo.By("Verify the volume is accessible and Read/write is possible") - _, err = framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), pod.Name, + _, err = e2ekubectl.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), pod.Name, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html") gomega.Expect(err).To(gomega.HaveOccurred()) @@ -327,17 +333,17 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for ReadOnlyMany", func( ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, wrtiecmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) ginkgo.By("Verify the volume is accessible and Read/write is possible") - _, err = framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), pod.Name, "--", + _, err = e2ekubectl.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), pod.Name, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume2/Pod1.html") gomega.Expect(err).To(gomega.HaveOccurred()) @@ -435,13 +441,13 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for ReadOnlyMany", func( ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, wrtiecmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) // Delete POD @@ -502,10 +508,10 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for ReadOnlyMany", func( ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd = []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output = framework.RunKubectlOrDie(namespace, cmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) - _, err = framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), pod2.Name, "--", + _, err = e2ekubectl.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), pod2.Name, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod2 file' > /mnt/volume1/Pod1.html") gomega.Expect(err).To(gomega.HaveOccurred()) }) @@ -612,13 +618,13 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for ReadOnlyMany", func( ginkgo.By("Verify the Read and write on volume is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, wrtiecmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) // Delete POD @@ -763,10 +769,10 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for ReadOnlyMany", func( ginkgo.By("Verify Read/write is possible on volume") cmd = []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output = framework.RunKubectlOrDie(namespace, cmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) - _, output, err = framework.RunKubectlWithFullOutput(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), + _, output, err = e2ekubectl.RunKubectlWithFullOutput(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), pod2.Name, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod2 file' > /mnt/volume1/Pod1.html") gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(strings.Contains(output, "Read-only file system")).To(gomega.BeTrue()) @@ -774,13 +780,13 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for ReadOnlyMany", func( ginkgo.By("Verify Read/write is possible on volume") cmd = []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume2/Pod2.html "} - output = framework.RunKubectlOrDie(namespace, cmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod2")).NotTo(gomega.BeFalse()) wrtiecmd = []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod2' > /mnt/volume2/Pod2.html"} - framework.RunKubectlOrDie(namespace, wrtiecmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod2")).NotTo(gomega.BeFalse()) }) diff --git a/tests/e2e/gc_rwx_reclaim_policy.go b/tests/e2e/gc_rwx_reclaim_policy.go index c8f9410199..755ad78d50 100644 --- a/tests/e2e/gc_rwx_reclaim_policy.go +++ b/tests/e2e/gc_rwx_reclaim_policy.go @@ -30,6 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -67,6 +68,7 @@ var _ = ginkgo.Describe("File Volume Test for Reclaim Policy", func() { ginkgo.AfterEach(func() { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) }) /* @@ -218,13 +220,13 @@ var _ = ginkgo.Describe("File Volume Test for Reclaim Policy", func() { ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) writeCmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, writeCmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, writeCmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) // Delete POD @@ -352,13 +354,13 @@ var _ = ginkgo.Describe("File Volume Test for Reclaim Policy", func() { ginkgo.By("Verify the volume is accessible and Read/write is possible from pod2") cmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output = framework.RunKubectlOrDie(namespace, cmd2...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd2...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) writeCmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod2' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, writeCmd2...) - output = framework.RunKubectlOrDie(namespace, cmd2...) + e2ekubectl.RunKubectlOrDie(namespace, writeCmd2...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd2...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod2")).NotTo(gomega.BeFalse()) }) @@ -655,13 +657,13 @@ var _ = ginkgo.Describe("File Volume Test for Reclaim Policy", func() { ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod2.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod2")).NotTo(gomega.BeFalse()) writeCmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod2' > /mnt/volume1/Pod2.html"} - framework.RunKubectlOrDie(namespace, writeCmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, writeCmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
gomega.Expect(strings.Contains(output, "Hello message from test into Pod2")).NotTo(gomega.BeFalse()) }) }) diff --git a/tests/e2e/gc_rwx_security_context.go b/tests/e2e/gc_rwx_security_context.go index cd468bd996..e8b161a9dc 100644 --- a/tests/e2e/gc_rwx_security_context.go +++ b/tests/e2e/gc_rwx_security_context.go @@ -28,6 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -65,6 +66,7 @@ var _ = ginkgo.Describe("File Volume Test with security context", func() { ginkgo.AfterEach(func() { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) }) /* @@ -200,13 +202,13 @@ var _ = ginkgo.Describe("File Volume Test with security context", func() { ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) writeCmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, writeCmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, writeCmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) runAsUser = 1000 @@ -251,12 +253,12 @@ var _ = ginkgo.Describe("File Volume Test with security context", func() { ginkgo.By("Verify the volume is not accessible and Read/write is possible from pod2") cmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output = framework.RunKubectlOrDie(namespace, cmd2...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd2...) framework.Logf("Output from the command is %s", output) writeCmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - _, err = framework.RunKubectl(namespace, writeCmd2...) + _, err = e2ekubectl.RunKubectl(namespace, writeCmd2...) gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(err).To(gomega.ContainSubstring("Permission denied")) @@ -400,19 +402,19 @@ var _ = ginkgo.Describe("File Volume Test with security context", func() { ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) ginkgo.By("Verify the volume is accessible and Read/write is possible") chmodCmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "chmod 444 /mnt/volume1/Pod1.html "} - _, err = framework.RunKubectl(namespace, chmodCmd...) + _, err = e2ekubectl.RunKubectl(namespace, chmodCmd...) gomega.Expect(err).NotTo(gomega.HaveOccurred()) writeCmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, writeCmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, writeCmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) // Create a Pod to use this PVC @@ -455,18 +457,18 @@ var _ = ginkgo.Describe("File Volume Test with security context", func() { ginkgo.By("Verify the volume is not accessible and Read/write is possible from pod2") cmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "ls -lh /mnt/volume1/Pod1.html "} - output = framework.RunKubectlOrDie(namespace, cmd2...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd2...) framework.Logf("Output for ls -lh command is : %s", output) ginkgo.By("Verify the volume is not accessible and Read/write is possible from pod2") cmd2 = []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output = framework.RunKubectlOrDie(namespace, cmd2...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd2...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) writeCmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - _, err = framework.RunKubectl(namespace, writeCmd2...) + _, err = e2ekubectl.RunKubectl(namespace, writeCmd2...) gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(err).To(gomega.ContainSubstring("Permission denied")) }) @@ -638,13 +640,13 @@ var _ = ginkgo.Describe("File Volume Test with security context", func() { ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) writeCmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, writeCmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, writeCmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) }) diff --git a/tests/e2e/gc_rwx_service_down.go b/tests/e2e/gc_rwx_service_down.go index 8bcd5342f8..9a3cac0929 100644 --- a/tests/e2e/gc_rwx_service_down.go +++ b/tests/e2e/gc_rwx_service_down.go @@ -87,6 +87,7 @@ var _ = ginkgo.Describe("File Volume Test on Service down", func() { vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped) } + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) }) /* diff --git a/tests/e2e/gc_rwx_statefulsets.go b/tests/e2e/gc_rwx_statefulsets.go index fd00b067b7..b1af58b47b 100644 --- a/tests/e2e/gc_rwx_statefulsets.go +++ b/tests/e2e/gc_rwx_statefulsets.go @@ -81,6 +81,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Statefulsets", } svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) }) /* @@ -137,10 +138,11 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Statefulsets", }() statefulset := GetStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") + scName := defaultNginxStorageClassName statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1].Spec.AccessModes[0] = v1.ReadWriteMany statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = defaultNginxStorageClassName + Spec.StorageClassName = &scName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) @@ -431,11 +433,12 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Provision with Statefulsets", }() statefulset := GetStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") + scName := defaultNginxStorageClassName statefulset.Spec.PodManagementPolicy = apps.ParallelPodManagement statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1].Spec.AccessModes[0] = v1.ReadWriteMany statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = defaultNginxStorageClassName + Spec.StorageClassName = &scName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) diff --git a/tests/e2e/gc_rwx_static_provision.go b/tests/e2e/gc_rwx_static_provision.go index 0aedd70e51..ee308048e7 100644 --- a/tests/e2e/gc_rwx_static_provision.go +++ b/tests/e2e/gc_rwx_static_provision.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -57,6 +58,11 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume static Provision Test", func( } }) + ginkgo.AfterEach(func() { + svcClient, svNamespace := getSvcClientAndNamespace() + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + }) + /* Test to verify static volume provision. 
@@ -160,13 +166,13 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume static Provision Test", func( ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, wrtiecmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) // Creating label for PV. @@ -217,25 +223,25 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume static Provision Test", func( ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd = []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output = framework.RunKubectlOrDie(namespace, cmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) wrtiecmd = []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, wrtiecmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) ginkgo.By("Verify the volume is accessible and Read/write is possible from pod2") cmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output = framework.RunKubectlOrDie(namespace, cmd2...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd2...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) wrtiecmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod2' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, wrtiecmd2...) - output = framework.RunKubectlOrDie(namespace, cmd2...) + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd2...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd2...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod2")).NotTo(gomega.BeFalse()) }) @@ -330,13 +336,13 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume static Provision Test", func( ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, wrtiecmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) // Delete POD @@ -401,13 +407,13 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume static Provision Test", func( ginkgo.By("Verify the volume is accessible and Read/write is possible from pod2") cmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output = framework.RunKubectlOrDie(namespace, cmd2...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd2...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) wrtiecmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod2' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, wrtiecmd2...) - output = framework.RunKubectlOrDie(namespace, cmd2...) + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd2...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd2...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod2")).NotTo(gomega.BeFalse()) }) }) diff --git a/tests/e2e/gc_rwx_syncer.go b/tests/e2e/gc_rwx_syncer.go index 190fdb9e69..e170527f7b 100644 --- a/tests/e2e/gc_rwx_syncer.go +++ b/tests/e2e/gc_rwx_syncer.go @@ -28,6 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -74,6 +75,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for label updates", func ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", vsanhealthServiceName)) startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped) } + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) }) /* @@ -565,25 +567,25 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for label updates", func ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod2.html "} - output2 := framework.RunKubectlOrDie(namespace, cmd2...) + output2 := e2ekubectl.RunKubectlOrDie(namespace, cmd2...) 
gomega.Expect(strings.Contains(output2, "Hello message from Pod2")).NotTo(gomega.BeFalse()) writeCmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, writeCmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, writeCmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) writeCmd2 := []string{"exec", pod2.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod2' > /mnt/volume1/Pod2.html"} - framework.RunKubectlOrDie(namespace, writeCmd2...) - output = framework.RunKubectlOrDie(namespace, cmd2...) + e2ekubectl.RunKubectlOrDie(namespace, writeCmd2...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd2...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod2")).NotTo(gomega.BeFalse()) }) @@ -750,13 +752,13 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] File Volume Test for label updates", func ginkgo.By("Verify the volume is accessible and Read/write is possible") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "cat /mnt/volume1/Pod1.html "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) writeCmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} - framework.RunKubectlOrDie(namespace, writeCmd...) - output = framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, writeCmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) }) diff --git a/tests/e2e/gc_rwx_tkg_scale.go b/tests/e2e/gc_rwx_tkg_scale.go index bd8299b618..e5ab92ab2f 100644 --- a/tests/e2e/gc_rwx_tkg_scale.go +++ b/tests/e2e/gc_rwx_tkg_scale.go @@ -78,6 +78,7 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] TKG RWX for STS with GC worker nodes scal } svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) }) /* @@ -135,10 +136,11 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] TKG RWX for STS with GC worker nodes scal statefulset := GetStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") + scName := defaultNginxStorageClassName statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1].Spec.AccessModes[0] = v1.ReadWriteMany statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. 
- Annotations["volume.beta.kubernetes.io/storage-class"] = defaultNginxStorageClassName + Spec.StorageClassName = &scName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) @@ -377,11 +379,12 @@ var _ = ginkgo.Describe("[rwm-csi-tkg] TKG RWX for STS with GC worker nodes scal }() statefulset := GetStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") + scName := defaultNginxStorageClassName statefulset.Spec.PodManagementPolicy = apps.ParallelPodManagement statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1].Spec.AccessModes[0] = v1.ReadWriteMany statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = defaultNginxStorageClassName + Spec.StorageClassName = &scName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) diff --git a/tests/e2e/gc_rwx_volume_health.go b/tests/e2e/gc_rwx_volume_health.go index b2bd9d09ab..71e598c224 100644 --- a/tests/e2e/gc_rwx_volume_health.go +++ b/tests/e2e/gc_rwx_volume_health.go @@ -73,6 +73,8 @@ var _ = ginkgo.Describe("File Volume Test volume health plumbing", func() { ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", vsanhealthServiceName)) startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped) } + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + dumpSvcNsEventsOnTestFailure(svcClient, csiSystemNamespace) }) /* diff --git a/tests/e2e/improved_csi_idempotency.go b/tests/e2e/improved_csi_idempotency.go index e39630d785..28e4ea1f0b 100644 --- a/tests/e2e/improved_csi_idempotency.go +++ b/tests/e2e/improved_csi_idempotency.go @@ -124,14 +124,6 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] "+ ginkgo.AfterEach(func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - - if supervisorCluster { - deleteResourceQuota(client, namespace) - } - if guestCluster { - svcClient, svNamespace := getSvcClientAndNamespace() - setResourceQuota(svcClient, svNamespace, defaultrqLimit) - } if isServiceStopped { if serviceName == "CSI" { framework.Logf("Starting CSI driver") @@ -149,7 +141,7 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] "+ gomega.Expect(err).NotTo(gomega.HaveOccurred()) } else if serviceName == hostdServiceName { framework.Logf("In afterEach function to start the hostd service on all hosts") - hostIPs := getAllHostsIP(ctx) + hostIPs := getAllHostsIP(ctx, true) for _, hostIP := range hostIPs { startHostDOnHost(ctx, hostIP) } @@ -165,6 +157,16 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] "+ ginkgo.By(fmt.Sprintf("Resetting provisioner time interval to %s sec", defaultProvisionerTimeInSec)) updateCSIDeploymentProvisionerTimeout(c, csiSystemNamespace, defaultProvisionerTimeInSec) + + if supervisorCluster { + deleteResourceQuota(client, namespace) + dumpSvcNsEventsOnTestFailure(client, namespace) + } + if guestCluster { + svcClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + } }) /* @@ -396,7 +398,7 @@ func createVolumesByReducingProvisionerTime(namespace string, client clientset.I ginkgo.By("Waiting for all claims to be in bound state") persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(client, pvclaims, - framework.ClaimProvisionTimeout) + 2*framework.ClaimProvisionTimeout) 
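// Editorial sketch of the VolumeClaimTemplates change applied in the statefulset
// suites above: the deprecated volume.beta.kubernetes.io/storage-class annotation
// is replaced by the typed Spec.StorageClassName field. Variable names follow the
// surrounding hunks; the index variable is introduced here only for readability.
scName := defaultNginxStorageClassName
last := len(statefulset.Spec.VolumeClaimTemplates) - 1
statefulset.Spec.VolumeClaimTemplates[last].Spec.AccessModes[0] = v1.ReadWriteMany
statefulset.Spec.VolumeClaimTemplates[last].Spec.StorageClassName = &scName
CreateStatefulSet(namespace, statefulset, client)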
gomega.Expect(err).NotTo(gomega.HaveOccurred()) // TODO: Add a logic to check for the no orphan volumes @@ -507,18 +509,18 @@ func createVolumeWithServiceDown(serviceName string, namespace string, client cl csiReplicaCount := *deployment.Spec.Replicas ginkgo.By("Stopping CSI driver") - isServiceStopped, err = stopCSIPods(ctx, c) + isServiceStopped, err = stopCSIPods(ctx, c, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if isServiceStopped { framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount) + isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount) + isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if os.Getenv(envFullSyncWaitTime) != "" { @@ -536,7 +538,7 @@ func createVolumeWithServiceDown(serviceName string, namespace string, client cl time.Sleep(time.Duration(fullSyncWaitTime) * time.Second) } else if serviceName == hostdServiceName { ginkgo.By("Fetch IPs for the all the hosts in the cluster") - hostIPs := getAllHostsIP(ctx) + hostIPs := getAllHostsIP(ctx, true) isServiceStopped = true var wg sync.WaitGroup @@ -603,7 +605,7 @@ func createVolumeWithServiceDown(serviceName string, namespace string, client cl ginkgo.By("Waiting for all claims to be in bound state") persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(client, pvclaims, - framework.ClaimProvisionTimeout) + 2*framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // TODO: Add a logic to check for the no orphan volumes @@ -708,7 +710,7 @@ func extendVolumeWithServiceDown(serviceName string, namespace string, client cl ginkgo.By("Waiting for all claims to be in bound state") persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(client, pvclaims, - framework.ClaimProvisionTimeout) + 2*framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // TODO: Add a logic to check for the no orphan volumes @@ -764,19 +766,19 @@ func extendVolumeWithServiceDown(serviceName string, namespace string, client cl csiReplicaCount := *deployment.Spec.Replicas ginkgo.By("Stopping CSI driver") - isServiceStopped, err = stopCSIPods(ctx, c) + isServiceStopped, err = stopCSIPods(ctx, c, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if isServiceStopped { framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount) + isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount) + isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if os.Getenv(envFullSyncWaitTime) != "" { @@ -837,6 +839,7 @@ func extendVolumeWithServiceDown(serviceName string, namespace string, client cl // stopHostD is a function for waitGroup to run stop hostd parallelly func stopHostD(ctx context.Context, addr string, wg *sync.WaitGroup) { + defer ginkgo.GinkgoRecover() defer wg.Done() stopHostDOnHost(ctx, addr) } diff --git a/tests/e2e/invalid_topology_values.go b/tests/e2e/invalid_topology_values.go index 35b05abc50..81a82ece1b 100644 --- 
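// Editorial sketch: stopCSIPods/startCSIPods now take the CSI namespace explicitly,
// and a deferred restart keeps the driver from staying down if the spec fails
// mid-way; getAllHostsIP likewise gains a boolean argument in the hunks above.
// All identifiers come from the surrounding test code.
isServiceStopped, err = stopCSIPods(ctx, c, csiSystemNamespace)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
	if isServiceStopped {
		framework.Logf("Starting CSI driver")
		isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount, csiSystemNamespace)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	}
}()
// Related hardening in the same file: goroutine helpers such as stopHostD now
// defer ginkgo.GinkgoRecover(), so a failed assertion in a worker goroutine cannot
// take down the whole suite.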
a/tests/e2e/invalid_topology_values.go +++ b/tests/e2e/invalid_topology_values.go @@ -96,7 +96,7 @@ var _ = ginkgo.Describe("[csi-topology-vanilla] Topology-Aware-Provisioning-With // Get the event list and verify if it contains expected error message eventList, _ := client.CoreV1().Events(pvclaim.Namespace).List(ctx, metav1.ListOptions{}) gomega.Expect(eventList.Items).NotTo(gomega.BeEmpty()) - expectedErrMsg := "failed to get shared datastores for topology requirement" + expectedErrMsg := "No compatible datastores found for accessibility requirements" err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) }) @@ -137,7 +137,7 @@ var _ = ginkgo.Describe("[csi-topology-vanilla] Topology-Aware-Provisioning-With // Get the event list and verify if it contains expected error message eventList, _ := client.CoreV1().Events(pvclaim.Namespace).List(ctx, metav1.ListOptions{}) gomega.Expect(eventList.Items).NotTo(gomega.BeEmpty()) - expectedErrMsg := "failed to get shared datastores for topology requirement" + expectedErrMsg := "No compatible datastores found for accessibility requirements" err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) }) @@ -177,7 +177,7 @@ var _ = ginkgo.Describe("[csi-topology-vanilla] Topology-Aware-Provisioning-With // Get the event list and verify if it contains expected error message eventList, _ := client.CoreV1().Events(pvclaim.Namespace).List(ctx, metav1.ListOptions{}) gomega.Expect(eventList.Items).NotTo(gomega.BeEmpty()) - expectedErrMsg := "failed to get shared datastores for topology requirement" + expectedErrMsg := "No compatible datastores found for accessibility requirements" err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error %q", expectedErrMsg)) }) diff --git a/tests/e2e/labelupdates.go b/tests/e2e/labelupdates.go index af6988e7f8..092e5e7a16 100644 --- a/tests/e2e/labelupdates.go +++ b/tests/e2e/labelupdates.go @@ -116,6 +116,11 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize ginkgo.AfterEach(func() { if supervisorCluster { deleteResourceQuota(client, namespace) + dumpSvcNsEventsOnTestFailure(client, namespace) + } + if guestCluster { + svcClient, svNamespace := getSvcClientAndNamespace() + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) } }) @@ -467,10 +472,13 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize ginkgo.By(fmt.Sprintf("Deleting pvc %s in namespace %s", pvc.Name, pvc.Namespace)) err = client.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvc.Name, *metav1.NewDeleteOptions(0)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // Waiting for some time for PVC to be deleted correctly - ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow PVC deletion", oneMinuteWaitTimeInSeconds)) - time.Sleep(time.Duration(oneMinuteWaitTimeInSeconds) * time.Second) + // Waiting for PVC to be deleted correctly + ginkgo.By("Verify if PVC is deleted from namespace") + err = waitForPvcToBeDeleted(ctx, client, pvc.Name, pvc.Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify labels are not present in cns") _, err = e2eVSphere.getLabelsForCNSVolume(pv.Spec.CSI.VolumeHandle, string(cnstypes.CnsKubernetesEntityTypePVC), 
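// Editorial sketch: the topology-failure specs above now expect the CSI driver's
// current error string. The event-polling pattern, using the suite helper
// waitForEvent, looks like this:
expectedErrMsg := "No compatible datastores found for accessibility requirements"
err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg))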
pvc.Name, namespace) gomega.Expect(err).To(gomega.HaveOccurred()) @@ -679,7 +687,7 @@ var _ bool = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelize ginkgo.By("Creating statefulset") statefulset := GetStatefulSetFromManifest(namespace) statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storageClassName + Spec.StorageClassName = &storageClassName CreateStatefulSet(namespace, statefulset, client) defer func() { ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) diff --git a/tests/e2e/multi_master_k8s.go b/tests/e2e/multi_master_k8s.go index 3d92a40513..a6d68272a5 100644 --- a/tests/e2e/multi_master_k8s.go +++ b/tests/e2e/multi_master_k8s.go @@ -116,6 +116,13 @@ var _ = ginkgo.Describe("[csi-multi-master-block-e2e]", func() { ginkgo.By("Waiting for old vsphere-csi-controller pod to be removed") err = waitForControllerDeletion(client, controllerNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if supervisorCluster { + dumpSvcNsEventsOnTestFailure(client, namespace) + } else { + svcClient, svNamespace := getSvcClientAndNamespace() + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + } }) /* diff --git a/tests/e2e/multi_vc.go b/tests/e2e/multi_vc.go new file mode 100644 index 0000000000..a06ab52f74 --- /dev/null +++ b/tests/e2e/multi_vc.go @@ -0,0 +1,1832 @@ +/* + Copyright 2023 The Kubernetes Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package e2e + +import ( + "context" + "fmt" + "os" + "strconv" + "strings" + "sync" + "time" + + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + cnstypes "github.com/vmware/govmomi/cns/types" + "golang.org/x/crypto/ssh" + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + fnodes "k8s.io/kubernetes/test/e2e/framework/node" + fpod "k8s.io/kubernetes/test/e2e/framework/pod" + fpv "k8s.io/kubernetes/test/e2e/framework/pv" + fss "k8s.io/kubernetes/test/e2e/framework/statefulset" + admissionapi "k8s.io/pod-security-admission/api" +) + +var _ = ginkgo.Describe("[csi-multi-vc-topology] Multi-VC", func() { + f := framework.NewDefaultFramework("csi-multi-vc") + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + var ( + client clientset.Interface + namespace string + allowedTopologies []v1.TopologySelectorLabelRequirement + bindingMode storagev1.VolumeBindingMode + allowedTopologyLen int + nodeAffinityToSet bool + parallelStatefulSetCreation bool + stsReplicas int32 + parallelPodPolicy bool + scParameters map[string]string + topValStartIndex int + topValEndIndex int + topkeyStartIndex int + datastoreURLVC1 string + datastoreURLVC2 string + podAntiAffinityToSet bool + sshClientConfig *ssh.ClientConfig + nimbusGeneratedK8sVmPwd string + allMasterIps []string + masterIp string + scaleUpReplicaCount int32 + scaleDownReplicaCount int32 + multiVCSetupType string + isVsanHealthServiceStopped bool + stsScaleUp bool + stsScaleDown bool + verifyTopologyAffinity bool + storagePolicyInVc1 string + storagePolicyInVc1Vc2 string + storagePolicyToDelete string + isSPSServiceStopped bool + isStorageProfileDeleted bool + ) + ginkgo.BeforeEach(func() { + var cancel context.CancelFunc + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + client = f.ClientSet + namespace = f.Namespace.Name + + multiVCbootstrap() + + stsScaleUp = true + stsScaleDown = true + verifyTopologyAffinity = true + + sc, err := client.StorageV1().StorageClasses().Get(ctx, defaultNginxStorageClassName, metav1.GetOptions{}) + if err == nil && sc != nil { + gomega.Expect(client.StorageV1().StorageClasses().Delete(ctx, sc.Name, + *metav1.NewDeleteOptions(0))).NotTo(gomega.HaveOccurred()) + } + + nodeList, err := fnodes.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + if !(len(nodeList.Items) > 0) { + framework.Failf("Unable to find ready and schedulable Node") + } + + topologyMap := GetAndExpectStringEnvVar(topologyMap) + allowedTopologies = createAllowedTopolgies(topologyMap, topologyLength) + bindingMode = storagev1.VolumeBindingWaitForFirstConsumer + scParameters = make(map[string]string) + storagePolicyInVc1 = GetAndExpectStringEnvVar(envStoragePolicyNameVC1) + storagePolicyInVc1Vc2 = GetAndExpectStringEnvVar(envStoragePolicyNameInVC1VC2) + storagePolicyToDelete = GetAndExpectStringEnvVar(envStoragePolicyNameToDeleteLater) + datastoreURLVC1 = GetAndExpectStringEnvVar(envSharedDatastoreURLVC1) + datastoreURLVC2 = GetAndExpectStringEnvVar(envSharedDatastoreURLVC2) + nimbusGeneratedK8sVmPwd = GetAndExpectStringEnvVar(nimbusK8sVmPwd) + multiVCSetupType = GetAndExpectStringEnvVar(envMultiVCSetupType) + + sshClientConfig = &ssh.ClientConfig{ + User: "root", + Auth: []ssh.AuthMethod{ + ssh.Password(nimbusGeneratedK8sVmPwd), + }, + 
HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + + // fetching k8s master ip + allMasterIps = getK8sMasterIPs(ctx, client) + masterIp = allMasterIps[0] + + }) + + ginkgo.AfterEach(func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) + fss.DeleteAllStatefulSets(client, namespace) + ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespace)) + err := client.CoreV1().Services(namespace).Delete(ctx, servicename, *metav1.NewDeleteOptions(0)) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + framework.Logf("Perform cleanup of any left over stale PVs") + allPvs, err := client.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, pv := range allPvs.Items { + err := client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if isVsanHealthServiceStopped { + vCenterHostname := strings.Split(multiVCe2eVSphere.multivcConfig.Global.VCenterHostname, ",") + vcAddress := vCenterHostname[0] + ":" + sshdPort + framework.Logf("Bringing vsanhealth up before terminating the test") + startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped) + } + + if isSPSServiceStopped { + vCenterHostname := strings.Split(multiVCe2eVSphere.multivcConfig.Global.VCenterHostname, ",") + vcAddress := vCenterHostname[0] + ":" + sshdPort + framework.Logf("Bringing sps up before terminating the test") + startVCServiceWait4VPs(ctx, vcAddress, spsServiceName, &isSPSServiceStopped) + isSPSServiceStopped = false + } + + if isStorageProfileDeleted { + clientIndex := 0 + err = createStorageProfile(masterIp, sshClientConfig, storagePolicyToDelete, clientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }) + + /* TESTCASE-1 + + Stateful set with SC contains default parameters and WFC binding mode and + specific nodeaffinity details in statefullset + + Steps: + 1. Create SC default values so all the AZ's should be consided for provisioning. + 2. Create Statefulset with parallel pod management policy + 3. Wait for PVC to reach bound state and POD to reach Running state + 4. Volumes should get distributed among the nodes which are mentioned in node affinity of + Statefullset yaml + 5. Make sure common validation points are met + a) Verify the PV node affinity details should have appropriate node details + b) The Pods should be running on the appropriate nodes + c) CNS metadata + 6. Scale-up /Scale-down the statefulset and verify the common validation points on newly + created statefullset + 7. 
Clean up the data + */ + + ginkgo.It("Workload creation on a multivc environment with sts specified with node affinity "+ + "and SC with no allowed topology", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + parallelPodPolicy = true + nodeAffinityToSet = true + stsReplicas = 3 + scaleUpReplicaCount = 5 + scaleDownReplicaCount = 2 + + if multiVCSetupType == "multi-2vc-setup" { + /* here in 2-VC setup statefulset node affinity is taken as k8s-zone -> zone-1,zone-2,zone-3 + i.e VC1 allowed topology and VC2 partial allowed topology + */ + allowedTopologyLen = 3 + topValStartIndex = 0 + topValEndIndex = 3 + } else if multiVCSetupType == "multi-3vc-setup" { + /* here in 3-VC setup, statefulset node affinity is taken as k8s-zone -> zone-3 + i.e only VC3 allowed topology + */ + allowedTopologyLen = 1 + topValStartIndex = 2 + topValEndIndex = 3 + } + + ginkgo.By("Set specific allowed topology") + allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex, + topValEndIndex) + + ginkgo.By("Create StorageClass with no allowed topolgies specified and with WFC binding mode") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, nil, nil, "", + bindingMode, false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create StatefulSet and verify pv affinity and pod affinity details") + service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, + parallelPodPolicy, stsReplicas, nodeAffinityToSet, allowedTopologies, allowedTopologyLen, + podAntiAffinityToSet, parallelStatefulSetCreation, false, "", "", nil, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + fss.DeleteAllStatefulSets(client, namespace) + deleteService(namespace, client, service) + }() + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulset, parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }) + + /* TESTCASE-2 + + Deploy workload with allowed topology details in SC which should contain all the AZ's + so that workload will get distributed among all the VC's + + Steps: + 1. Create SC with allowedTopology details contains more than one Availability zone details + 2. Create statefulset with replica-5 + 3. Wait for PVC to reach bound state and POD to reach Running state + 4. Volumes should get distributed among all the Availability zones + 5. Make sure common validation points are met + a) Verify the PV node affinity details should have appropriate Node details + b) The Pods should be running on the appropriate nodes + c) CNS metadata + 6. Scale-up/Scale-down the statefulset and verify the common validation points on newly + created statefullset + 7. 
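// Editorial sketch: the condensed skeleton shared by most test cases in this new
// multi-VC file (create an SC, create a StatefulSet while verifying PV node
// affinity and pod placement, then scale up/down and re-verify). Every helper and
// variable name below comes from the suite code in the hunks above.
allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex,
	topValStartIndex, topValEndIndex)

scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, scParameters,
	allowedTopologies, "", bindingMode, false)
sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
	err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0))
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()

service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client,
	namespace, parallelPodPolicy, stsReplicas, nodeAffinityToSet, allowedTopologies,
	allowedTopologyLen, podAntiAffinityToSet, parallelStatefulSetCreation, false, "", "",
	nil, verifyTopologyAffinity)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
	fss.DeleteAllStatefulSets(client, namespace)
	deleteService(namespace, client, service)
}()

err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount,
	scaleDownReplicaCount, statefulset, parallelStatefulSetCreation, namespace,
	allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity)
gomega.Expect(err).NotTo(gomega.HaveOccurred())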
Clean up the data + */ + + ginkgo.It("Workload creation when all allowed topology specified in SC on a "+ + "multivc environment", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + /* here we are considering all the allowed topologies of VC1 and VC2 in case of 2-VC setup. + i.e. k8s-zone -> zone-1,zone-2,zone-3,zone-4,zone-5 + + And, all the allowed topologies of VC1, VC2 and VC3 are considered in case of 3-VC setup + i.e k8s-zone -> zone-1,zone-2,zone-3 + */ + + stsReplicas = 5 + scaleUpReplicaCount = 7 + scaleDownReplicaCount = 3 + + ginkgo.By("Create StorageClass with specific allowed topolgies details") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, nil, allowedTopologies, "", + "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create StatefulSet and verify pv affinity and pod affinity details") + service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, parallelPodPolicy, + stsReplicas, nodeAffinityToSet, allowedTopologies, allowedTopologyLen, podAntiAffinityToSet, + parallelStatefulSetCreation, false, "", "", nil, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + fss.DeleteAllStatefulSets(client, namespace) + deleteService(namespace, client, service) + }() + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulset, parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + TESTCASE-3 + Deploy workload with Specific storage policy name available in Single VC + + Steps: + 1. Create a SC with storage policy name available in single VC + 2. Create statefulset with replica-5 + 3. Wait for PVC to reach bound state and POD to reach Running state + 4. Volumes should get created under appropriate nodes which is accessible to the storage Policy + 5. Make sure common verification Points met in PVC, PV ad POD + a) Verify the PV node affinity details should have appropriate Node details + b) The Pods should be running on the appropriate nodes + c) CNS metadata + 6. Scale-up/Scale-down the statefulset and verify the common validation points on newly created statefullset + 7. 
Clean up the data + */ + + ginkgo.It("Workload creation when specific storage policy of any single VC is given in SC", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + /* here we are considering storage policy of VC1 and the allowed topology is k8s-zone -> zone-1 + in case of 2-VC setup and 3-VC setup + */ + + stsReplicas = 5 + scParameters[scParamStoragePolicyName] = storagePolicyInVc1 + topValStartIndex = 0 + topValEndIndex = 1 + scaleUpReplicaCount = 9 + scaleDownReplicaCount = 2 + + ginkgo.By("Set specific allowed topology") + allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex, + topValEndIndex) + + ginkgo.By("Create StorageClass with storage policy specified") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, scParameters, nil, "", + "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create StatefulSet and verify pv affinity and pod affinity details") + service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, parallelPodPolicy, + stsReplicas, nodeAffinityToSet, allowedTopologies, allowedTopologyLen, podAntiAffinityToSet, + parallelStatefulSetCreation, false, "", "", nil, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + fss.DeleteAllStatefulSets(client, namespace) + deleteService(namespace, client, service) + }() + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulset, parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* TESTCASE-4 + Same Policy is available in two VC's + + Steps: + 1. Create a SC with Storage policy name available in VC1 and VC2 + 2. Create two Statefulset with replica-3 + 3. Wait for PVC to reach bound state and POD to reach Running state + 4. Since both the VCs have the same storage policy, volume should get distributed among all the + availability zones + 5. Make sure common verification Points met in PVC, PV ad POD + a) Verify the PV node affinity details should have appropriate Node details + b) The POD's should be running on the appropriate nodes + c) CNS metadata + 6. Scale-up/scale-down the statefulset and verify the common validation points on newly + created statefullset + 7. Clean up the data + */ + + ginkgo.It("Workload creation when storage policy available in multivc setup is given in SC", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + /* In case of 2-VC setup, we are considering storage policy of VC1 and VC2 and the allowed + topology is k8s-zone -> zone-1, zone-2 i.e VC1 allowed topology and partial allowed topology + of VC2 + + In case of 3-VC setup, we are considering storage policy of VC1 and VC2 and the allowed + topology is k8s-zone -> zone-1, zone-2 i.e VC1 and VC2 allowed topologies. 
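// Editorial sketch: VC-specific placement in this suite can also be driven purely
// through SC parameters rather than allowed-topology entries, e.g. a storage
// policy present only on VC1, or (in a later case) a datastore URL. Identifiers
// are from the hunks above.
scParameters[scParamStoragePolicyName] = storagePolicyInVc1
scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, scParameters, nil,
	"", "", false)
// For datastore-based placement the same map is used with a different key:
// scParameters[scParamDatastoreURL] = datastoreURLVC1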
+ */ + + stsReplicas = 3 + scParameters[scParamStoragePolicyName] = storagePolicyInVc1Vc2 + topValStartIndex = 0 + topValEndIndex = 2 + sts_count := 2 + parallelStatefulSetCreation = true + scaleUpReplicaCount = 7 + scaleDownReplicaCount = 2 + + ginkgo.By("Set specific allowed topology") + allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex, + topValEndIndex) + + ginkgo.By("Create StorageClass with storage policy specified") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, scParameters, nil, "", + "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + + ginkgo.By("Create 2 StatefulSet with replica count 5") + statefulSets := createParallelStatefulSetSpec(namespace, sts_count, stsReplicas) + var wg sync.WaitGroup + wg.Add(sts_count) + for i := 0; i < len(statefulSets); i++ { + go createParallelStatefulSets(client, namespace, statefulSets[i], + stsReplicas, &wg) + + } + wg.Wait() + + ginkgo.By("Waiting for StatefulSets Pods to be in Ready State") + err = waitForStsPodsToBeInReadyRunningState(ctx, client, namespace, statefulSets) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) + }() + + ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") + for i := 0; i < len(statefulSets); i++ { + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulSets[i], + namespace, allowedTopologies, parallelStatefulSetCreation, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Perform scaleup/scaledown operation on statefulset and " + + "verify pv and pod affinity details") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulSets[0], parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* TESTCASE-5 + Pod affiity tests + + Steps: + 1. Create SC default values so all the AZ's should be considered for provisioning. + 2. Create statefulset with Pod affinity rules such a way that each AZ should get atleast 1 statefulset + 3. Wait for PVC to bound and POD to reach running state + 4. Verify the stateful set distribution + 5. Make sure common verification Points met in PVC, PV ad POD + a) Verify the PV node affinity details should have appropriate Node details + b) The Pods should be running on the appropriate nodes + c) CNS metadata + 6. Scale-up/Scale-down the statefulset and verify the common validation points on newly created + statefullset + 7. 
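// Editorial sketch: condensed form of the parallel StatefulSet creation used above.
// Each set is created in its own goroutine, then the test waits for every pod to be
// Running/Ready before verifying PV node affinity. All helpers are suite functions
// referenced in the hunk.
statefulSets := createParallelStatefulSetSpec(namespace, sts_count, stsReplicas)
var wg sync.WaitGroup
wg.Add(sts_count)
for i := 0; i < len(statefulSets); i++ {
	go createParallelStatefulSets(client, namespace, statefulSets[i], stsReplicas, &wg)
}
wg.Wait()

err = waitForStsPodsToBeInReadyRunningState(ctx, client, namespace, statefulSets)
gomega.Expect(err).NotTo(gomega.HaveOccurred())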
Clean up data + */ + + ginkgo.It("Workload creation on a multivc environment with sts specified with pod affinity "+ + "and SC with no allowed topology", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stsReplicas = 5 + podAntiAffinityToSet = true + scaleUpReplicaCount = 8 + scaleDownReplicaCount = 1 + + ginkgo.By("Create StorageClass with storage policy specified") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, nil, nil, "", + "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create StatefulSet and verify pv affinity and pod affinity details") + service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, parallelPodPolicy, + stsReplicas, nodeAffinityToSet, allowedTopologies, allowedTopologyLen, podAntiAffinityToSet, + parallelStatefulSetCreation, false, "", "", nil, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + fss.DeleteAllStatefulSets(client, namespace) + deleteService(namespace, client, service) + }() + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulset, parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* TESTCASE-6 + Deploy workload with allowed topology and Datastore URL + + Steps: + 1. Create a SC with allowed topology and appropriate datastore url + 2. Create Statefulset with replica-5 + 3. Wait for PVC to reach bound state and POD to reach Running state + 4. Volumes should get created under appropriate availability zone and on the specified datastore + 5. Make sure common validation points are met + 6. Verify the PV node affinity details should have appropriate node details + 7. The Pods should be running on the appropriate nodes + 8. Scale-up/Scale-down the statefulset + 9. Verify the node affinity details also verify the Pod details + 10. 
Clean up the data + */ + + ginkgo.It("Deploy workload with allowed topology and datastore url on a multivc environment", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + /* here, we are considering datastore url of VC1 in case of both 2-VC and 3-VC multi setup + so the allowed topology will be considered as - k8s-zone -> zone-1 + */ + + stsReplicas = 5 + scParameters[scParamDatastoreURL] = datastoreURLVC1 + topValStartIndex = 0 + topValEndIndex = 1 + scaleDownReplicaCount = 3 + scaleUpReplicaCount = 6 + + ginkgo.By("Set specific allowed topology") + allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex, + topValEndIndex) + + ginkgo.By("Create StorageClass with allowed topology, storage-policy and with datastore url") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, scParameters, allowedTopologies, "", + bindingMode, false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create StatefulSet and verify pv affinity and pod affinity details") + service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, parallelPodPolicy, + stsReplicas, nodeAffinityToSet, allowedTopologies, allowedTopologyLen, podAntiAffinityToSet, + parallelStatefulSetCreation, false, "", "", nil, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + fss.DeleteAllStatefulSets(client, namespace) + deleteService(namespace, client, service) + }() + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulset, parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* TESTCASE-7 + Deploy workload with allowed topology details in SC specific to VC1 with Immediate Binding + + Steps: + 1. Create SC with allowedTopology details set to VC1 availability zone + 2. Create statefulset with replica-3 + 3. Wait for PVC to reach bound state and POD to reach Running state + 4. Make sure common validation points are met + a) Verify the PV node affinity details should have appropriate Node details + b) The POD's should be running on the appropriate nodes which are present in VC1 + 5. Scale-up/scale-down the statefulset + 6. Verify the node affinity details. Verify the POD details. All the pods should come up on the + nodes of VC1 + 7. 
Clean up the data + */ + + ginkgo.It("Deploy workload with allowed topology details in SC specific "+ + "to VC1 with Immediate Binding", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + /* here, we are considering allowed topology of VC1 in case of both 2-VC and 3-VC multi setup + so the allowed topology will be considered as - k8s-zone -> zone-1 + */ + + stsReplicas = 3 + topValStartIndex = 0 + topValEndIndex = 1 + scaleDownReplicaCount = 0 + scaleUpReplicaCount = 6 + + ginkgo.By("Set specific allowed topology") + allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex, + topValEndIndex) + + ginkgo.By("Create StorageClass with allowed topology details of VC1") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, nil, allowedTopologies, "", + "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create StatefulSet and verify pv affinity and pod affinity details") + service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, parallelPodPolicy, + stsReplicas, nodeAffinityToSet, allowedTopologies, allowedTopologyLen, podAntiAffinityToSet, + parallelStatefulSetCreation, false, "", "", nil, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + fss.DeleteAllStatefulSets(client, namespace) + deleteService(namespace, client, service) + }() + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulset, parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* TESTCASE-8 + Deploy workload with allowed topology details in SC specific to VC2 + WFC binding mode + + default pod management policy + + Steps: + 1. Create SC with allowedTopology details set to VC2's Availability Zone + 2. Create statefulset with replica-3 + 3. Wait for PVC to reach bound state and Pod to reach running state + 4. Make sure common validation points are met + a) Verify the PV node affinity details should have appropriate node details + b) The Pods should be running on the appropriate nodes which are present in VC2 + 5. Scale-up/scale-down the statefulset + 6. Verify the node affinity details. Verify the pod details. All the pods should come up on the nodes of + VC2 + 7. Validate CNS metadata on appropriate VC + 8. 
Clean up the data + */ + + ginkgo.It("Deploy workload with allowed topology details in SC specific to VC2 with WFC "+ + "binding mode and with default pod management policy", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stsReplicas = 3 + scaleDownReplicaCount = 2 + scaleUpReplicaCount = 5 + + if multiVCSetupType == "multi-2vc-setup" { + // here, we are considering partial allowed topology of VC2 i.e k8s-zone -> zone-3,zone-4 + topValStartIndex = 2 + topValEndIndex = 4 + } else if multiVCSetupType == "multi-3vc-setup" { + // here, we are considering allowed topology of VC2 i.e k8s-zone -> zone-2 + topValStartIndex = 1 + topValEndIndex = 2 + } + + ginkgo.By("Set specific allowed topology") + allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex, + topValEndIndex) + + ginkgo.By("Create StorageClass with allowed topology details of VC2") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, nil, allowedTopologies, "", + bindingMode, false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create StatefulSet and verify pv affinity and pod affinity details") + service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, parallelPodPolicy, + stsReplicas, nodeAffinityToSet, allowedTopologies, allowedTopologyLen, podAntiAffinityToSet, + parallelStatefulSetCreation, false, "", "", nil, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + fss.DeleteAllStatefulSets(client, namespace) + deleteService(namespace, client, service) + }() + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulset, parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* TESTCASE-9 + Deploy workload with allowed topology details in SC specific to VC3 + parallel pod management policy + + Steps: + 1. Create SC with allowedTopology details set to VC3 availability zone + 2. Create statefulset with replica-3 + 3. Wait for PVC to reach bound state and POD to reach Running state + 4. Make sure common validation points are met + a) Verify the PV node affinity details should have appropriate Node details + b) The Pod should be running on the appropriate nodes which are present in VC3 + 5. Scale-up /scale-down the statefulset + 6. Verify the node affinity details and also verify the pod details. All the pods should come up on the nodes of + VC3 + 7. Clean up the data + */ + + ginkgo.It("Deploy workload with allowed topology details in SC specific to VC3 with "+ + "parallel pod management policy", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stsReplicas = 3 + parallelPodPolicy = true + scaleDownReplicaCount = 1 + scaleUpReplicaCount = 6 + + if multiVCSetupType == "multi-2vc-setup" { + /* here, For 2-VC setup we will consider all allowed topology of VC1 and VC2 + i.e. 
k8s-zone -> zone-1,zone-2,zone-3,zone-4 */ + topValStartIndex = 0 + topValEndIndex = 4 + } else if multiVCSetupType == "multi-3vc-setup" { + /* here, For 3-VC setup we will consider allowed topology of VC3 + i.e. k8s-zone ->zone-3 */ + topValStartIndex = 2 + topValEndIndex = 3 + } + + ginkgo.By("Set specific allowed topology") + allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex, + topValEndIndex) + + ginkgo.By("Create StorageClass with allowed topology details of VC3") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, nil, allowedTopologies, "", + bindingMode, false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create StatefulSet and verify pv affinity and pod affinity details") + service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, parallelPodPolicy, + stsReplicas, nodeAffinityToSet, allowedTopologies, allowedTopologyLen, podAntiAffinityToSet, + parallelStatefulSetCreation, false, "", "", nil, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + fss.DeleteAllStatefulSets(client, namespace) + deleteService(namespace, client, service) + }() + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulset, parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + TESTCASE-10 + Deploy workload with default SC parameters with WaitForFirstConsumer + + Steps: + 1. Create a storage class with default parameters + a) SC1 with WFC + b) SC2 with Immediate + 2. Create statefulset with replica-5 using SC1 + 3. Cretate few dynamic PVCs using SC2 and create Pods using the same PVCs + 4. Wait for PVC to reach bound state and POD to reach Running state + 5. Make sure common validation points are met + a) Volumes should get distributed among all the availability zones + b) Verify the PV node affinity details should have appropriate Node details + c) The Pods should be running on the appropriate nodes + 6. Scale-up/scale-down the statefulset + 7. Clean up the data + */ + + ginkgo.It("Deploy workload with default SC parameters with WaitForFirstConsumer", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stsReplicas = 5 + pvcCount := 5 + var podList []*v1.Pod + scaleDownReplicaCount = 3 + scaleUpReplicaCount = 7 + parallelPodPolicy = true + parallelStatefulSetCreation = true + + /* here, For 2-VC setup, we are considering all the allowed topologies of VC1 and VC2 + i.e. k8s-zone -> zone-1,zone-2,zone-3,zone-4,zone-5 + + For 3-VC setup, we are considering all the allowed topologies of VC1, VC2 and VC3 + i.e. 
k8s-zone -> zone-1,zone-2,zone-3 + */ + + ginkgo.By("Create StorageClass with default parameters using WFC binding mode") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, nil, nil, "", + bindingMode, false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create StatefulSet and verify pv affinity and pod affinity details") + service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, + parallelPodPolicy, stsReplicas, nodeAffinityToSet, allowedTopologies, allowedTopologyLen, + podAntiAffinityToSet, parallelStatefulSetCreation, false, "", "", nil, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) + deleteService(namespace, client, service) + }() + + ginkgo.By("Create StorageClass with default parameters using Immediate binding mode") + storageclass, err := createStorageClass(client, nil, nil, "", "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By("Delete Storage Class") + err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Trigger multiple PVCs") + pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, pvcCount, nil) + + ginkgo.By("Verify PVC claim to be in bound phase and create POD for each PVC") + for i := 0; i < len(pvclaimsList); i++ { + var pvclaims []*v1.PersistentVolumeClaim + pvc, err := fpv.WaitForPVClaimBoundPhase(client, + []*v1.PersistentVolumeClaim{pvclaimsList[i]}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(pvc).NotTo(gomega.BeEmpty()) + + pv := getPvFromClaim(client, pvclaimsList[i].Namespace, pvclaimsList[i].Name) + + ginkgo.By("Creating Pod") + pvclaims = append(pvclaims, pvclaimsList[i]) + pod, err := createPod(client, namespace, nil, pvclaims, false, "") + podList = append(podList, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", + pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + vmUUID := getNodeUUID(ctx, client, pod.Spec.NodeName) + isDiskAttached, err := multiVCe2eVSphere.verifyVolumeIsAttachedToVMInMultiVC(client, + pv.Spec.CSI.VolumeHandle, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached") + } + defer func() { + ginkgo.By("Deleting PVC's and PV's") + for i := 0; i < len(pvclaimsList); i++ { + pv := getPvFromClaim(client, pvclaimsList[i].Namespace, pvclaimsList[i].Name) + err = fpv.DeletePersistentVolumeClaim(client, pvclaimsList[i].Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(client, pv.Name, poll, pollTimeoutShort)) + err = multiVCe2eVSphere.waitForCNSVolumeToBeDeletedInMultiVC(pv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + defer func() { + for i := 0; i < len(podList); i++ { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", podList[i].Name, namespace)) + err = fpod.DeletePodWithWait(client, 
podList[i]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By("Verify volume is detached from the node") + for i := 0; i < len(pvclaimsList); i++ { + pv := getPvFromClaim(client, pvclaimsList[i].Namespace, pvclaimsList[i].Name) + isDiskDetached, err := multiVCe2eVSphere.waitForVolumeDetachedFromNodeInMultiVC(client, + pv.Spec.CSI.VolumeHandle, podList[i].Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), + fmt.Sprintf("Volume %q is not detached from the node", pv.Spec.CSI.VolumeHandle)) + } + }() + + ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") + for i := 0; i < len(podList); i++ { + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], + namespace, allowedTopologies, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulset, parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* TESTCASE-11 + Create SC with single AllowedTopologyLabel + + Steps: + 1. Create a SC with specific topology details which is available in any one VC + 2. Create statefulset with replica-5 + 3. Wait for PVC to reach Bound state and Pod to reach Running state + 4. Make sure common validation points are met + a) Volumes should get created under appropriate zone + b) Verify the PV node affinity details should have appropriate node details + c) The Pods should be running on the appropriate nodes + 5. Scale-up/scale-down the statefulset + 6. Verify the node affinity details and also verify the pod details + 7. Clean up the data + */ + + ginkgo.It("Create SC with single allowed topology label on a multivc environment", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + /* Considering partial allowed topology of VC2 in case of 2-VC setup i.e. k8s-zone -> zone-2 + And, allowed topology of VC2 in case of 3-VC setup i.e. 
k8s-zone -> zone-2 + */ + + stsReplicas = 5 + topValStartIndex = 1 + topValEndIndex = 2 + parallelPodPolicy = true + scaleDownReplicaCount = 2 + scaleUpReplicaCount = 5 + + ginkgo.By("Set specific allowed topology") + allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex, + topValEndIndex) + + ginkgo.By("Create StorageClass with allowed topology details of VC2") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, nil, allowedTopologies, "", + bindingMode, false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create StatefulSet and verify pv affinity and pod affinity details") + service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, parallelPodPolicy, + stsReplicas, nodeAffinityToSet, allowedTopologies, allowedTopologyLen, podAntiAffinityToSet, + parallelStatefulSetCreation, false, "", "", nil, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + fss.DeleteAllStatefulSets(client, namespace) + deleteService(namespace, client, service) + }() + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulset, parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* TESTCASE-12 + Create PVC using the wrong StoragePolicy name. Consider partially matching storage policy + + Steps: + 1. Use the partially matching storage policy and create PVC + 2. PVC should not go to bound, appropriate error should be shown + 3. Perform cleanup + */ + + ginkgo.It("PVC creation failure when wrong storage policy name is specified in SC", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + storagePolicyName := "shared-ds-polic" + scParameters[scParamStoragePolicyName] = storagePolicyName + + storageclass, pvclaim, err := createPVCAndStorageClass(client, + namespace, nil, scParameters, "", nil, "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Expect claim to fail as invalid storage policy is specified in Storage Class") + framework.ExpectError(fpv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, + client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)) + expectedErrMsg := "failed to create volume" + err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) + }) + + /* + TESTCASE-13 + Deploy workload With allowed topology of VC1 and datastore url which is in VC2 + + Steps: + 1. Create SC with allowed topology which matches VC1 details and datastore url which is in VC2 + 2. 
PVC should not go to bound, appropriate error should be shown + */ + + ginkgo.It("Deploy workload with allowed topology of VC1 and datastore url of VC2", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + topValStartIndex = 0 + topValEndIndex = 1 + scParameters[scParamDatastoreURL] = datastoreURLVC2 + + ginkgo.By("Set specific allowed topology") + allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex, + topValEndIndex) + + storageclass, pvclaim, err := createPVCAndStorageClass(client, + namespace, nil, scParameters, "", allowedTopologies, "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Expect claim to fail as non-compatible datastore url is specified in Storage Class") + framework.ExpectError(fpv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, + client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)) + expectedErrMsg := "failed to create volume" + err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) + }) + + /* + TESTCASE-14 + Create storage policy in VC1 and VC2 and create storage class with the same and delete Storage policy + in VC1, expected to go to VC2 + + Steps: + 1. Create Storage policy in VC1 and VC2 + 2. Create Storage class with above policy + 3. Delete storage policy from VC1 + 4. Create statefulSet + 5. Expected to provision volume on VC2 + 6. Make sure common validation points are met + 7. Clear data + */ + + ginkgo.It("Create storage policy in multivc and later delete storage policy from one of the VC", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + topValStartIndex = 1 + topValEndIndex = 2 + stsReplicas = 3 + clientIndex := 0 + + scParameters[scParamStoragePolicyName] = storagePolicyToDelete + + /* Considering partial allowed topology of VC2 in case of 2-VC setup i.e. k8s-zone -> zone-2 + And, allowed topology of VC2 in case of 3-VC setup i.e. 
k8s-zone -> zone-2 + */ + + ginkgo.By("Set specific allowed topology") + allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, + topValStartIndex, topValEndIndex) + + ginkgo.By("Create StorageClass") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, scParameters, nil, "", + "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Delete Storage Policy created in VC1") + err = deleteStorageProfile(masterIp, sshClientConfig, storagePolicyToDelete, clientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isStorageProfileDeleted = true + defer func() { + if isStorageProfileDeleted { + err = createStorageProfile(masterIp, sshClientConfig, storagePolicyToDelete, clientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isStorageProfileDeleted = false + } + }() + + ginkgo.By("Create StatefulSet and verify pv affinity and pod affinity details") + service, _, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, parallelPodPolicy, + stsReplicas, nodeAffinityToSet, allowedTopologies, allowedTopologyLen, podAntiAffinityToSet, + parallelStatefulSetCreation, false, "", "", nil, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + fss.DeleteAllStatefulSets(client, namespace) + deleteService(namespace, client, service) + }() + + }) + + /* + TESTCASE-16 + Create Deployment pod using SC with allowed topology set to Specific VC + + Steps: + 1. Create SC with allowedTopology details set to any one VC with Availability Zone + 2. Create PVC using above SC + 3. Wait for PVC to reach bound state + 4. Create deployment and wait for POD to reach Running state + 5. Make sure common validation points are met on PV,PVC and POD + a) Verify the PV node affinity details should have appropriate Node details + b) The POD's should be running on the appropriate nodes which are present in VC1 + c) Verify the node affinity details also verify the POD details. + All the POd's should come up on the nodes of VC + 6. 
Clean up the data + */ + + ginkgo.It("Create Deployment pod using SC with allowed topology set to specific VC", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + topValStartIndex = 2 + topValEndIndex = 3 + var lables = make(map[string]string) + lables["app"] = "nginx" + replica := 1 + + /* here considering partial allowed topology of VC2 in case of 2VC setup i.e k8s-zone -> zone-3 + here considering allowed topology of VC3 in case of 3VC setup i.e k8s-zone -> zone-3 + */ + + ginkgo.By("Set specific allowed topology") + allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex, + topValEndIndex) + + ginkgo.By("Create StorageClass and PVC for Deployment") + sc, pvclaim, err := createPVCAndStorageClass(client, namespace, nil, + nil, diskSize, allowedTopologies, "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Wait for PVC to be in Bound phase + pvclaims = append(pvclaims, pvclaim) + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, + framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, + *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = multiVCe2eVSphere.waitForCNSVolumeToBeDeletedInMultiVC(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvclaim = nil + }() + + ginkgo.By("Create Deployments") + deployment, err := createDeployment(ctx, client, int32(replica), lables, + nil, namespace, pvclaims, "", false, nginxImage) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + framework.Logf("Delete deployment set") + err := client.AppsV1().Deployments(namespace).Delete(ctx, deployment.Name, + *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Verify PV node affinity and that the PODS are running " + + "on appropriate node as specified in the allowed topologies of SC") + err = verifyPVnodeAffinityAndPODnodedetailsForDeploymentSetsLevel5(ctx, client, deployment, + namespace, allowedTopologies, false, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + TESTCASE-17 + Create SC with invalid allowed topology details - NEGETIVE + + Steps: + 1. Create SC with invalid label details + 2. Create PVC, Pvc should not reach bound state. 
It should throuw error + */ + + ginkgo.It("Create SC with invalid allowed topology details", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + topValStartIndex = 1 + topValEndIndex = 1 + + ginkgo.By("Set invalid allowed topology for SC") + allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex, + topValEndIndex) + allowedTopologies[0].Values = []string{"new-zone"} + + storageclass, err := createStorageClass(client, nil, allowedTopologies, "", "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, + *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + pvc, err := createPVC(client, namespace, nil, "", storageclass, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if pvc != nil { + ginkgo.By("Delete the PVC") + err = fpv.DeletePersistentVolumeClaim(client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Expect claim to fail as invalid topology label is specified in Storage Class") + framework.ExpectError(fpv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, + client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)) + expectedErrMsg := "failed to fetch vCenter associated with topology segments" + err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvc.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) + }) + + /* + TESTCASE-18 + Verify online and offline Volume expansion + + Steps: + 1. Create SC default values, so all the AZ's should be considered for provisioning. + 2. Create PVC and wait for it to bound + 3. Edit PVC and trigger offline volume expansion + 4. Create POD , Wait for POD to reach running state + 5. Describe PVC and verify that new size should be updated on PVC + 6. Edit the same PVC again to test Online volume expansion + 7. Wait for resize to complete + 8. Verify the newly updated size on PV and PVC + 9. 
Clean up the data + */ + + ginkgo.It("Offline and online volume expansion on a multivc setup", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var pvclaims []*v1.PersistentVolumeClaim + + ginkgo.By("Create StorageClass") + storageclass, pvclaim, err := createPVCAndStorageClass(client, namespace, nil, nil, "", + nil, "", true, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Expect claim to provision volume successfully") + pvclaims = append(pvclaims, pvclaim) + pv, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := pv[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = multiVCe2eVSphere.waitForCNSVolumeToBeDeletedInMultiVC(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Perform offline volume expansion on PVC") + err = performOfflineVolumeExpansin(client, pvclaim, volHandle, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating pod to attach PV to the node") + pod, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + var vmUUID string + nodeName := pod.Spec.NodeName + + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + } + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volHandle, nodeName)) + isDiskAttached, err := multiVCe2eVSphere.verifyVolumeIsAttachedToVMInMultiVC(client, volHandle, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify volume is detached from the node") + isDiskDetached, err := multiVCe2eVSphere.waitForVolumeDetachedFromNodeInMultiVC(client, + pv[0].Spec.CSI.VolumeHandle, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), + fmt.Sprintf("Volume %q is not detached from the node", pv[0].Spec.CSI.VolumeHandle)) + }() + + ginkgo.By("Perform online volume expansion on PVC") + err = performOnlineVolumeExpansin(f, client, pvclaim, namespace, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PV node affinity and that the PODS are running " + + "on appropriate node as specified in the allowed topologies of SC") + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, + namespace, allowedTopologies, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + TESTCASE-19 + Create a workload and try to reboot one of the VC - NEGETIVE + + Steps: + 1. Create SC default values, so all the AZ's should be considered for provisioning. + 2. Reboot any one VC1 + 3. Create 3 statefulset each with 5 replica + 4. Since one VC1 is rebooting, volume should get provisioned on another VC till the VC1 Comes up + 5. 
Wait for the statefulset, PVC's and POD's should be in up and running state + 6. After the VC came to running state scale up/down the statefull sets + 7. Newly created statefull set should get distributed among both the VC's + 8. Make sure common validation points are met on PV,PVC and POD + 9. Perform Cleanup + */ + + ginkgo.It("Create workload and reboot one of the VC", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stsReplicas = 5 + sts_count := 3 + parallelStatefulSetCreation = true + scaleUpReplicaCount = 9 + scaleDownReplicaCount = 1 + + ginkgo.By("Create StorageClass") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, nil, nil, "", + "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + + ginkgo.By("Rebooting VC1") + vCenterHostname := strings.Split(multiVCe2eVSphere.multivcConfig.Global.VCenterHostname, ",") + vcAddress := vCenterHostname[0] + ":" + sshdPort + framework.Logf("vcAddress - %s ", vcAddress) + err = invokeVCenterReboot(vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = waitForHostToBeUp(vCenterHostname[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Done with reboot") + + ginkgo.By("Create 3 StatefulSet with replica count 5") + statefulSets := createParallelStatefulSetSpec(namespace, sts_count, stsReplicas) + var wg sync.WaitGroup + wg.Add(sts_count) + for i := 0; i < len(statefulSets); i++ { + go createParallelStatefulSets(client, namespace, statefulSets[i], + stsReplicas, &wg) + + } + wg.Wait() + + essentialServices := []string{spsServiceName, vsanhealthServiceName, vpxdServiceName} + checkVcenterServicesRunning(ctx, vcAddress, essentialServices) + + //After reboot + multiVCbootstrap() + + ginkgo.By("Waiting for StatefulSets Pods to be in Ready State") + err = waitForStsPodsToBeInReadyRunningState(ctx, client, namespace, statefulSets) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) + }() + + ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") + for i := 0; i < len(statefulSets); i++ { + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulSets[i], + namespace, allowedTopologies, parallelStatefulSetCreation, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulSets[0], parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* TESTCASE-20 + Storage policy is present in VC1 and VC1 is under reboot + + Steps: + 1. Create SC with specific storage policy , Which is present in VC1 + 2. Create Statefulsets using the above SC and reboot VC1 at the same time + 3. Since VC1 is under reboot , volume creation should be in pendig state till the VC1 is up + 4. 
Once VC1 is up, all the volumes should come up on the worker nodes that are in VC1
+ 5. Make sure common validation points are met on PV, PVC and POD
+ 6. Clean up the data
+ */
+
+ ginkgo.It("Storage policy is present in VC1 and VC1 is under reboot", func() {
+     ctx, cancel := context.WithCancel(context.Background())
+     defer cancel()
+
+     stsReplicas = 3
+     scParameters[scParamStoragePolicyName] = storagePolicyInVc1
+     parallelStatefulSetCreation = true
+     scaleDownReplicaCount = 2
+     scaleUpReplicaCount = 7
+     topValStartIndex = 0
+     topValEndIndex = 1
+     sts_count := 2
+
+     // here, we are considering the allowed topology of VC1 i.e. k8s-zone -> zone-1
+
+     ginkgo.By("Set specific allowed topology")
+     allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex,
+         topValEndIndex)
+
+     ginkgo.By("Create StorageClass with storage policy specified")
+     scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, scParameters, nil, "",
+         "", false)
+     sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{})
+     gomega.Expect(err).NotTo(gomega.HaveOccurred())
+     defer func() {
+         err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0))
+         gomega.Expect(err).NotTo(gomega.HaveOccurred())
+     }()
+
+     ginkgo.By("Create service")
+     service := CreateService(namespace, client)
+     defer func() {
+         deleteService(namespace, client, service)
+     }()
+
+     ginkgo.By("Rebooting VC1")
+     vCenterHostname := strings.Split(multiVCe2eVSphere.multivcConfig.Global.VCenterHostname, ",")
+     vcAddress := vCenterHostname[0] + ":" + sshdPort
+     framework.Logf("vcAddress - %s ", vcAddress)
+     err = invokeVCenterReboot(vcAddress)
+     gomega.Expect(err).NotTo(gomega.HaveOccurred())
+     err = waitForHostToBeUp(vCenterHostname[0])
+     gomega.Expect(err).NotTo(gomega.HaveOccurred())
+     ginkgo.By("Done with reboot")
+
+     ginkgo.By("Create 2 StatefulSets with replica count 3")
+     statefulSets := createParallelStatefulSetSpec(namespace, sts_count, stsReplicas)
+     var wg sync.WaitGroup
+     wg.Add(sts_count)
+     for i := 0; i < len(statefulSets); i++ {
+         go createParallelStatefulSets(client, namespace, statefulSets[i],
+             stsReplicas, &wg)
+     }
+     wg.Wait()
+
+     essentialServices := []string{spsServiceName, vsanhealthServiceName, vpxdServiceName}
+     checkVcenterServicesRunning(ctx, vcAddress, essentialServices)
+
+     // After reboot
+     multiVCbootstrap()
+
+     ginkgo.By("Waiting for StatefulSets Pods to be in Ready State")
+     err = waitForStsPodsToBeInReadyRunningState(ctx, client, namespace, statefulSets)
+     gomega.Expect(err).NotTo(gomega.HaveOccurred())
+     defer func() {
+         deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace)
+     }()
+
+     ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node")
+     for i := 0; i < len(statefulSets); i++ {
+         err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulSets[i],
+             namespace, allowedTopologies, parallelStatefulSetCreation, true)
+         gomega.Expect(err).NotTo(gomega.HaveOccurred())
+     }
+
+     ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " +
+         "verify pv affinity and pod affinity")
+     for i := 0; i < len(statefulSets); i++ {
+         err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount,
+             scaleDownReplicaCount, statefulSets[i], parallelStatefulSetCreation, namespace,
+             allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity)
+         gomega.Expect(err).NotTo(gomega.HaveOccurred())
+     }
+ })
+
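+ // The reboot flow in the test above leans on suite helpers such as waitForHostToBeUp and
+ // checkVcenterServicesRunning, which are defined elsewhere in this repository. Purely as a
+ // hedged, illustrative sketch (the probe, timeout and interval are assumptions, not the
+ // suite's real values), a generic poll helper in that spirit could look like this:
+ waitUntilTrue := func(timeout, interval time.Duration, probe func() bool) bool {
+     deadline := time.Now().Add(timeout)
+     for time.Now().Before(deadline) {
+         if probe() {
+             return true
+         }
+         time.Sleep(interval)
+     }
+     return false
+ }
+ _ = waitUntilTrue // illustrative only; the tests call their own wait helpers
+
+ /* TESTCASE-21
+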
VSAN-health down on VC1 + + Steps: + 1. Create SC default values, so all the AZ's should be considered for provisioning. + 2. Create dynamic PVC's and Add labels to PVC's and PV's + 3. Bring down the VSAN-health on VC1 + 4. Create two statefulset each with replica 5 + 5. Since VSAN-health on VC1 is down all the volumes should get created under VC2 availability zones + 6. Verify the PV node affinity and the nodes on which Pods have come should be appropriate + 7. Update the label on PV and PVC + 8. Bring up the VSAN-health + 9. Scale up both the statefull set to 10 + 10. Wait for 2 Full sync cycle + 11. Verify CNS-metadata for the volumes that are created + 12. Verify that the volume provisioning should resume on VC1 as well + 13. Make sure common validation points are met on PV,PVC and POD + 14. Scale down the statefull sets to 1 + 15. Clean up the data + */ + + ginkgo.It("Create workloads when VSAN-health is down on VC1", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stsReplicas = 5 + pvcCount := 3 + scaleDownReplicaCount = 1 + scaleUpReplicaCount = 10 + parallelPodPolicy = true + parallelStatefulSetCreation = true + labelKey := "volId" + labelValue := "pvcVolume" + labels := make(map[string]string) + labels[labelKey] = labelValue + sts_count := 2 + var fullSyncWaitTime int + var err error + + // Read full-sync value. + if os.Getenv(envFullSyncWaitTime) != "" { + fullSyncWaitTime, err = strconv.Atoi(os.Getenv(envFullSyncWaitTime)) + framework.Logf("Full-Sync interval time value is = %v", fullSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + /* here, For 2-VC setup, we are considering all the allowed topologies of VC1 and VC2 + i.e. k8s-zone -> zone-1,zone-2,zone-3,zone-4,zone-5 + + For 3-VC setup, we are considering all the allowed topologies of VC1, VC2 and VC3 + i.e. 
k8s-zone -> zone-1,zone-2,zone-3 + */ + + ginkgo.By("Create StorageClass with default parameters") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, nil, nil, "", + "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Trigger multiple PVCs") + pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, sc, pvcCount, labels) + + ginkgo.By("Verify PVC claim to be in bound phase and create POD for each PVC") + for i := 0; i < len(pvclaimsList); i++ { + pvc, err := fpv.WaitForPVClaimBoundPhase(client, + []*v1.PersistentVolumeClaim{pvclaimsList[i]}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(pvc).NotTo(gomega.BeEmpty()) + + } + defer func() { + ginkgo.By("Deleting PVC's and PV's") + for i := 0; i < len(pvclaimsList); i++ { + pv := getPvFromClaim(client, pvclaimsList[i].Namespace, pvclaimsList[i].Name) + err = fpv.DeletePersistentVolumeClaim(client, pvclaimsList[i].Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(client, pv.Name, poll, pollTimeoutShort)) + err = multiVCe2eVSphere.waitForCNSVolumeToBeDeletedInMultiVC(pv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) + }() + + ginkgo.By("Bring down Vsan-health service on VC1") + vCenterHostname := strings.Split(multiVCe2eVSphere.multivcConfig.Global.VCenterHostname, ",") + vcAddress := vCenterHostname[0] + ":" + sshdPort + framework.Logf("vcAddress - %s ", vcAddress) + err = invokeVCenterServiceControl(stopOperation, vsanhealthServiceName, vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isVsanHealthServiceStopped = true + defer func() { + if isVsanHealthServiceStopped { + framework.Logf("Bringing vsanhealth up before terminating the test") + startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped) + } + }() + + ginkgo.By("Create 2 StatefulSet with replica count 5 when vsan-health is down") + statefulSets := createParallelStatefulSetSpec(namespace, sts_count, stsReplicas) + var wg sync.WaitGroup + wg.Add(sts_count) + for i := 0; i < len(statefulSets); i++ { + go createParallelStatefulSets(client, namespace, statefulSets[i], + stsReplicas, &wg) + } + wg.Wait() + + labelKey = "volIdNew" + labelValue = "pvcVolumeNew" + labels = make(map[string]string) + labels[labelKey] = labelValue + + ginkgo.By("Updating labels for PVC and PV") + for i := 0; i < len(pvclaimsList); i++ { + pv := getPvFromClaim(client, pvclaimsList[i].Namespace, pvclaimsList[i].Name) + framework.Logf("Updating labels %+v for pvc %s in namespace %s", labels, pvclaimsList[i].Name, + pvclaimsList[i].Namespace) + pvc, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvclaimsList[i].Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvc.Labels = labels + _, err = client.CoreV1().PersistentVolumeClaims(namespace).Update(ctx, pvc, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + framework.Logf("Updating labels %+v for pv %s", labels, pv.Name) + pv.Labels = labels + _, err = client.CoreV1().PersistentVolumes().Update(ctx, pv, 
metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By(fmt.Sprintln("Starting vsan-health on the vCenter host")) + startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped) + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow full sync finish", fullSyncWaitTime)) + time.Sleep(time.Duration(fullSyncWaitTime) * time.Second) + + ginkgo.By("Waiting for labels to be updated for PVC and PV") + for i := 0; i < len(pvclaimsList); i++ { + pv := getPvFromClaim(client, pvclaimsList[i].Namespace, pvclaimsList[i].Name) + + framework.Logf("Waiting for labels %+v to be updated for pvc %s in namespace %s", + labels, pvclaimsList[i].Name, pvclaimsList[i].Namespace) + err = multiVCe2eVSphere.waitForLabelsToBeUpdatedInMultiVC(pv.Spec.CSI.VolumeHandle, labels, + string(cnstypes.CnsKubernetesEntityTypePVC), pvclaimsList[i].Name, pvclaimsList[i].Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + framework.Logf("Waiting for labels %+v to be updated for pv %s", labels, pv.Name) + err = multiVCe2eVSphere.waitForLabelsToBeUpdatedInMultiVC(pv.Spec.CSI.VolumeHandle, labels, + string(cnstypes.CnsKubernetesEntityTypePV), pv.Name, pv.Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Waiting for StatefulSets Pods to be in Ready State") + err = waitForStsPodsToBeInReadyRunningState(ctx, client, namespace, statefulSets) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") + for i := 0; i < len(statefulSets); i++ { + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulSets[i], + namespace, allowedTopologies, parallelStatefulSetCreation, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulSets[0], parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* TESTCASE-22 + SPS down on VC2 + use storage policy while creating statefulset + + Steps: + 1. Create SC with storage policy which is in both VCs i.e. VC1 and VC2 + 2. Create Statefulset using the same storage policy + 3. Bring down the SPS on VC1 + 4. create two statefulset each with replica 5 + 5. Since SPS on VC1 is down all the volumes should get created under VC2 availability zones + [Note: if any VC or its service is down, volume provisioning will no go through on those + Pods which are provisioned on the VC where service is down] + till all VCs came to up and running state] + 6. Verify the PV node affinity and the nodes on which Pods have come should be appropriate + 7. Bring up the SPS service + 8. Scale up both the Statefulset to 10 + 9. Verify that the volume provisioning should resume on VC1 as well + 10. Make sure common validation points are met on PV,PVC and POD + 11. Scale down the statefull sets to 1 + 12. 
Clean up the data + */ + + ginkgo.It("Create workloads with storage policy given in SC and when sps service is down", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + /* here we are considering storage policy of VC1 and VC2 so + the allowed topology to be considered as for 2-VC and 3-VC setup is k8s-zone -> zone-1,zone-2 + + */ + + stsReplicas = 5 + scParameters[scParamStoragePolicyName] = storagePolicyInVc1Vc2 + topValStartIndex = 0 + topValEndIndex = 2 + scaleUpReplicaCount = 10 + scaleDownReplicaCount = 1 + sts_count := 2 + parallelStatefulSetCreation = true + parallelPodPolicy = true + + ginkgo.By("Set specific allowed topology") + allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex, + topValEndIndex) + + ginkgo.By("Create StorageClass with storage policy specified") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, scParameters, nil, "", + "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create StatefulSet and verify pv affinity and pod affinity details") + service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, parallelPodPolicy, + stsReplicas, nodeAffinityToSet, allowedTopologies, allowedTopologyLen, podAntiAffinityToSet, + parallelStatefulSetCreation, false, "", "", nil, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) + deleteService(namespace, client, service) + }() + + ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, parallelStatefulSetCreation, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Bring down SPS service") + vCenterHostname := strings.Split(multiVCe2eVSphere.multivcConfig.Global.VCenterHostname, ",") + vcAddress := vCenterHostname[0] + ":" + sshdPort + framework.Logf("vcAddress - %s ", vcAddress) + err = invokeVCenterServiceControl(stopOperation, spsServiceName, vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isSPSServiceStopped = true + err = waitVCenterServiceToBeInState(spsServiceName, vcAddress, svcStoppedMessage) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if isSPSServiceStopped { + framework.Logf("Bringing sps up before terminating the test") + startVCServiceWait4VPs(ctx, vcAddress, spsServiceName, &isSPSServiceStopped) + isSPSServiceStopped = false + } + }() + + ginkgo.By("Create 2 StatefulSet with replica count 5 when sps-service is down") + statefulSets := createParallelStatefulSetSpec(namespace, sts_count, stsReplicas) + var wg sync.WaitGroup + wg.Add(sts_count) + for i := 0; i < len(statefulSets); i++ { + go createParallelStatefulSets(client, namespace, statefulSets[i], + stsReplicas, &wg) + } + wg.Wait() + + ginkgo.By("Bringup SPS service") + startVCServiceWait4VPs(ctx, vcAddress, spsServiceName, &isSPSServiceStopped) + + framework.Logf("Waiting for %v seconds for testbed to be in the normal state", pollTimeoutShort) + time.Sleep(pollTimeoutShort) + + ginkgo.By("Waiting for StatefulSets Pods to be in Ready 
State") + err = waitForStsPodsToBeInReadyRunningState(ctx, client, namespace, statefulSets) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") + for i := 0; i < len(statefulSets); i++ { + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulSets[i], + namespace, allowedTopologies, parallelStatefulSetCreation, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulSets[0], parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }) +}) diff --git a/tests/e2e/multi_vc_bootstrap.go b/tests/e2e/multi_vc_bootstrap.go new file mode 100644 index 0000000000..ff66311705 --- /dev/null +++ b/tests/e2e/multi_vc_bootstrap.go @@ -0,0 +1,54 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + + "github.com/onsi/gomega" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/testfiles" +) + +var multiVCe2eVSphere multiVCvSphere +var multiVCtestConfig *e2eTestConfig + +/* +multiVCbootstrap function takes care of initializing necessary tests context for e2e tests +*/ +func multiVCbootstrap(withoutDc ...bool) { + var err error + multiVCtestConfig, err = getConfig() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if len(withoutDc) > 0 { + if withoutDc[0] { + (*multiVCtestConfig).Global.Datacenters = "" + } + } + multiVCe2eVSphere = multiVCvSphere{ + multivcConfig: multiVCtestConfig, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // connects and verifies multiple VC connections + connectMultiVC(ctx, &multiVCe2eVSphere) + + if framework.TestContext.RepoRoot != "" { + testfiles.AddFileSource(testfiles.RootFileSource{Root: framework.TestContext.RepoRoot}) + } + framework.TestContext.Provider = "vsphere" +} diff --git a/tests/e2e/multi_vc_config_secret.go b/tests/e2e/multi_vc_config_secret.go new file mode 100644 index 0000000000..1a036ba143 --- /dev/null +++ b/tests/e2e/multi_vc_config_secret.go @@ -0,0 +1,1059 @@ +/* + Copyright 2023 The Kubernetes Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package e2e + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "golang.org/x/crypto/ssh" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" + fnodes "k8s.io/kubernetes/test/e2e/framework/node" + fpod "k8s.io/kubernetes/test/e2e/framework/pod" + fpv "k8s.io/kubernetes/test/e2e/framework/pv" + fss "k8s.io/kubernetes/test/e2e/framework/statefulset" + admissionapi "k8s.io/pod-security-admission/api" +) + +var _ = ginkgo.Describe("[csi-multi-vc-config-secret] Multi-VC-Config-Secret", func() { + f := framework.NewDefaultFramework("multi-vc-config-secret") + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + var ( + client clientset.Interface + namespace string + csiNamespace string + csiReplicas int32 + podAntiAffinityToSet bool + stsScaleUp bool + stsScaleDown bool + verifyTopologyAffinity bool + allowedTopologies []v1.TopologySelectorLabelRequirement + scaleUpReplicaCount int32 + scaleDownReplicaCount int32 + allowedTopologyLen int + nodeAffinityToSet bool + parallelStatefulSetCreation bool + stsReplicas int32 + parallelPodPolicy bool + originalVC1PasswordChanged bool + originalVC3PasswordChanged bool + vCenterIP string + vCenterUser string + vCenterPassword string + vCenterPort string + dataCenter string + err error + revertToOriginalVsphereConf bool + multiVCSetupType string + allMasterIps []string + sshClientConfig *ssh.ClientConfig + nimbusGeneratedK8sVmPwd string + revertToOriginalCsiYaml bool + newNamespace *v1.Namespace + ) + + ginkgo.BeforeEach(func() { + var cancel context.CancelFunc + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + client = f.ClientSet + namespace = f.Namespace.Name + multiVCbootstrap() + + sc, err := client.StorageV1().StorageClasses().Get(ctx, defaultNginxStorageClassName, metav1.GetOptions{}) + if err == nil && sc != nil { + gomega.Expect(client.StorageV1().StorageClasses().Delete(ctx, sc.Name, + *metav1.NewDeleteOptions(0))).NotTo(gomega.HaveOccurred()) + } + + nodeList, err := fnodes.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + if !(len(nodeList.Items) > 0) { + framework.Failf("Unable to find ready and schedulable Node") + } + + verifyTopologyAffinity = true + stsScaleUp = true + stsScaleDown = true + + // read namespace + csiNamespace = GetAndExpectStringEnvVar(envCSINamespace) + csiDeployment, err := client.AppsV1().Deployments(csiNamespace).Get( + ctx, vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + csiReplicas = *csiDeployment.Spec.Replicas + + // read testbed topology map + topologyMap := GetAndExpectStringEnvVar(topologyMap) + allowedTopologies = createAllowedTopolgies(topologyMap, topologyLength) + + // save original vsphere conf credentials in temp variable + vCenterIP = multiVCe2eVSphere.multivcConfig.Global.VCenterHostname + vCenterUser = multiVCe2eVSphere.multivcConfig.Global.User + vCenterPassword = multiVCe2eVSphere.multivcConfig.Global.Password + vCenterPort = multiVCe2eVSphere.multivcConfig.Global.VCenterPort + dataCenter = multiVCe2eVSphere.multivcConfig.Global.Datacenters + + // read type of multi-vc setup + multiVCSetupType = GetAndExpectStringEnvVar(envMultiVCSetupType) + nimbusGeneratedK8sVmPwd = 
GetAndExpectStringEnvVar(nimbusK8sVmPwd) + + sshClientConfig = &ssh.ClientConfig{ + User: "root", + Auth: []ssh.AuthMethod{ + ssh.Password(nimbusGeneratedK8sVmPwd), + }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + + // fetching k8s master ip + allMasterIps = getK8sMasterIPs(ctx, client) + }) + + ginkgo.AfterEach(func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if revertToOriginalVsphereConf { + ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + + "and its credentials") + vsphereCfg, err := readVsphereConfSecret(client, ctx, csiNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + vsphereCfg.Global.VCenterHostname = vCenterIP + vsphereCfg.Global.User = vCenterUser + vsphereCfg.Global.Password = vCenterPassword + vsphereCfg.Global.VCenterPort = vCenterPort + vsphereCfg.Global.Datacenters = dataCenter + err = writeNewDataAndUpdateVsphereConfSecret(client, ctx, csiNamespace, vsphereCfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Restart CSI driver") + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) + gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if originalVC1PasswordChanged { + clientIndex := 0 + vcAddress := strings.Split(vCenterIP, ",")[0] + ":" + sshdPort + username := strings.Split(vCenterUser, ",")[0] + originalPassword := strings.Split(vCenterPassword, ",")[0] + newPassword := e2eTestPassword + ginkgo.By("Reverting the password change") + err = invokeVCenterChangePassword(username, newPassword, originalPassword, vcAddress, true, + clientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if originalVC3PasswordChanged { + clientIndex2 := 2 + vcAddress3 := strings.Split(vCenterIP, ",")[2] + ":" + sshdPort + username3 := strings.Split(vCenterUser, ",")[2] + originalPassword3 := strings.Split(vCenterPassword, ",")[2] + newPassword3 := "Admin!23" + ginkgo.By("Reverting the password change") + err = invokeVCenterChangePassword(username3, newPassword3, originalPassword3, vcAddress3, true, + clientIndex2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + originalVC3PasswordChanged = false + } + + if revertToOriginalCsiYaml { + vsphereCfg, err := readVsphereConfSecret(client, ctx, csiNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + vsphereCfg.Global.VCenterHostname = vCenterIP + vsphereCfg.Global.User = vCenterUser + vsphereCfg.Global.Password = vCenterPassword + vsphereCfg.Global.VCenterPort = vCenterPort + vsphereCfg.Global.Datacenters = dataCenter + + ginkgo.By("Recreate config secret on a default csi system namespace") + err = deleteVsphereConfigSecret(client, ctx, newNamespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = createVsphereConfigSecret(csiNamespace, vsphereCfg, sshClientConfig, allMasterIps) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Revert vsphere CSI driver on a default csi system namespace") + err = setNewNameSpaceInCsiYaml(client, sshClientConfig, newNamespace.Name, csiNamespace, allMasterIps) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }) + + /* TESTCASE-1 + Change VC password on one of the VC, also update the vsphere-csi-secret + + // Steps + 1. Create SC with all topologies present in allowed Topology list + 2. Change the VC UI password for any one VC, Update the same in "CSI config secret" file and re-create the + secret + 3. Re-start the CSI driver + 4. 
Wait for some time, CSI will auto identify the change in vsphere-secret file and get updated + 5. Create Statefulset. PVCs and Pods should be in bound and running state. + 6. Make sure all the common verification points are met + a) Verify node affinity on all the PV's + b) Verify that POD should be up and running on the appropriate nodes + 7. Scale-up/Scale-down the statefulset + 8. Clean up the data + */ + + ginkgo.It("Change vCenter password on one of the multi-vc setup and update the same "+ + "in csi vsphere conf", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + clientIndex := 0 + stsReplicas = 3 + scaleUpReplicaCount = 5 + scaleDownReplicaCount = 2 + + ginkgo.By("Create StorageClass with all allowed topolgies set") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, nil, allowedTopologies, "", + "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // read original vsphere config secret + vsphereCfg, err := readVsphereConfSecret(client, ctx, csiNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + vcAddress := strings.Split(vsphereCfg.Global.VCenterHostname, ",")[0] + ":" + sshdPort + framework.Logf("vcAddress - %s ", vcAddress) + username := strings.Split(vsphereCfg.Global.User, ",")[0] + originalPassword := strings.Split(vsphereCfg.Global.Password, ",")[0] + newPassword := e2eTestPassword + ginkgo.By(fmt.Sprintf("Original password %s, new password %s", originalPassword, newPassword)) + + ginkgo.By("Changing password on the vCenter VC1 host") + err = invokeVCenterChangePassword(username, originalPassword, newPassword, vcAddress, true, clientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + originalVC1PasswordChanged = true + + // here 0 indicates that we are updating only VC1 password + ginkgo.By("Create vsphere-config-secret file with new VC1 password") + passwordList := strings.Split(vsphereCfg.Global.Password, ",") + passwordList[0] = newPassword + vsphereCfg.Global.Password = strings.Join(passwordList, ",") + + // here we are writing new password of VC1 and later updating vsphere config secret + err = writeNewDataAndUpdateVsphereConfSecret(client, ctx, csiNamespace, vsphereCfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + revertToOriginalVsphereConf = true + defer func() { + if originalVC1PasswordChanged { + ginkgo.By("Reverting the password change") + err = invokeVCenterChangePassword(username, newPassword, originalPassword, vcAddress, true, + clientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + originalVC1PasswordChanged = false + } + + if revertToOriginalVsphereConf { + ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + + "and its credentials") + vsphereCfg.Global.VCenterHostname = vCenterIP + vsphereCfg.Global.User = vCenterUser + vsphereCfg.Global.Password = vCenterPassword + vsphereCfg.Global.VCenterPort = vCenterPort + vsphereCfg.Global.Datacenters = dataCenter + err = writeNewDataAndUpdateVsphereConfSecret(client, ctx, csiNamespace, vsphereCfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + revertToOriginalVsphereConf = false + + ginkgo.By("Restart CSI driver") + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) + 
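+ // restartCSIDriver above is a suite helper defined elsewhere. As a hedged sketch of one
+ // plausible way such a restart can be done with client-go (not necessarily how the helper
+ // is actually implemented, and without the readiness wait the real helper performs), the
+ // CSI controller Deployment can be scaled to zero and back to its original replica count:
+ exampleRestartCSIDeployment := func(ctx context.Context, c clientset.Interface, ns string, replicas int32) error {
+     scale, err := c.AppsV1().Deployments(ns).GetScale(ctx, vSphereCSIControllerPodNamePrefix, metav1.GetOptions{})
+     if err != nil {
+         return err
+     }
+     scale.Spec.Replicas = 0
+     scaledDown, err := c.AppsV1().Deployments(ns).UpdateScale(ctx, vSphereCSIControllerPodNamePrefix,
+         scale, metav1.UpdateOptions{})
+     if err != nil {
+         return err
+     }
+     scaledDown.Spec.Replicas = replicas
+     _, err = c.AppsV1().Deployments(ns).UpdateScale(ctx, vSphereCSIControllerPodNamePrefix,
+         scaledDown, metav1.UpdateOptions{})
+     return err
+ }
+ _ = exampleRestartCSIDeployment // illustrative only; this test relies on restartCSIDriver above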
gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + }() + + ginkgo.By("Restart CSI driver") + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) + gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create StatefulSet and verify pv affinity and pod affinity details") + service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, + parallelPodPolicy, stsReplicas, nodeAffinityToSet, allowedTopologies, allowedTopologyLen, + podAntiAffinityToSet, parallelStatefulSetCreation, false, "", "", nil, verifyTopologyAffinity) + defer func() { + fss.DeleteAllStatefulSets(client, namespace) + deleteService(namespace, client, service) + }() + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulset, parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* Testcase-2 + copy same VC details twice in csi-vsphere conf - NEGETIVE + + In csi-vsphere conf, copy VC1's details twice , mention VC2 details once and create secret + Observe the system behaviour , Expectation is CSI pod's should show CLBO or should show error + */ + + ginkgo.It("Copy same vCenter details twice in csi vsphere conf in a multi-vc setup", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // read original vsphere config secret + vsphereCfg, err := readVsphereConfSecret(client, ctx, csiNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + /* here we are updating only vCenter IP and vCenter Password, rest all other vCenter credentials will + remain same + VC1 and VC3 credentials will be same in conf secret, VC2 will be having its exact credentials + + Note: For this scenario, we will not be doing any restart of service + */ + + ginkgo.By("Create vsphere-config-secret file with same VC credential details 2 times") + if multiVCSetupType == "multi-2vc-setup" { + // copying VC1 IP to VC2 IP + vCenterIPList := strings.Split(vsphereCfg.Global.VCenterHostname, ",") + vCenterIPList[1] = vCenterIPList[0] + vsphereCfg.Global.VCenterHostname = strings.Join(vCenterIPList, ",") + + // assigning new Password to VC2 + passwordList := strings.Split(vsphereCfg.Global.Password, ",") + passwordList[1] = e2eTestPassword + vsphereCfg.Global.Password = strings.Join(passwordList, ",") + } else if multiVCSetupType == "multi-3vc-setup" { + // copying VC1 IP to VC3 IP + vCenterIPList := strings.Split(vsphereCfg.Global.VCenterHostname, ",") + vCenterIPList[2] = vCenterIPList[0] + vsphereCfg.Global.VCenterHostname = strings.Join(vCenterIPList, ",") + + // assigning new Password to VC3 + passwordList := strings.Split(vsphereCfg.Global.Password, ",") + passwordList[2] = e2eTestPassword + vsphereCfg.Global.Password = strings.Join(passwordList, ",") + } + + /* here we are copying VC1 credentials to VC3 credentials in case of 3-VC setup and + VC1 credentials to VC2 credentials in case of 2-VC setup and later updating vsphere config secret */ + + err = writeNewDataAndUpdateVsphereConfSecret(client, ctx, csiNamespace, vsphereCfg) + 
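+ // writeNewDataAndUpdateVsphereConfSecret above is a suite helper defined elsewhere. A minimal
+ // sketch of the underlying client-go flow, assuming the conventional secret name
+ // "vsphere-config-secret" and data key "csi-vsphere.conf" and leaving the serialization of
+ // vsphereCfg to the caller, might look like this (not a statement of the helper's real code):
+ exampleUpdateConfSecret := func(ctx context.Context, c clientset.Interface, ns, confFileContents string) error {
+     secret, err := c.CoreV1().Secrets(ns).Get(ctx, "vsphere-config-secret", metav1.GetOptions{})
+     if err != nil {
+         return err
+     }
+     if secret.Data == nil {
+         secret.Data = make(map[string][]byte)
+     }
+     secret.Data["csi-vsphere.conf"] = []byte(confFileContents)
+     _, err = c.CoreV1().Secrets(ns).Update(ctx, secret, metav1.UpdateOptions{})
+     return err
+ }
+ _ = exampleUpdateConfSecret // illustrative only; this test relies on the helper call above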
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + revertToOriginalVsphereConf = true + defer func() { + if revertToOriginalVsphereConf { + ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + + "and its credentials") + vsphereCfg.Global.VCenterHostname = vCenterIP + vsphereCfg.Global.User = vCenterUser + vsphereCfg.Global.Password = vCenterPassword + vsphereCfg.Global.VCenterPort = vCenterPort + vsphereCfg.Global.Datacenters = dataCenter + err = writeNewDataAndUpdateVsphereConfSecret(client, ctx, csiNamespace, vsphereCfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + revertToOriginalVsphereConf = false + + ginkgo.By("Restart CSI driver") + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) + gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + }() + + framework.Logf("Wait for %v for csi to auto-detect the config secret changes", pollTimeout) + time.Sleep(pollTimeout) + + framework.Logf("Verify CSI Pods are in CLBO state") + deploymentPods, err := client.AppsV1().Deployments(csiNamespace).Get(ctx, vSphereCSIControllerPodNamePrefix, + metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if deploymentPods.Status.UnavailableReplicas != 0 { + framework.Logf("CSI Pods are in CLBO state") + } + + ginkgo.By("Try to create a PVC verify that it is stuck in pending state") + storageclass, pvclaim, err := createPVCAndStorageClass(client, namespace, nil, nil, "", + allowedTopologies, "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Expect claim to fail as invalid storage policy is specified in Storage Class") + framework.ExpectError(fpv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, + client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)) + expectedErrMsg := "waiting for a volume to be created" + err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) + }) + + /* Testcase-3 + In csi-vsphere conf, use VC-hostname instead of VC-IP for one VC and try to switch the same during + a workload vcreation + + 1. In csi-vsphere conf, for VC1 use VC-IP, for VC2 use VC-hostname + 2. Create SC + 3. Create Statefulset of replica 10 + 4. Try to switch the VC-ip and VC-hostname in config secret and re-create + 5. Reboot CSI-driver to consider the new change + 6. Make sure Statefulset creation should be successful + 7. Verify the node affinity rules + 8. Verify the list-volume response in CSI logs + [This will be covered in list volume testcase] + 9. 
Clean up the data + */ + + ginkgo.It("Use VC-hostname instead of VC-IP for one VC and try to switch the same during "+ "workload creation in a multivc setup", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stsReplicas = 10 + scaleUpReplicaCount = 5 + scaleDownReplicaCount = 2 + + ginkgo.By("Create StorageClass with all allowed topologies set") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, nil, allowedTopologies, "", + "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // read original vsphere config secret + vsphereCfg, err := readVsphereConfSecret(client, ctx, csiNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Use VC-IP for VC1 and VC-hostname for VC2 in a multi VC setup") + vCenterList := strings.Split(vsphereCfg.Global.VCenterHostname, ",") + vCenterIPVC2 := vCenterList[1] + + ginkgo.By("Fetch vcenter hostname for VC2") + vCenterHostName := getVcenterHostName(vCenterList[1]) + vCenterList[1] = vCenterHostName + vsphereCfg.Global.VCenterHostname = strings.Join(vCenterList, ",") + + // here we are writing the hostname of VC2 and later updating the vsphere config secret + err = writeNewDataAndUpdateVsphereConfSecret(client, ctx, csiNamespace, vsphereCfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + revertToOriginalVsphereConf = true + defer func() { + if revertToOriginalVsphereConf { + ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + + "and its credentials") + vsphereCfg.Global.VCenterHostname = vCenterIP + vsphereCfg.Global.User = vCenterUser + vsphereCfg.Global.Password = vCenterPassword + vsphereCfg.Global.VCenterPort = vCenterPort + vsphereCfg.Global.Datacenters = dataCenter + err = writeNewDataAndUpdateVsphereConfSecret(client, ctx, csiNamespace, vsphereCfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + revertToOriginalVsphereConf = false + + ginkgo.By("Restart CSI driver") + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) + gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + }() + + ginkgo.By("Restart CSI driver") + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) + gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create StatefulSet and verify pv affinity and pod affinity details") + service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, + parallelPodPolicy, stsReplicas, nodeAffinityToSet, allowedTopologies, allowedTopologyLen, + podAntiAffinityToSet, parallelStatefulSetCreation, false, "", "", nil, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + deleteService(namespace, client, service) + fss.DeleteAllStatefulSets(client, namespace) + }() + + ginkgo.By("Use VC-IP for VC2 and VC-hostname for VC1 in a multi VC setup") + ginkgo.By("Fetch vcenter hostname of VC1") + vCenterHostNameVC1 := getVcenterHostName(vCenterList[0]) + vCenterList[0] = vCenterHostNameVC1 + vCenterList[1] = vCenterIPVC2 + vsphereCfg.Global.VCenterHostname =
strings.Join(vCenterList, ",") + + // here we are writing hostname of VC1 and later updating vsphere config secret + err = writeNewDataAndUpdateVsphereConfSecret(client, ctx, csiNamespace, vsphereCfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + revertToOriginalVsphereConf = true + + ginkgo.By("Restart CSI driver") + restartSuccess, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) + gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulset, parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* Testcase-4 + Install CSI driver on different namespace and restart CSI-controller and node daemon sets in + between the statefulset creation + + 1. Create SC with allowedTopology details contains all availability zones + 2. Create 3 Statefulset with replica-5 + 3. Re-start the CSI controller Pod and node-daemon sets + 4. Wait for PVC to reach bound state and POD to reach Running state + 5. Volumes should get distributed among all the Availability zones + 6. Verify the PV node affinity details should have appropriate node details + 7. The Pods should be running on the appropriate nodes + 8. Scale-up/scale-down the statefulset + 9. Verify the node affinity details also verify the POD details + 10. Clean up the data + */ + + ginkgo.It("Install CSI driver on different namespace and restart CSI-controller and node daemon sets"+ + "in between the statefulset creation", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stsReplicas = 5 + sts_count := 3 + ignoreLabels := make(map[string]string) + parallelStatefulSetCreation = true + + // read original vsphere config secret + vsphereCfg, err := readVsphereConfSecret(client, ctx, csiNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a new namespace") + newNamespace, err = createNamespace(client, ctx, "test-ns") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By("Delete newly created namespace") + err = deleteNamespace(client, ctx, newNamespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Delete config secret created on csi namespace") + err = deleteVsphereConfigSecret(client, ctx, csiNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create config secret on a new namespace") + err = createVsphereConfigSecret(newNamespace.Name, vsphereCfg, sshClientConfig, allMasterIps) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Install vsphere CSI driver on different test namespace") + err = setNewNameSpaceInCsiYaml(client, sshClientConfig, csiNamespace, newNamespace.Name, allMasterIps) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + revertToOriginalCsiYaml = true + defer func() { + if revertToOriginalCsiYaml { + ginkgo.By("Recreate config secret on a default csi system namespace") + err = deleteVsphereConfigSecret(client, ctx, newNamespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = createVsphereConfigSecret(csiNamespace, vsphereCfg, sshClientConfig, allMasterIps) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + 
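// Ordering note: the config secret is recreated in the default CSI namespace before the driver
// deployment is moved back in the next step, presumably so the controller finds its configuration
// as soon as it is redeployed there.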
ginkgo.By("Revert vsphere CSI driver on a system csi namespace") + err = setNewNameSpaceInCsiYaml(client, sshClientConfig, newNamespace.Name, csiNamespace, allMasterIps) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + revertToOriginalCsiYaml = false + } + }() + + ginkgo.By("Create StorageClass with all allowed topolgies set") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, nil, allowedTopologies, "", + "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + + ginkgo.By("Creating multiple StatefulSets specs in parallel") + statefulSets := createParallelStatefulSetSpec(namespace, sts_count, stsReplicas) + + ginkgo.By("Trigger multiple StatefulSets creation in parallel") + var wg sync.WaitGroup + wg.Add(sts_count) + for i := 0; i < len(statefulSets); i++ { + go createParallelStatefulSets(client, namespace, statefulSets[i], stsReplicas, &wg) + if i == 1 { + ginkgo.By("Restart CSI driver") + restartSuccess, err := restartCSIDriver(ctx, client, newNamespace.Name, csiReplicas) + gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + wg.Wait() + + ginkgo.By("Waiting for StatefulSets Pods to be in Ready State") + err = waitForStsPodsToBeInReadyRunningState(ctx, client, namespace, statefulSets) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) + }() + + ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") + for i := 0; i < len(statefulSets); i++ { + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, parallelStatefulSetCreation, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Perform scaleup/scaledown operation on statefulset and verify pv and pod affinity details") + for i := 0; i < len(statefulSets); i++ { + if i == 0 { + stsScaleUp = false + scaleDownReplicaCount = 3 + framework.Logf("Scale down StatefulSet1 replica count to 3") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulSets[i], parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if i == 1 { + scaleUpReplicaCount = 9 + stsScaleDown = false + framework.Logf("Scale up StatefulSet2 replica count to 9") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulSets[i], parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } + if i == 2 { + framework.Logf("Scale up StatefulSet3 replica count to 13 and later scale it down " + + "to replica count 2 and in between restart node daemon set") + scaleUpReplicaCount = 13 + scaleDownReplicaCount = 2 + + // Fetch the number of CSI pods running before restart + list_of_pods, err := 
fpod.GetPodsInNamespace(client, newNamespace.Name, ignoreLabels) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + num_csi_pods := len(list_of_pods) + + // Collecting and dumping csi pod logs before restrating CSI daemonset + collectPodLogs(ctx, client, newNamespace.Name) + + // Restart CSI daemonset + ginkgo.By("Restart Daemonset") + cmd := []string{"rollout", "restart", "daemonset/vsphere-csi-node", "--namespace=" + newNamespace.Name} + e2ekubectl.RunKubectlOrDie(newNamespace.Name, cmd...) + + ginkgo.By("Waiting for daemon set rollout status to finish") + statusCheck := []string{"rollout", "status", "daemonset/vsphere-csi-node", "--namespace=" + newNamespace.Name} + e2ekubectl.RunKubectlOrDie(newNamespace.Name, statusCheck...) + + // wait for csi Pods to be in running ready state + err = fpod.WaitForPodsRunningReady(client, newNamespace.Name, int32(num_csi_pods), 0, pollTimeout, ignoreLabels) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulSets[i], parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + + }) + + /* Testcase-5 + In vsphere-config, keep different passwords on each VC and check Statefulset creation and reboot VC + + 1. Set different passwords to VC's in config secret . + 2. Re-start csi driver pods + 3. Wait for some time for csi to pick the changes on vsphere-secret file + 4. Create Statefulsets + 5. Reboot any one VC + 6. Revert the original VC password and vsphere conf, but this time do not restart csi driver + 6. Scale up/Scale-down the statefulset + 7. Verify the node affinity + 8. Clean up the data + */ + + ginkgo.It("Keep different passwords on each VC and check Statefulset creation and reboot VC", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stsReplicas = 3 + scaleUpReplicaCount = 5 + scaleDownReplicaCount = 2 + var clientIndex2 int + var username3, newPassword3, originalPassword3, vcAddress3 string + + // read original vsphere config secret + vsphereCfg, err := readVsphereConfSecret(client, ctx, csiNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create StorageClass with all allowed topolgies set") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, nil, allowedTopologies, "", + "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // read VC1 credentials + vcAddress1 := strings.Split(vsphereCfg.Global.VCenterHostname, ",")[0] + ":" + sshdPort + username1 := strings.Split(vsphereCfg.Global.User, ",")[0] + originalPassword1 := strings.Split(vsphereCfg.Global.Password, ",")[0] + newPassword1 := "E2E-test-password!23" + + ginkgo.By("Changing password on the vCenter VC1 host") + clientIndex0 := 0 + err = invokeVCenterChangePassword(username1, originalPassword1, newPassword1, vcAddress1, true, clientIndex0) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + originalVC1PasswordChanged = true + + if multiVCSetupType == "multi-3vc-setup" { + // read VC3 credentials + vcAddress3 = strings.Split(vsphereCfg.Global.VCenterHostname, ",")[2] + ":" + sshdPort + username3 = 
strings.Split(vsphereCfg.Global.User, ",")[2] + originalPassword3 = strings.Split(vsphereCfg.Global.Password, ",")[2] + newPassword3 = "Admin!23" + + ginkgo.By("Changing password on the vCenter VC3 host") + clientIndex2 = 2 + err = invokeVCenterChangePassword(username3, originalPassword3, newPassword3, vcAddress3, true, clientIndex2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + originalVC3PasswordChanged = true + + ginkgo.By("Create vsphere-config-secret file with new VC1 and new VC3 passwords in 3-VC setup") + passwordList := strings.Split(vsphereCfg.Global.Password, ",") + passwordList[0] = newPassword1 + passwordList[2] = newPassword3 + vsphereCfg.Global.Password = strings.Join(passwordList, ",") + } else if multiVCSetupType == "multi-2vc-setup" { + ginkgo.By("Create vsphere-config-secret file with new VC1 password in 2-VC setup") + passwordList := strings.Split(vsphereCfg.Global.Password, ",") + passwordList[0] = newPassword1 + vsphereCfg.Global.Password = strings.Join(passwordList, ",") + } + + // here we are writing the new passwords of VC1 and VC3 (only VC1 in a 2-VC setup) and later updating the vsphere config secret + err = writeNewDataAndUpdateVsphereConfSecret(client, ctx, csiNamespace, vsphereCfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + revertToOriginalVsphereConf = true + defer func() { + if originalVC1PasswordChanged { + ginkgo.By("Reverting the password change") + err = invokeVCenterChangePassword(username1, newPassword1, originalPassword1, vcAddress1, true, + clientIndex0) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + originalVC1PasswordChanged = false + } + + if multiVCSetupType == "multi-3vc-setup" { + if originalVC3PasswordChanged { + ginkgo.By("Reverting the password change") + err = invokeVCenterChangePassword(username3, newPassword3, originalPassword3, vcAddress3, true, + clientIndex2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + originalVC3PasswordChanged = false + } + } + + if revertToOriginalVsphereConf { + ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + + "and its credentials") + vsphereCfg.Global.VCenterHostname = vCenterIP + vsphereCfg.Global.User = vCenterUser + vsphereCfg.Global.Password = vCenterPassword + vsphereCfg.Global.VCenterPort = vCenterPort + vsphereCfg.Global.Datacenters = dataCenter + err = writeNewDataAndUpdateVsphereConfSecret(client, ctx, csiNamespace, vsphereCfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + revertToOriginalVsphereConf = false + + ginkgo.By("Restart CSI driver") + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) + gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + }() + + ginkgo.By("Restart CSI driver") + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) + gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create StatefulSet and verify pv affinity and pod affinity details") + service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, + parallelPodPolicy, stsReplicas, nodeAffinityToSet, allowedTopologies, allowedTopologyLen, + podAntiAffinityToSet, parallelStatefulSetCreation, false, "", "", nil, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + fss.DeleteAllStatefulSets(client, namespace) + deleteService(namespace, client, service) + }() + + ginkgo.By("Rebooting
VC2") + vCenterHostname := strings.Split(multiVCe2eVSphere.multivcConfig.Global.VCenterHostname, ",") + vcAddress := vCenterHostname[1] + ":" + sshdPort + framework.Logf("vcAddress - %s ", vcAddress) + err = invokeVCenterReboot(vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = waitForHostToBeUp(vCenterHostname[1]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Done with reboot") + + essentialServices := []string{spsServiceName, vsanhealthServiceName, vpxdServiceName} + checkVcenterServicesRunning(ctx, vcAddress, essentialServices) + + ginkgo.By("Reverting the password change on VC1") + err = invokeVCenterChangePassword(username1, newPassword1, originalPassword1, vcAddress1, true, + clientIndex0) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + originalVC1PasswordChanged = false + + if multiVCSetupType == "multi-3vc-setup" { + ginkgo.By("Reverting the password change on VC3") + err = invokeVCenterChangePassword(username3, newPassword3, originalPassword3, vcAddress3, true, + clientIndex2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + originalVC3PasswordChanged = false + } + + ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + + "and its credentials") + vsphereCfg.Global.VCenterHostname = vCenterIP + vsphereCfg.Global.User = vCenterUser + vsphereCfg.Global.Password = vCenterPassword + vsphereCfg.Global.VCenterPort = vCenterPort + vsphereCfg.Global.Datacenters = dataCenter + err = writeNewDataAndUpdateVsphereConfSecret(client, ctx, csiNamespace, vsphereCfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + revertToOriginalVsphereConf = false + + framework.Logf("Wait for %v for csi driver to auto-detect the changes", pollTimeout) + time.Sleep(pollTimeout) + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulset, parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /*Testcase-6 + Change VC in the UI but not on the vsphere secret + 1. Create SC + 2. Without updating the vsphere secret on the VC password + 3. Create a statefulset + 4. Until driver restarts all the workflows will go fine + 5. Statefulset creation should be successful + 6. Restart the driver + 7. There should be error while provisioning volume on the VC on which password has updated + 8. 
Clean up the data + */ + + ginkgo.It("Change VC in the UI but not on the vsphere secret and verify "+ + "volume creation workflow on a multivc setup", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + clientIndex := 0 + + ginkgo.By("Changing password on the vCenter VC1 host") + // read original vsphere config secret + vsphereCfg, err := readVsphereConfSecret(client, ctx, csiNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + vcAddress := strings.Split(vsphereCfg.Global.VCenterHostname, ",")[0] + ":" + sshdPort + framework.Logf("vcAddress - %s ", vcAddress) + username := strings.Split(vsphereCfg.Global.User, ",")[0] + originalPassword := strings.Split(vsphereCfg.Global.Password, ",")[0] + newPassword := e2eTestPassword + ginkgo.By(fmt.Sprintf("Original password %s, new password %s", originalPassword, newPassword)) + err = invokeVCenterChangePassword(username, originalPassword, newPassword, vcAddress, true, clientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + originalVC1PasswordChanged = true + defer func() { + if originalVC1PasswordChanged { + ginkgo.By("Reverting the password change") + err = invokeVCenterChangePassword(username, newPassword, originalPassword, vcAddress, true, + clientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + originalVC1PasswordChanged = false + } + }() + + framework.Logf("Wait for %v for csi driver to auto-detect the changes", pollTimeout) + time.Sleep(pollTimeout) + + framework.Logf("Verify CSI Pods are in CLBO state") + deploymentPods, err := client.AppsV1().Deployments(csiNamespace).Get(ctx, vSphereCSIControllerPodNamePrefix, + metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if deploymentPods.Status.UnavailableReplicas != 0 { + framework.Logf("CSI Pods are in CLBO state") + } + + ginkgo.By("Try to create a PVC verify that it is stuck in pending state") + storageclass, pvclaim, err := createPVCAndStorageClass(client, namespace, nil, nil, "", + allowedTopologies, "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Expect claim to fail as invalid storage policy is specified in Storage Class") + framework.ExpectError(fpv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, + client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)) + expectedErrMsg := "waiting for a volume to be created" + err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) + }) + + /* Testcase-7 + Make a wrong entry in vsphere conf for one of the VC and create a secret + Logs will have error messages, But driver still be up and running . 
+ Until the driver restarts, volume creation will work fine + After re-starting the CSI driver it should throw errors and should not come back to Running state until the error in + the vsphere-secret is fixed + */ + + ginkgo.It("Add any wrong entry in vsphere conf and verify csi pods behaviour", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + wrongPortNoVC1 := "337" + + // read original vsphere config secret + vsphereCfg, err := readVsphereConfSecret(client, ctx, csiNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create vsphere-config-secret file with wrong port number in VC1") + portList := strings.Split(vsphereCfg.Global.VCenterPort, ",") + portList[0] = wrongPortNoVC1 + vsphereCfg.Global.VCenterPort = strings.Join(portList, ",") + + err = writeNewDataAndUpdateVsphereConfSecret(client, ctx, csiNamespace, vsphereCfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + revertToOriginalVsphereConf = true + defer func() { + if revertToOriginalVsphereConf { + ginkgo.By("Reverting back csi-vsphere.conf with its original vcenter user " + + "and its credentials") + vsphereCfg.Global.VCenterHostname = vCenterIP + vsphereCfg.Global.User = vCenterUser + vsphereCfg.Global.Password = vCenterPassword + vsphereCfg.Global.VCenterPort = vCenterPort + vsphereCfg.Global.Datacenters = dataCenter + err = writeNewDataAndUpdateVsphereConfSecret(client, ctx, csiNamespace, vsphereCfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + revertToOriginalVsphereConf = false + + ginkgo.By("Restart CSI driver") + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) + gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + }() + + framework.Logf("Wait for %v to see the CSI Pods ready running status after wrong entry in vsphere conf", pollTimeout) + time.Sleep(pollTimeout) + + framework.Logf("Verify CSI Pods status") + deploymentPods, err := client.AppsV1().Deployments(csiNamespace).Get(ctx, + vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if deploymentPods.Status.AvailableReplicas == csiReplicas { + framework.Logf("CSI Pods are in ready running state") + } + + ginkgo.By("Restart CSI controller pod") + err = updateDeploymentReplicawithWait(client, 0, vSphereCSIControllerPodNamePrefix, csiNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = updateDeploymentReplicawithWait(client, csiReplicas, vSphereCSIControllerPodNamePrefix, csiNamespace) + if err != nil { + if strings.Contains(err.Error(), "error waiting for deployment") { + framework.Logf("csi pods are not in ready state") + } else { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + + framework.Logf("Verify CSI Pods status now") + deploymentPods, err = client.AppsV1().Deployments(csiNamespace).Get(ctx, + vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if deploymentPods.Status.UnavailableReplicas != csiReplicas { + framework.Logf("CSI Pods are not ready or in CLBO state with %d unavailable csi pod replicas", + deploymentPods.Status.UnavailableReplicas) + } + + ginkgo.By("Try to create a PVC and verify that it is stuck in Pending state") + storageclass, pvclaim, err := createPVCAndStorageClass(client, namespace, nil, nil, "", + allowedTopologies, "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name,
*metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Expect claim to fail as invalid storage policy is specified in Storage Class") + framework.ExpectError(fpv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, + client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)) + expectedErrMsg := "waiting for a volume to be created" + err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) + }) + +}) diff --git a/tests/e2e/multi_vc_connection.go b/tests/e2e/multi_vc_connection.go new file mode 100644 index 0000000000..52dffd25d2 --- /dev/null +++ b/tests/e2e/multi_vc_connection.go @@ -0,0 +1,101 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + neturl "net/url" + "strings" + + gomega "github.com/onsi/gomega" + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/session" + "github.com/vmware/govmomi/vim25" + "k8s.io/kubernetes/test/e2e/framework" +) + +/* +connectMultiVC helps make a connection to a multiple vCenter Server. No actions are taken if a connection +exists and alive. Otherwise, a new client will be created. 
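One govmomi client is created per vCenter listed in the comma-separated Global.VCenterHostname field, in the same order (see newClientForMultiVC below); connectMultiVcCns then builds a matching CNS client per vCenter. In these tests the package-level multiVCe2eVSphere instance is assumed to be the value these helpers receive, based on its use elsewhere in this suite.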
+*/ +func connectMultiVC(ctx context.Context, vs *multiVCvSphere) { + clientLock.Lock() + defer clientLock.Unlock() + if vs.multiVcClient == nil { + framework.Logf("Creating new VC session") + vs.multiVcClient = newClientForMultiVC(ctx, vs) + } + for i := 0; i < len(vs.multiVcClient); i++ { + manager := session.NewManager(vs.multiVcClient[i].Client) + userSession, err := manager.UserSession(ctx) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if userSession != nil { + continue + } else { + framework.Logf("Current session is not valid or not authenticated, trying to logout from it") + err = vs.multiVcClient[i].Logout(ctx) + if err != nil { + framework.Logf("Ignoring the log out error: %v", err) + } + framework.Logf("Creating new client session after attempting to logout from existing session") + vs.multiVcClient = newClientForMultiVC(ctx, vs) + } + } +} + +/* +newClientForMultiVC creates a new client for vSphere connection on a multivc environment +*/ +func newClientForMultiVC(ctx context.Context, vs *multiVCvSphere) []*govmomi.Client { + var clients []*govmomi.Client + configUser := strings.Split(vs.multivcConfig.Global.User, ",") + configPwd := strings.Split(vs.multivcConfig.Global.Password, ",") + configvCenterHostname := strings.Split(vs.multivcConfig.Global.VCenterHostname, ",") + configvCenterPort := strings.Split(vs.multivcConfig.Global.VCenterPort, ",") + for i := 0; i < len(configvCenterHostname); i++ { + framework.Logf("https://%s:%s/sdk", configvCenterHostname[i], configvCenterPort[i]) + url, err := neturl.Parse(fmt.Sprintf("https://%s:%s/sdk", + configvCenterHostname[i], configvCenterPort[i])) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + url.User = neturl.UserPassword(configUser[i], configPwd[i]) + client, err := govmomi.NewClient(ctx, url, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = client.UseServiceVersion(vsanNamespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + client.RoundTripper = vim25.Retry(client.RoundTripper, vim25.TemporaryNetworkError(roundTripperDefaultCount)) + clients = append(clients, client) + } + return clients +} + +/* +connectMultiVcCns creates a CNS client for the virtual center for a multivc environment +*/ +func connectMultiVcCns(ctx context.Context, vs *multiVCvSphere) error { + var err error + clientMutex.Lock() + defer clientMutex.Unlock() + if vs.multiVcCnsClient == nil { + vs.multiVcCnsClient = make([]*cnsClient, len(vs.multiVcClient)) + for i := 0; i < len(vs.multiVcClient); i++ { + vs.multiVcCnsClient[i], err = newCnsClient(ctx, vs.multiVcClient[i].Client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + return nil +} diff --git a/tests/e2e/multi_vc_multi_replica.go b/tests/e2e/multi_vc_multi_replica.go new file mode 100644 index 0000000000..ca4fe2d268 --- /dev/null +++ b/tests/e2e/multi_vc_multi_replica.go @@ -0,0 +1,259 @@ +/* + Copyright 2023 The Kubernetes Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package e2e + +import ( + "context" + "fmt" + "sync" + + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "golang.org/x/crypto/ssh" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + fnodes "k8s.io/kubernetes/test/e2e/framework/node" + fss "k8s.io/kubernetes/test/e2e/framework/statefulset" + admissionapi "k8s.io/pod-security-admission/api" +) + +var _ = ginkgo.Describe("[csi-multi-vc-topology] Multi-VC-Replica", func() { + f := framework.NewDefaultFramework("csi-multi-vc") + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + var ( + client clientset.Interface + namespace string + allowedTopologies []v1.TopologySelectorLabelRequirement + sshClientConfig *ssh.ClientConfig + nimbusGeneratedK8sVmPwd string + statefulSetReplicaCount int32 + k8sVersion string + stsScaleUp bool + stsScaleDown bool + verifyTopologyAffinity bool + parallelStatefulSetCreation bool + scaleUpReplicaCount int32 + scaleDownReplicaCount int32 + ) + ginkgo.BeforeEach(func() { + var cancel context.CancelFunc + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + client = f.ClientSet + namespace = f.Namespace.Name + + multiVCbootstrap() + + stsScaleUp = true + stsScaleDown = true + verifyTopologyAffinity = true + parallelStatefulSetCreation = true + + sc, err := client.StorageV1().StorageClasses().Get(ctx, defaultNginxStorageClassName, metav1.GetOptions{}) + if err == nil && sc != nil { + gomega.Expect(client.StorageV1().StorageClasses().Delete(ctx, sc.Name, + *metav1.NewDeleteOptions(0))).NotTo(gomega.HaveOccurred()) + } + + nodeList, err := fnodes.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + if !(len(nodeList.Items) > 0) { + framework.Failf("Unable to find ready and schedulable Node") + } + + topologyMap := GetAndExpectStringEnvVar(topologyMap) + allowedTopologies = createAllowedTopolgies(topologyMap, topologyLength) + nimbusGeneratedK8sVmPwd = GetAndExpectStringEnvVar(nimbusK8sVmPwd) + + sshClientConfig = &ssh.ClientConfig{ + User: "root", + Auth: []ssh.AuthMethod{ + ssh.Password(nimbusGeneratedK8sVmPwd), + }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + + // fetching k8s version + v, err := client.Discovery().ServerVersion() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + k8sVersion = v.Major + "." + v.Minor + + }) + + ginkgo.AfterEach(func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) + fss.DeleteAllStatefulSets(client, namespace) + ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespace)) + err := client.CoreV1().Services(namespace).Delete(ctx, servicename, *metav1.NewDeleteOptions(0)) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + framework.Logf("Perform cleanup of any left over stale PVs") + allPvs, err := client.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, pv := range allPvs.Items { + err = client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }) + + /* + Verify the behaviour when CSI Provisioner, CSI Attacher, Vsphere syncer is deleted repeatedly + during workload creation + + 1. 
Identify the CSI-Controller-Pod where CSI Provisioner, CSI Attacher and vsphere-syncer are the leader + 2. Create SC with allowed topology set to different availability zones spread across multiple VC's + 3. Create three Statefulsets each with replica 5 + 4. While the Statefulsets is creating PVCs and Pods, kill CSI-Provisioner, CSI-attacher identified + in the step 1 + 5. Wait for some time for all the PVCs and Pods to come up + 6. Verify the PV node affinity and the nodes on which Pods have come should be appropriate + 7. Scale-up/Scale-down the Statefulset and kill the vsphere-syncer identified in the step 1 + 8. Wait for some time, Statefulset workload is patched with proper count + 9. Make sure common validation points are met on PV,PVC and POD + 10. Clean up the data + */ + + ginkgo.It("Verify behaviour when CSI-Provisioner, CSI-Attacher, Vsphere-Syncer is "+ + "deleted repeatedly during workload creation in multivc", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sts_count := 3 + statefulSetReplicaCount = 5 + + ginkgo.By("Get current leader where CSI-Provisioner, CSI-Attacher and " + + "Vsphere-Syncer is running and find the master node IP where these containers are running") + csiProvisionerLeader, csiProvisionerControlIp, err := getK8sMasterNodeIPWhereContainerLeaderIsRunning(ctx, + client, sshClientConfig, provisionerContainerName) + framework.Logf("CSI-Provisioner is running on Leader Pod %s "+ + "which is running on master node %s", csiProvisionerLeader, csiProvisionerControlIp) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + csiAttacherLeaderleader, csiAttacherControlIp, err := getK8sMasterNodeIPWhereContainerLeaderIsRunning(ctx, + client, sshClientConfig, attacherContainerName) + framework.Logf("CSI-Attacher is running on Leader Pod %s "+ + "which is running on master node %s", csiAttacherLeaderleader, csiAttacherControlIp) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + vsphereSyncerLeader, vsphereSyncerControlIp, err := getK8sMasterNodeIPWhereContainerLeaderIsRunning(ctx, + client, sshClientConfig, syncerContainerName) + framework.Logf("Vsphere-Syncer is running on Leader Pod %s "+ + "which is running on master node %s", vsphereSyncerLeader, vsphereSyncerControlIp) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create SC with allowed topology spread across multiple VCs") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, nil, allowedTopologies, "", + "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + + ginkgo.By("Creating multiple StatefulSets specs in parallel") + statefulSets := createParallelStatefulSetSpec(namespace, sts_count, statefulSetReplicaCount) + + ginkgo.By("Trigger multiple StatefulSets creation in parallel. 
During StatefulSets " + + "creation, kill CSI-Provisioner, CSI-Attacher container in between") + var wg sync.WaitGroup + wg.Add(sts_count) + for i := 0; i < len(statefulSets); i++ { + go createParallelStatefulSets(client, namespace, statefulSets[i], statefulSetReplicaCount, &wg) + if i == 1 { + ginkgo.By("Kill CSI-Provisioner container") + err = execDockerPauseNKillOnContainer(sshClientConfig, csiProvisionerControlIp, provisionerContainerName, + k8sVersion) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if i == 2 { + ginkgo.By("Kill CSI-Attacher container") + err = execDockerPauseNKillOnContainer(sshClientConfig, csiAttacherControlIp, attacherContainerName, + k8sVersion) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + wg.Wait() + + ginkgo.By("Waiting for StatefulSets Pods to be in Ready State") + err = waitForStsPodsToBeInReadyRunningState(ctx, client, namespace, statefulSets) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) + }() + + ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") + for i := 0; i < len(statefulSets); i++ { + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, parallelStatefulSetCreation, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Perform scaleup/scaledown operation on statefulset and verify pv and pod affinity details") + for i := 0; i < len(statefulSets); i++ { + if i == 0 { + stsScaleUp = false + scaleDownReplicaCount = 3 + framework.Logf("Scale down StatefulSet1 replica count to 3") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulSets[i], parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if i == 1 { + scaleUpReplicaCount = 9 + stsScaleDown = false + framework.Logf("Scale up StatefulSet2 replica count to 9 and in between " + + "kill vsphere syncer container") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulSets[i], parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + framework.Logf("Kill Vsphere-Syncer container") + err = execDockerPauseNKillOnContainer(sshClientConfig, vsphereSyncerControlIp, syncerContainerName, + k8sVersion) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } + if i == 2 { + framework.Logf("Scale up StatefulSet3 replica count to 7 and later scale down" + + "the replica count to 2") + scaleUpReplicaCount = 7 + scaleDownReplicaCount = 2 + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulSets[i], parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + }) +}) diff --git a/tests/e2e/multi_vc_preferential_topology.go b/tests/e2e/multi_vc_preferential_topology.go new file mode 100644 index 0000000000..3ea2765570 --- /dev/null +++ b/tests/e2e/multi_vc_preferential_topology.go @@ -0,0 +1,791 @@ +/* + Copyright 2023 The Kubernetes Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "golang.org/x/crypto/ssh" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + fnodes "k8s.io/kubernetes/test/e2e/framework/node" + fpod "k8s.io/kubernetes/test/e2e/framework/pod" + fpv "k8s.io/kubernetes/test/e2e/framework/pv" + fss "k8s.io/kubernetes/test/e2e/framework/statefulset" + admissionapi "k8s.io/pod-security-admission/api" + + snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" +) + +var _ = ginkgo.Describe("[csi-multi-vc-preferential-topology] Multi-VC-Preferential-Topology", func() { + f := framework.NewDefaultFramework("multi-vc-preferential-topology") + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + var ( + client clientset.Interface + namespace string + preferredDatastoreChosen int + allMasterIps []string + masterIp string + preferredDatastorePaths []string + allowedTopologyRacks []string + sshClientConfig *ssh.ClientConfig + nimbusGeneratedK8sVmPwd string + allowedTopologies []v1.TopologySelectorLabelRequirement + ClusterdatastoreListVc []map[string]string + ClusterdatastoreListVc1 map[string]string + ClusterdatastoreListVc2 map[string]string + ClusterdatastoreListVc3 map[string]string + parallelStatefulSetCreation bool + stsReplicas int32 + scaleDownReplicaCount int32 + scaleUpReplicaCount int32 + stsScaleUp bool + stsScaleDown bool + verifyTopologyAffinity bool + topValStartIndex int + topValEndIndex int + topkeyStartIndex int + scParameters map[string]string + storagePolicyInVc1Vc2 string + allowedTopologyLen int + parallelPodPolicy bool + nodeAffinityToSet bool + podAntiAffinityToSet bool + snapc *snapclient.Clientset + pandoraSyncWaitTime int + err error + csiNamespace string + csiReplicas int32 + ) + + ginkgo.BeforeEach(func() { + var cancel context.CancelFunc + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + client = f.ClientSet + namespace = f.Namespace.Name + + multiVCbootstrap() + + sc, err := client.StorageV1().StorageClasses().Get(ctx, defaultNginxStorageClassName, metav1.GetOptions{}) + if err == nil && sc != nil { + gomega.Expect(client.StorageV1().StorageClasses().Delete(ctx, sc.Name, + *metav1.NewDeleteOptions(0))).NotTo(gomega.HaveOccurred()) + } + nodeList, err := fnodes.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + if !(len(nodeList.Items) > 0) { + framework.Failf("Unable to find ready and schedulable Node") + } + + nimbusGeneratedK8sVmPwd = GetAndExpectStringEnvVar(nimbusK8sVmPwd) + sshClientConfig = &ssh.ClientConfig{ + User: "root", + Auth: []ssh.AuthMethod{ + ssh.Password(nimbusGeneratedK8sVmPwd), + }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + 
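// The root SSH config above (password taken from the nimbus-generated k8s VM password, host-key
// checking disabled for the test bed) is handed, together with masterIp, to the datastore helpers
// used below (getDatastoresListFromMultiVCs, tagPreferredDatastore, detachTagCreatedOnPreferredDatastore),
// which are assumed to run their commands on the k8s master node over SSH.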
+ stsScaleUp = true + stsScaleDown = true + verifyTopologyAffinity = true + scParameters = make(map[string]string) + storagePolicyInVc1Vc2 = GetAndExpectStringEnvVar(envStoragePolicyNameInVC1VC2) + topologyMap := GetAndExpectStringEnvVar(topologyMap) + allowedTopologies = createAllowedTopolgies(topologyMap, topologyLength) + + //Get snapshot client using the rest config + restConfig = getRestConfigClient() + snapc, err = snapclient.NewForConfig(restConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // fetching k8s master ip + allMasterIps = getK8sMasterIPs(ctx, client) + masterIp = allMasterIps[0] + + // fetching cluster details + clientIndex := 0 + clusterComputeResource, _, err = getClusterNameForMultiVC(ctx, &multiVCe2eVSphere, clientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // fetching list of datastores available in different VCs + ClusterdatastoreListVc1, ClusterdatastoreListVc2, + ClusterdatastoreListVc3, err = getDatastoresListFromMultiVCs(masterIp, sshClientConfig, + clusterComputeResource[0], true) + ClusterdatastoreListVc = append(ClusterdatastoreListVc, ClusterdatastoreListVc1, + ClusterdatastoreListVc2, ClusterdatastoreListVc3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if os.Getenv(envPandoraSyncWaitTime) != "" { + pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + pandoraSyncWaitTime = defaultPandoraSyncWaitTime + } + + allowedTopologyRacks = nil + for i := 0; i < len(allowedTopologies); i++ { + for j := 0; j < len(allowedTopologies[i].Values); j++ { + allowedTopologyRacks = append(allowedTopologyRacks, allowedTopologies[i].Values[j]) + } + } + + // read namespace + csiNamespace = GetAndExpectStringEnvVar(envCSINamespace) + csiDeployment, err := client.AppsV1().Deployments(csiNamespace).Get( + ctx, vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + csiReplicas = *csiDeployment.Spec.Replicas + + //set preferred datatsore time interval + setPreferredDatastoreTimeInterval(client, ctx, csiNamespace, csiReplicas, true) + }) + + ginkgo.AfterEach(func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) + fss.DeleteAllStatefulSets(client, namespace) + ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespace)) + err := client.CoreV1().Services(namespace).Delete(ctx, servicename, *metav1.NewDeleteOptions(0)) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + framework.Logf("Perform preferred datastore tags cleanup after test completion") + err = deleteTagCreatedForPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks, + true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + framework.Logf("Recreate preferred datastore tags post cleanup") + err = createTagForPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }) + + /* Testcase-1: + Add preferential tag in all the Availability zone's of VC1 and VC2 → change the preference during + execution + + 1. Create SC default parameters without any topology requirement. + 2. In each availability zone for any one datastore add preferential tag in VC1 and VC2 + 3. Create 3 statefulset with 10 replica's + 4. Wait for all the PVC to bound and pods to reach running state + 5. 
Verify that since the preferred datastore is available, Volume should get created on the datastores + which has the preferencce set + 6. Make sure common validation points are met on PV,PVC and POD + 7. Change the Preference in any 2 datastores + 8. Scale up the statefulset to 15 replica + 9. The volumes should get provision on the datastores which has the preference + 10. Clear the data + */ + ginkgo.It("TestTag one datastore as preferred each in VC1 and VC2 and verify it is honored", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + parallelStatefulSetCreation = true + preferredDatastoreChosen = 1 + preferredDatastorePaths = nil + sts_count := 3 + stsReplicas = 10 + var dsUrls []string + scaleUpReplicaCount = 15 + stsScaleDown = false + + ginkgo.By("Create SC with default parameters") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, nil, nil, "", + "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // choose preferred datastore + ginkgo.By("Tag preferred datastore for volume provisioning in VC1 and VC2") + for i := 0; i < 2; i++ { + paths, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologies[0].Values[i], + preferredDatastoreChosen, ClusterdatastoreListVc[i], nil, true, i) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + preferredDatastorePaths = append(preferredDatastorePaths, paths...) + + // Get the length of the paths for the current iteration + pathsLen := len(paths) + + for j := 0; j < pathsLen; j++ { + // Calculate the index for ClusterdatastoreListVc based on the current iteration + index := i + j + + if val, ok := ClusterdatastoreListVc[index][paths[j]]; ok { + dsUrls = append(dsUrls, val) + } + } + } + + framework.Logf("Waiting for %v for preferred datastore to get refreshed in the environment", + preferredDatastoreTimeOutInterval) + time.Sleep(preferredDatastoreTimeOutInterval) + + ginkgo.By("Create service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + + ginkgo.By("Create 3 statefulset with 10 replicas") + statefulSets := createParallelStatefulSetSpec(namespace, sts_count, stsReplicas) + var wg sync.WaitGroup + wg.Add(sts_count) + for i := 0; i < len(statefulSets); i++ { + go createParallelStatefulSets(client, namespace, statefulSets[i], + stsReplicas, &wg) + + } + wg.Wait() + + ginkgo.By("Waiting for StatefulSets Pods to be in Ready State") + err = waitForStsPodsToBeInReadyRunningState(ctx, client, namespace, statefulSets) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) + }() + + ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") + for i := 0; i < len(statefulSets); i++ { + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulSets[i], + namespace, allowedTopologies, parallelStatefulSetCreation, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Verify volume is provisioned on the preferred datatsore") + for i := 0; i < len(statefulSets); i++ { + err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulSets[i], namespace, + preferredDatastorePaths, nil, true, true, true, 
dsUrls) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Remove preferred datatsore tag which is chosen for volume provisioning") + for i := 0; i < len(preferredDatastorePaths); i++ { + err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[i], + allowedTopologies[0].Values[i], true, i) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + var preferredDatastorePathsNew []string + ginkgo.By("Tag new preferred datastore for volume provisioning in VC1 and VC2") + for i := 0; i < 2; i++ { + paths, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologies[0].Values[i], + preferredDatastoreChosen, ClusterdatastoreListVc[i], preferredDatastorePaths, true, i) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + preferredDatastorePathsNew = append(preferredDatastorePathsNew, paths...) + pathsLen := len(paths) + for j := 0; j < pathsLen; j++ { + index := i + j + if val, ok := ClusterdatastoreListVc[index][paths[j]]; ok { + dsUrls = append(dsUrls, val) + } + } + } + preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastorePathsNew...) + defer func() { + ginkgo.By("Remove preferred datastore tag") + for i := 0; i < len(preferredDatastorePathsNew); i++ { + err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePathsNew[i], + allowedTopologies[0].Values[i], true, i) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + for i := 0; i < len(statefulSets); i++ { + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulSets[i], parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Verify volume is provisioned on the preferred datatsore") + for i := 0; i < len(statefulSets); i++ { + err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulSets[i], namespace, + preferredDatastorePaths, nil, true, true, true, dsUrls) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }) + + /* Testcase-2: + Create SC with storage policy available in VC1 and VC2, set the preference in VC1 datastore only + + Steps// + 1. Create storage policy with same name on both VC1 and VC2 + 2. Add preference tag on the datastore which is on VC1 only + 3. Create statefulset using the above policy + 4. Since the preference tag is added in VC1, volume provisioning should happned on VC1's datastore only + [no, first preference will be given to Storage Policy mentioned in the Storage Class] + 5. Make sure common validation points are met on PV,PVC and POD + 6. Reboot VC1 + 7. Scale up the stateful set to replica 15 → What should be the behaviour here + 8. Since the VC1 is presently in reboot state, new volumes should start coming up on VC2 + Once VC1 is up again the datastore preference should take preference + [no, until all VCs comes up PVC provision will be stuck in Pending state] + 9. Verify the node affinity on all PV's + 10. Make sure POD has come up on appropriate nodes . + 11. 
Clean up the data + */ + + ginkgo.It("Create SC with storage policy available in VC1 and VC2 and set the "+ + "preference in VC1 datastore only", func() { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + /* here we are considering storage policy of VC1 and VC2 and the allowed topology is k8s-zone -> zone-1 + in case of 2-VC setup and 3-VC setup + */ + + stsReplicas = 1 + scParameters[scParamStoragePolicyName] = storagePolicyInVc1Vc2 + topValStartIndex = 0 + topValEndIndex = 2 + var dsUrls []string + stsScaleDown = false + scaleUpReplicaCount = 7 + var multiVcClientIndex = 0 + preferredDatastoreChosen = 1 + preferredDatastorePaths = nil + + datastoreURLVC1 := GetAndExpectStringEnvVar(envPreferredDatastoreUrlVC1) + datastoreURLVC2 := GetAndExpectStringEnvVar(envPreferredDatastoreUrlVC2) + + /* + fetching datstore details passed in the storage policy + Note: Since storage policy is specified in the SC, the first preference for volume provisioning + will be given to the datastore given in the storage profile and second preference will then be + given to the preferred datastore chosen + */ + for _, vCenterList := range ClusterdatastoreListVc { + for _, url := range vCenterList { + if url == datastoreURLVC1 || url == datastoreURLVC2 { + dsUrls = append(dsUrls, url) + } + } + } + + ginkgo.By("Set specific allowed topology") + allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex, + topValEndIndex) + + ginkgo.By("Create StorageClass with storage policy specified") + scSpec := getVSphereStorageClassSpec(defaultNginxStorageClassName, scParameters, allowedTopologies, "", + "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // choose preferred datastore + ginkgo.By("Tag preferred datastore for volume provisioning in VC1") + preferredDatastorePaths, err := tagPreferredDatastore(masterIp, sshClientConfig, + allowedTopologies[0].Values[0], + preferredDatastoreChosen, ClusterdatastoreListVc[0], nil, true, multiVcClientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pathsLen := len(preferredDatastorePaths) + for j := 0; j < pathsLen; j++ { + if val, ok := ClusterdatastoreListVc[0][preferredDatastorePaths[j]]; ok { + dsUrls = append(dsUrls, val) + } + } + defer func() { + ginkgo.By("Remove preferred datastore tag") + for i := 0; i < len(preferredDatastorePaths); i++ { + err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[i], + allowedTopologies[0].Values[0], true, i) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + framework.Logf("Waiting for %v for preferred datastore to get refreshed in the environment", + preferredDatastoreTimeOutInterval) + time.Sleep(preferredDatastoreTimeOutInterval) + + ginkgo.By("Create StatefulSet and verify pv affinity and pod affinity details") + service, statefulset, err := createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx, client, namespace, + parallelPodPolicy, stsReplicas, nodeAffinityToSet, allowedTopologies, allowedTopologyLen, + podAntiAffinityToSet, parallelStatefulSetCreation, false, "", "", sc, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + deleteAllStsAndPodsPVCsInNamespace(ctx, client, namespace) + 
deleteService(namespace, client, service) + }() + + ginkgo.By("Verify volume is provisioned on the preferred datatsore") + err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, + preferredDatastorePaths, nil, false, false, true, dsUrls) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Rebooting VC") + vCenterHostname := strings.Split(multiVCe2eVSphere.multivcConfig.Global.VCenterHostname, ",") + vcAddress := vCenterHostname[0] + ":" + sshdPort + framework.Logf("vcAddress - %s ", vcAddress) + err = invokeVCenterReboot(vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = waitForHostToBeUp(vCenterHostname[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Done with reboot") + + essentialServices := []string{spsServiceName, vsanhealthServiceName, vpxdServiceName} + checkVcenterServicesRunning(ctx, vcAddress, essentialServices) + + //After reboot + multiVCbootstrap() + + ginkgo.By("Perform scaleup/scaledown operation on statefulsets and " + + "verify pv affinity and pod affinity") + err = performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx, client, scaleUpReplicaCount, + scaleDownReplicaCount, statefulset, parallelStatefulSetCreation, namespace, + allowedTopologies, stsScaleUp, stsScaleDown, verifyTopologyAffinity) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify volume is provisioned on the preferred datatsore") + err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, + preferredDatastorePaths, nil, false, false, true, dsUrls) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* Testcase-3: + Create/Restore Snapshot of PVC using single datastore preference + + Steps// + + 1. Assign preferential tag to any one datastore under any one VC + 2. Create SC with allowed topology set to all the volumes + [here, for verification of snapshot, considering single allowed topology of VC3 ] + 3. Create PVC-1 with the above SC + 4. Wait for PVC-1 to reach Bound state. + 5. Describe PV-1 and verify node affinity details + 6. Verify volume should be provisioned on the selected preferred datastore + 7. Create SnapshotClass, Snapshot of PVC-1. + 8. Verify snapshot state. It should be in ready-to-use state. + 9. Verify snapshot should be created on the preferred datastore. + 10. Restore snapshot to create PVC-2 + 11. Wait for PVC-2 to reach Bound state. + 12. Describe PV-2 and verify node affinity details + 13. Verify volume should be provisioned on the selected preferred datastore + 14. Create Pod from restored PVC-2. + 15. Make sure common validation points are met on PV,PVC and POD + 16. Make sure POD is running on the same node as mentioned in the node affinity details. + 17. Perform Cleanup. Delete Snapshot, Pod, PVC, SC + 18. Remove datastore preference tags as part of cleanup. + */ + + ginkgo.It("Assign preferred datatsore to any one VC and verify create restore snapshot", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + preferredDatastoreChosen = 1 + preferredDatastorePaths = nil + var dsUrls []string + var multiVcClientIndex = 2 + topValStartIndex = 2 + topValEndIndex = 3 + + // Considering k8s-zone -> zone-3 i.e. 
VC3 allowed topology + ginkgo.By("Set specific allowed topology") + allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex, + topValEndIndex) + + // choose preferred datastore + ginkgo.By("Tag preferred datastore for volume provisioning in VC3") + preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, + allowedTopologies[0].Values[0], + preferredDatastoreChosen, ClusterdatastoreListVc[2], nil, true, multiVcClientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pathsLen := len(preferredDatastorePaths) + for j := 0; j < pathsLen; j++ { + if val, ok := ClusterdatastoreListVc[2][preferredDatastorePaths[j]]; ok { + dsUrls = append(dsUrls, val) + } + } + defer func() { + ginkgo.By("Remove preferred datastore tag") + err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], + allowedTopologies[0].Values[0], true, multiVcClientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + framework.Logf("Waiting for %v for preferred datastore to get refreshed in the environment", + preferredDatastoreTimeOutInterval) + time.Sleep(preferredDatastoreTimeOutInterval) + + ginkgo.By("Create StorageClass and PVC") + storageclass, pvclaim, err := createPVCAndStorageClass(client, namespace, nil, + nil, diskSize, allowedTopologies, "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, + *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // Wait for PVC to be in Bound phase + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, []*v1.PersistentVolumeClaim{pvclaim}, + framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err = fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = multiVCe2eVSphere.waitForCNSVolumeToBeDeletedInMultiVC(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvclaim = nil + }() + + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) + queryResult, err := multiVCe2eVSphere.queryCNSVolumeWithResultInMultiVC(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) + gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) + + ginkgo.By("Create volume snapshot class, volume snapshot") + volumeSnapshot, volumeSnapshotClass, snapshotId := createSnapshotClassAndVolSnapshot(ctx, snapc, namespace, + pvclaim, volHandle, false, true) + defer func() { + ginkgo.By("Perform cleanup of snapshot created") + performCleanUpForSnapshotCreated(ctx, snapc, namespace, volHandle, volumeSnapshot, snapshotId, + volumeSnapshotClass, pandoraSyncWaitTime, true) + }() + + ginkgo.By("Create PVC from snapshot") + pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, + v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) + pvclaim2, err := fpv.CreatePVC(client, namespace, pvcSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + persistentvolumes2, err := fpv.WaitForPVClaimBoundPhase(client, + []*v1.PersistentVolumeClaim{pvclaim2}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle2 := 
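The restore step above builds a PVC whose DataSource points at the VolumeSnapshot (via getPersistentVolumeClaimSpecWithDatasource, whose implementation is not shown here). A rough sketch of the resulting object shape, assuming the core/v1 types used by this suite; the names, size and storage class are illustrative only:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Restored PVC: same storage class as the source, with a DataSource referencing
	// the VolumeSnapshot object (API group assumed to be snapshot.storage.k8s.io).
	apiGroup := "snapshot.storage.k8s.io"
	scName := "example-sc"
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "restored-pvc-"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			StorageClassName: &scName,
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("2Gi")},
			},
			DataSource: &v1.TypedLocalObjectReference{
				APIGroup: &apiGroup,
				Kind:     "VolumeSnapshot",
				Name:     "example-volume-snapshot",
			},
		},
	}
	fmt.Printf("%+v\n", pvc.Spec.DataSource)
}
```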
persistentvolumes2[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle2).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = multiVCe2eVSphere.waitForCNSVolumeToBeDeletedInMultiVC(volHandle2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating pod") + pod, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim2}, false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify volume is detached from the node") + isDiskDetached, err := multiVCe2eVSphere.waitForVolumeDetachedFromNodeInMultiVC(client, + volHandle2, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), + fmt.Sprintf("Volume %q is not detached from the node %q", volHandle2, + pod.Spec.NodeName)) + }() + + // verifying volume provisioning + ginkgo.By("Verify volume is provisioned on the preferred datatsore") + verifyVolumeProvisioningForStandalonePods(ctx, client, pod, namespace, preferredDatastorePaths, + ClusterdatastoreListVc[2], true, dsUrls) + + ginkgo.By("Verify PV node affinity and that the PODS are running on " + + "appropriate node as specified in the allowed topologies of SC") + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, + namespace, allowedTopologies, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* Testcase-4 + + Create/Restore Snapshot of PVC, datastore preference change + + 1. Assign preferential tag to any one datastore under any one VC + 2. Create SC with allowed topology set to all the volumes + 3. Create PVC-1 with the above SC + 4. Wait for PVC-1 to reach Bound state. + 5. Describe PV-1 and verify node affinity details + 6. Verify volume should be provisioned on the selected preferred datastore + 7. Make sure common validation points are met on PV,PVC and POD + 8. Change datastore preference(ex- from NFS-2 to vSAN-2) + 9. Create SnapshotClass, Snapshot of PVC-1 + 10. Verify snapshot state. It should be in ready-to-use state. + 11. Restore snapshot to create PVC-2 + 12. PVC-2 should get stuck in Pending state and proper error message should be displayed. + 13. Perform Cleanup. Delete Snapshot, Pod, PVC, SC + 14. Remove datastore preference tags as part of cleanup. + */ + + ginkgo.It("Assign preferred datatsore to any one VC and verify create restore snapshot "+ + "and later change datastore preference", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + preferredDatastoreChosen = 1 + preferredDatastorePaths = nil + var dsUrls []string + var multiVcClientIndex = 2 + topValStartIndex = 2 + topValEndIndex = 3 + + // Considering k8s-zone -> zone-3 i.e. 
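Placement verification in these tests boils down to checking that the datastore URL reported by CNS for a volume is one of the URLs collected for the currently preferred datastores (see verifyPreferredDatastoreMatchInMultiVC further below). A minimal sketch of that membership check with placeholder URLs:

```go
package main

import "fmt"

// matchesPreferredDatastore mirrors the core of verifyPreferredDatastoreMatchInMultiVC:
// the datastore URL CNS reports for the volume must be one of the preferred URLs.
func matchesPreferredDatastore(actualDatastoreURL string, preferredURLs []string) bool {
	for _, url := range preferredURLs {
		if url == actualDatastoreURL {
			return true
		}
	}
	return false
}

func main() {
	// In the test the actual URL comes from queryResult.Volumes[0].DatastoreUrl.
	preferred := []string{"ds:///vmfs/volumes/vsan:52aa/"}
	fmt.Println(matchesPreferredDatastore("ds:///vmfs/volumes/vsan:52aa/", preferred))
}
```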
VC3 allowed topology + ginkgo.By("Set specific allowed topology") + allowedTopologies = setSpecificAllowedTopology(allowedTopologies, topkeyStartIndex, topValStartIndex, + topValEndIndex) + + // choose preferred datastore + ginkgo.By("Tag preferred datastore for volume provisioning in VC3") + preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, + allowedTopologies[0].Values[0], + preferredDatastoreChosen, ClusterdatastoreListVc[2], nil, true, multiVcClientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pathsLen := len(preferredDatastorePaths) + for j := 0; j < pathsLen; j++ { + if val, ok := ClusterdatastoreListVc[2][preferredDatastorePaths[j]]; ok { + dsUrls = append(dsUrls, val) + } + } + defer func() { + ginkgo.By("Remove preferred datastore tag") + err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], + allowedTopologies[0].Values[0], true, multiVcClientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + framework.Logf("Waiting for %v for preferred datastore to get refreshed in the environment", + preferredDatastoreTimeOutInterval) + time.Sleep(preferredDatastoreTimeOutInterval) + + ginkgo.By("Create StorageClass and PVC") + storageclass, pvclaim, err := createPVCAndStorageClass(client, namespace, nil, + nil, diskSize, allowedTopologies, "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, + *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // Wait for PVC to be in Bound phase + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, []*v1.PersistentVolumeClaim{pvclaim}, + framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = multiVCe2eVSphere.waitForCNSVolumeToBeDeletedInMultiVC(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvclaim = nil + }() + + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) + queryResult, err := multiVCe2eVSphere.queryCNSVolumeWithResultInMultiVC(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) + gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volHandle)) + + ginkgo.By("Creating pod") + pod, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify volume is detached from the node") + isDiskDetached, err := multiVCe2eVSphere.waitForVolumeDetachedFromNodeInMultiVC(client, + volHandle, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), + fmt.Sprintf("Volume %q is not detached from the node %q", volHandle, + pod.Spec.NodeName)) + }() + + // verifying volume provisioning + ginkgo.By("Verify volume is provisioned on the preferred datatsore") + verifyVolumeProvisioningForStandalonePods(ctx, client, pod, namespace, 
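Testcase-4 below expects the restored PVC to stay Pending and to surface a specific error event after the datastore preference is changed; the suite uses its waitForEvent helper for that. A simplified, hypothetical version of such an event check (clientset wiring omitted) could look like this:

```go
package main

import (
	"context"
	"fmt"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// pvcHasEventWithMessage is a simplified stand-in for the waitForEvent helper used by
// the test: it lists events for the PVC and looks for the expected error substring.
func pvcHasEventWithMessage(ctx context.Context, client kubernetes.Interface,
	namespace, pvcName, expected string) (bool, error) {
	events, err := client.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{
		FieldSelector: "involvedObject.name=" + pvcName,
	})
	if err != nil {
		return false, err
	}
	for _, ev := range events.Items {
		if strings.Contains(ev.Message, expected) {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	// In the e2e suite the framework's shared client and namespace would be passed in.
	fmt.Println("expected message:",
		"failed to get the compatible shared datastore for create volume from snapshot")
}
```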
preferredDatastorePaths, + ClusterdatastoreListVc[2], true, dsUrls) + + ginkgo.By("Remove preferred datastore tag chosen for volume provisioning") + err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], + allowedTopologyRacks[2], true, multiVcClientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // choose preferred datastore + ginkgo.By("Tag preferred datastore for volume provisioning in VC3") + preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, + allowedTopologies[0].Values[0], preferredDatastoreChosen, ClusterdatastoreListVc[2], + preferredDatastorePaths, true, multiVcClientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + framework.Logf("Waiting for %v for preferred datastore to get refreshed in the environment", + preferredDatastoreTimeOutInterval) + time.Sleep(preferredDatastoreTimeOutInterval) + + ginkgo.By("Create volume snapshot class, volume snapshot") + volumeSnapshot, volumeSnapshotClass, snapshotId := createSnapshotClassAndVolSnapshot(ctx, snapc, namespace, + pvclaim, volHandle, false, true) + defer func() { + ginkgo.By("Perform cleanup of snapshot created") + performCleanUpForSnapshotCreated(ctx, snapc, namespace, volHandle, volumeSnapshot, snapshotId, + volumeSnapshotClass, pandoraSyncWaitTime, true) + }() + + ginkgo.By("Create PVC from snapshot") + pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, storageclass, nil, + v1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) + pvclaim2, err := fpv.CreatePVC(client, namespace, pvcSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Expect claim to fail provisioning volume within the topology") + err = fpv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, + client, pvclaim2.Namespace, pvclaim2.Name, framework.Poll, framework.ClaimProvisionTimeout) + gomega.Expect(err).To(gomega.HaveOccurred()) + expectedErrMsg := "failed to get the compatible shared datastore for create volume from snapshot" + err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim2.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) + defer func() { + err = fpv.DeletePersistentVolumeClaim(client, pvclaim2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + }) +}) diff --git a/tests/e2e/multi_vc_utils.go b/tests/e2e/multi_vc_utils.go new file mode 100644 index 0000000000..f9ce6f5939 --- /dev/null +++ b/tests/e2e/multi_vc_utils.go @@ -0,0 +1,1016 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + "time" + + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "github.com/vmware/govmomi/cns" + cnsmethods "github.com/vmware/govmomi/cns/methods" + cnstypes "github.com/vmware/govmomi/cns/types" + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/object" + vim25types "github.com/vmware/govmomi/vim25/types" + "golang.org/x/crypto/ssh" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + fpod "k8s.io/kubernetes/test/e2e/framework/pod" + fssh "k8s.io/kubernetes/test/e2e/framework/ssh" + fss "k8s.io/kubernetes/test/e2e/framework/statefulset" +) + +/* +createCustomisedStatefulSets util methods creates statefulset as per the user's +specific requirement and returns the customised statefulset +*/ +func createCustomisedStatefulSets(client clientset.Interface, namespace string, + isParallelPodMgmtPolicy bool, replicas int32, nodeAffinityToSet bool, + allowedTopologies []v1.TopologySelectorLabelRequirement, allowedTopologyLen int, + podAntiAffinityToSet bool, modifyStsSpec bool, stsName string, + accessMode v1.PersistentVolumeAccessMode, sc *storagev1.StorageClass) *appsv1.StatefulSet { + framework.Logf("Preparing StatefulSet Spec") + statefulset := GetStatefulSetFromManifest(namespace) + + if accessMode == "" { + // If accessMode is not specified, set the default accessMode. + accessMode = v1.ReadWriteOnce + } + + if modifyStsSpec { + statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. + Annotations["volume.beta.kubernetes.io/storage-class"] = sc.Name + statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1].Spec.AccessModes[0] = + accessMode + statefulset.Name = stsName + statefulset.Spec.Template.Labels["app"] = statefulset.Name + statefulset.Spec.Selector.MatchLabels["app"] = statefulset.Name + } + if nodeAffinityToSet { + nodeSelectorTerms := getNodeSelectorTerms(allowedTopologies) + statefulset.Spec.Template.Spec.Affinity = new(v1.Affinity) + statefulset.Spec.Template.Spec.Affinity.NodeAffinity = new(v1.NodeAffinity) + statefulset.Spec.Template.Spec.Affinity.NodeAffinity. + RequiredDuringSchedulingIgnoredDuringExecution = new(v1.NodeSelector) + statefulset.Spec.Template.Spec.Affinity.NodeAffinity. 
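createCustomisedStatefulSets converts the allowed-topology requirements into node selector terms via getNodeSelectorTerms, whose implementation is not shown here. A sketch of one plausible conversion, with a placeholder topology key:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// nodeSelectorTermsFromTopology shows one way to turn the allowed-topology slice used
// throughout these tests into node selector terms; the real getNodeSelectorTerms helper
// may differ in detail.
func nodeSelectorTermsFromTopology(allowed []v1.TopologySelectorLabelRequirement) []v1.NodeSelectorTerm {
	var terms []v1.NodeSelectorTerm
	for _, req := range allowed {
		terms = append(terms, v1.NodeSelectorTerm{
			MatchExpressions: []v1.NodeSelectorRequirement{{
				Key:      req.Key,
				Operator: v1.NodeSelectorOpIn,
				Values:   req.Values,
			}},
		})
	}
	return terms
}

func main() {
	// The topology key below is a placeholder for whatever label the CSI driver
	// publishes for the k8s-zone category in this environment.
	allowed := []v1.TopologySelectorLabelRequirement{
		{Key: "topology.csi.vmware.com/k8s-zone", Values: []string{"zone-1", "zone-2"}},
	}
	affinity := &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: nodeSelectorTermsFromTopology(allowed),
			},
		},
	}
	fmt.Printf("%+v\n", affinity)
}
```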
+ RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = nodeSelectorTerms + } + if podAntiAffinityToSet { + statefulset.Spec.Template.Spec.Affinity = &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "key": "app", + }, + }, + TopologyKey: "topology.kubernetes.io/zone", + }, + }, + }, + } + + } + if isParallelPodMgmtPolicy { + statefulset.Spec.PodManagementPolicy = appsv1.ParallelPodManagement + } + statefulset.Spec.Replicas = &replicas + + framework.Logf("Creating statefulset") + CreateStatefulSet(namespace, statefulset, client) + + framework.Logf("Wait for StatefulSet pods to be in up and running state") + fss.WaitForStatusReadyReplicas(client, statefulset, replicas) + gomega.Expect(fss.CheckMount(client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) + ssPodsBeforeScaleDown := fss.GetPodList(client, statefulset) + gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), + fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) + gomega.Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(gomega.BeTrue(), + "Number of Pods in the statefulset should match with number of replicas") + + return statefulset +} + +/* +setSpecificAllowedTopology returns topology map with specific topology fields and values +in a multi-vc setup +*/ +func setSpecificAllowedTopology(allowedTopologies []v1.TopologySelectorLabelRequirement, + topkeyStartIndex int, startIndex int, endIndex int) []v1.TopologySelectorLabelRequirement { + var allowedTopologiesMap []v1.TopologySelectorLabelRequirement + specifiedAllowedTopology := v1.TopologySelectorLabelRequirement{ + Key: allowedTopologies[topkeyStartIndex].Key, + Values: allowedTopologies[topkeyStartIndex].Values[startIndex:endIndex], + } + allowedTopologiesMap = append(allowedTopologiesMap, specifiedAllowedTopology) + + return allowedTopologiesMap +} + +/* +If we have multiple statefulsets, deployment Pods, PVCs/PVs created on a given namespace and for performing +cleanup of these multiple sts creation, deleteAllStsAndPodsPVCsInNamespace is used +*/ +func deleteAllStsAndPodsPVCsInNamespace(ctx context.Context, c clientset.Interface, ns string) { + StatefulSetPoll := 10 * time.Second + StatefulSetTimeout := 10 * time.Minute + ssList, err := c.AppsV1().StatefulSets(ns).List(context.TODO(), + metav1.ListOptions{LabelSelector: labels.Everything().String()}) + framework.ExpectNoError(err) + errList := []string{} + for i := range ssList.Items { + ss := &ssList.Items[i] + var err error + if ss, err = scaleStatefulSetPods(c, ss, 0); err != nil { + errList = append(errList, fmt.Sprintf("%v", err)) + } + fss.WaitForStatusReplicas(c, ss, 0) + framework.Logf("Deleting statefulset %v", ss.Name) + if err := c.AppsV1().StatefulSets(ss.Namespace).Delete(context.TODO(), ss.Name, + metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil { + errList = append(errList, fmt.Sprintf("%v", err)) + } + } + pvNames := sets.NewString() + pvcPollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { + pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(context.TODO(), + metav1.ListOptions{LabelSelector: labels.Everything().String()}) + if err != nil { + framework.Logf("WARNING: Failed to list pvcs, retrying %v", err) + return false, nil + } + for _, pvc := range pvcList.Items { + pvNames.Insert(pvc.Spec.VolumeName) + 
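setSpecificAllowedTopology simply keeps one topology key and a sub-slice of its values; with topValStartIndex=2 and topValEndIndex=3 the tests above end up with only the VC3 zone. A tiny illustration:

```go
package main

import "fmt"

func main() {
	// The zone names are placeholders; in the tests they come from the allowed
	// topology discovered for the multi-VC setup.
	values := []string{"zone-1", "zone-2", "zone-3"}
	topValStartIndex, topValEndIndex := 2, 3
	fmt.Println(values[topValStartIndex:topValEndIndex]) // [zone-3], i.e. the VC3 topology
}
```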
framework.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName) + if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvc.Name, + metav1.DeleteOptions{}); err != nil { + return false, nil + } + } + return true, nil + }) + if pvcPollErr != nil { + errList = append(errList, "Timeout waiting for pvc deletion.") + } + + pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { + pvList, err := c.CoreV1().PersistentVolumes().List(context.TODO(), + metav1.ListOptions{LabelSelector: labels.Everything().String()}) + if err != nil { + framework.Logf("WARNING: Failed to list pvs, retrying %v", err) + return false, nil + } + waitingFor := []string{} + for _, pv := range pvList.Items { + if pvNames.Has(pv.Name) { + waitingFor = append(waitingFor, fmt.Sprintf("%v: %+v", pv.Name, pv.Status)) + } + } + if len(waitingFor) == 0 { + return true, nil + } + framework.Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n")) + return false, nil + }) + if pollErr != nil { + errList = append(errList, "Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs.") + + } + if len(errList) != 0 { + framework.ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n"))) + } + + framework.Logf("Deleting Deployment Pods and its PVCs") + depList, err := c.AppsV1().Deployments(ns).List( + ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + for _, deployment := range depList.Items { + dep := &deployment + err = updateDeploymentReplicawithWait(c, 0, dep.Name, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deletePolicy := metav1.DeletePropagationForeground + err = c.AppsV1().Deployments(ns).Delete(ctx, dep.Name, metav1.DeleteOptions{PropagationPolicy: &deletePolicy}) + if err != nil { + if apierrors.IsNotFound(err) { + return + } else { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + } +} + +/* +verifyVolumeMetadataInCNSForMultiVC verifies container volume metadata is matching the +one is CNS cache on a multivc environment. 
+*/ +func verifyVolumeMetadataInCNSForMultiVC(vs *multiVCvSphere, volumeID string, + PersistentVolumeClaimName string, PersistentVolumeName string, + PodName string, Labels ...vim25types.KeyValue) error { + queryResult, err := vs.queryCNSVolumeWithResultInMultiVC(volumeID) + if err != nil { + return err + } + gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) + if len(queryResult.Volumes) != 1 || queryResult.Volumes[0].VolumeId.Id != volumeID { + return fmt.Errorf("failed to query cns volume %s", volumeID) + } + for _, metadata := range queryResult.Volumes[0].Metadata.EntityMetadata { + kubernetesMetadata := metadata.(*cnstypes.CnsKubernetesEntityMetadata) + if kubernetesMetadata.EntityType == "POD" && kubernetesMetadata.EntityName != PodName { + return fmt.Errorf("entity Pod with name %s not found for volume %s", PodName, volumeID) + } else if kubernetesMetadata.EntityType == "PERSISTENT_VOLUME" && + kubernetesMetadata.EntityName != PersistentVolumeName { + return fmt.Errorf("entity PV with name %s not found for volume %s", PersistentVolumeName, volumeID) + } else if kubernetesMetadata.EntityType == "PERSISTENT_VOLUME_CLAIM" && + kubernetesMetadata.EntityName != PersistentVolumeClaimName { + return fmt.Errorf("entity PVC with name %s not found for volume %s", PersistentVolumeClaimName, volumeID) + } + } + labelMap := make(map[string]string) + for _, e := range queryResult.Volumes[0].Metadata.EntityMetadata { + if e == nil { + continue + } + if e.GetCnsEntityMetadata().Labels == nil { + continue + } + for _, al := range e.GetCnsEntityMetadata().Labels { + labelMap[al.Key] = al.Value + } + for _, el := range Labels { + if val, ok := labelMap[el.Key]; ok { + gomega.Expect(el.Value == val).To(gomega.BeTrue(), + fmt.Sprintf("Actual label Value of the statically provisioned PV is %s but expected is %s", + val, el.Value)) + } else { + return fmt.Errorf("label(%s:%s) is expected in the provisioned PV but its not found", el.Key, el.Value) + } + } + } + ginkgo.By(fmt.Sprintf("successfully verified metadata of the volume %q", volumeID)) + return nil +} + +// govc login cmd for multivc setups +func govcLoginCmdForMultiVC(i int) string { + configUser := strings.Split(multiVCe2eVSphere.multivcConfig.Global.User, ",") + configPwd := strings.Split(multiVCe2eVSphere.multivcConfig.Global.Password, ",") + configvCenterHostname := strings.Split(multiVCe2eVSphere.multivcConfig.Global.VCenterHostname, ",") + configvCenterPort := strings.Split(multiVCe2eVSphere.multivcConfig.Global.VCenterPort, ",") + + loginCmd := "export GOVC_INSECURE=1;" + loginCmd += fmt.Sprintf("export GOVC_URL='https://%s:%s@%s:%s';", + configUser[i], configPwd[i], configvCenterHostname[i], configvCenterPort[i]) + return loginCmd +} + +/*deletes storage profile deletes the storage profile*/ +func deleteStorageProfile(masterIp string, sshClientConfig *ssh.ClientConfig, + storagePolicyName string, clientIndex int) error { + removeStoragePolicy := govcLoginCmdForMultiVC(clientIndex) + + "govc storage.policy.rm " + storagePolicyName + framework.Logf("Remove storage policy: %s ", removeStoragePolicy) + removeStoragePolicytRes, err := sshExec(sshClientConfig, masterIp, removeStoragePolicy) + if err != nil && removeStoragePolicytRes.Code != 0 { + fssh.LogResult(removeStoragePolicytRes) + return fmt.Errorf("couldn't execute command: %s on host: %v , error: %s", + removeStoragePolicy, masterIp, err) + } + return nil +} + +/*deletes storage profile deletes the storage profile*/ +func createStorageProfile(masterIp string, sshClientConfig 
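The govc helpers above all start from the same login prefix built by govcLoginCmdForMultiVC: the i-th entry of each comma-separated credential string selects which vCenter to talk to. A standalone sketch of that string building with placeholder credentials:

```go
package main

import (
	"fmt"
	"strings"
)

// govcLoginCmd mirrors govcLoginCmdForMultiVC above: credentials for all vCenters are
// stored as comma-separated strings and index i picks one VC. Values are placeholders.
func govcLoginCmd(users, pwds, hosts, ports string, i int) string {
	u := strings.Split(users, ",")
	p := strings.Split(pwds, ",")
	h := strings.Split(hosts, ",")
	pt := strings.Split(ports, ",")
	cmd := "export GOVC_INSECURE=1;"
	cmd += fmt.Sprintf("export GOVC_URL='https://%s:%s@%s:%s';", u[i], p[i], h[i], pt[i])
	return cmd
}

func main() {
	fmt.Println(govcLoginCmd("admin1,admin2", "pwd1,pwd2", "vc1.local,vc2.local", "443,443", 1))
}
```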
*ssh.ClientConfig, + storagePolicyName string, clientIndex int) error { + createStoragePolicy := govcLoginCmdForMultiVC(clientIndex) + + "govc storage.policy.create -category=shared-cat-todelete1 -tag=shared-tag-todelete1 " + storagePolicyName + framework.Logf("Create storage policy: %s ", createStoragePolicy) + createStoragePolicytRes, err := sshExec(sshClientConfig, masterIp, createStoragePolicy) + if err != nil && createStoragePolicytRes.Code != 0 { + fssh.LogResult(createStoragePolicytRes) + return fmt.Errorf("couldn't execute command: %s on host: %v , error: %s", + createStoragePolicy, masterIp, err) + } + return nil +} + +/* +performScalingOnStatefulSetAndVerifyPvNodeAffinity accepts 3 bool values - one for scaleup, +second for scale down and third bool value to check node and pod topology affinites +*/ +func performScalingOnStatefulSetAndVerifyPvNodeAffinity(ctx context.Context, client clientset.Interface, + scaleUpReplicaCount int32, scaleDownReplicaCount int32, statefulset *appsv1.StatefulSet, + parallelStatefulSetCreation bool, namespace string, + allowedTopologies []v1.TopologySelectorLabelRequirement, stsScaleUp bool, stsScaleDown bool, + verifyTopologyAffinity bool) error { + + if stsScaleDown { + framework.Logf("Scale down statefulset replica") + err := scaleDownStatefulSetPod(ctx, client, statefulset, namespace, scaleDownReplicaCount, + parallelStatefulSetCreation, true) + if err != nil { + return fmt.Errorf("error scaling down statefulset: %v", err) + } + } + + if stsScaleUp { + framework.Logf("Scale up statefulset replica") + err := scaleUpStatefulSetPod(ctx, client, statefulset, namespace, scaleUpReplicaCount, + parallelStatefulSetCreation, true) + if err != nil { + return fmt.Errorf("error scaling up statefulset: %v", err) + } + } + + if verifyTopologyAffinity { + framework.Logf("Verify PV node affinity and that the PODS are running on appropriate node") + err := verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, parallelStatefulSetCreation, true) + if err != nil { + return fmt.Errorf("error verifying PV node affinity and POD node details: %v", err) + } + } + + return nil +} + +/* +createStafeulSetAndVerifyPVAndPodNodeAffinty creates user specified statefulset and +further checks the node and volumes affinities +*/ +func createStafeulSetAndVerifyPVAndPodNodeAffinty(ctx context.Context, client clientset.Interface, + namespace string, parallelPodPolicy bool, replicas int32, nodeAffinityToSet bool, + allowedTopologies []v1.TopologySelectorLabelRequirement, allowedTopologyLen int, + podAntiAffinityToSet bool, parallelStatefulSetCreation bool, modifyStsSpec bool, + stsName string, accessMode v1.PersistentVolumeAccessMode, + sc *storagev1.StorageClass, verifyTopologyAffinity bool) (*v1.Service, *appsv1.StatefulSet, error) { + + ginkgo.By("Create service") + service := CreateService(namespace, client) + + framework.Logf("Create StatefulSet") + statefulset := createCustomisedStatefulSets(client, namespace, parallelPodPolicy, + replicas, nodeAffinityToSet, allowedTopologies, allowedTopologyLen, podAntiAffinityToSet, modifyStsSpec, + "", "", nil) + + if verifyTopologyAffinity { + framework.Logf("Verify PV node affinity and that the PODS are running on appropriate node") + err := verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, parallelStatefulSetCreation, true) + if err != nil { + return nil, nil, fmt.Errorf("error verifying PV node affinity and POD 
node details: %v", err) + } + } + + return service, statefulset, nil +} + +/* +performOfflineVolumeExpansin performs offline volume expansion on the PVC passed as an input +*/ +func performOfflineVolumeExpansin(client clientset.Interface, + pvclaim *v1.PersistentVolumeClaim, volHandle string, namespace string) error { + + ginkgo.By("Expanding current pvc, performing offline volume expansion") + currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage] + newSize := currentPvcSize.DeepCopy() + newSize.Add(resource.MustParse("1Gi")) + framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize) + expandedPvc, err := expandPVCSize(pvclaim, newSize, client) + if err != nil { + return fmt.Errorf("error expanding PVC size: %v", err) + } + if expandedPvc == nil { + return errors.New("expanded PVC is nil") + } + + pvcSize := expandedPvc.Spec.Resources.Requests[v1.ResourceStorage] + if pvcSize.Cmp(newSize) != 0 { + return fmt.Errorf("error updating PVC size %q", expandedPvc.Name) + } + + ginkgo.By("Waiting for controller volume resize to finish") + err = waitForPvResizeForGivenPvc(expandedPvc, client, totalResizeWaitPeriod) + if err != nil { + return fmt.Errorf("error waiting for controller volume resize: %v", err) + } + + ginkgo.By("Checking for conditions on PVC") + _, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, expandedPvc.Name, pollTimeout) + if err != nil { + return fmt.Errorf("error waiting for PVC conditions: %v", err) + } + + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) + queryResult, err := multiVCe2eVSphere.queryCNSVolumeWithResultInMultiVC(volHandle) + if err != nil { + return fmt.Errorf("error querying CNS volume: %v", err) + } + + if len(queryResult.Volumes) == 0 { + return errors.New("queryCNSVolumeWithResult returned no volume") + } + + ginkgo.By("Verifying disk size requested in volume expansion is honored") + newSizeInMb := int64(3072) + if queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).CapacityInMb != newSizeInMb { + return errors.New("wrong disk size after volume expansion") + } + + return nil +} + +/* +performOnlineVolumeExpansin performs online volume expansion on the Pod passed as an input +*/ +func performOnlineVolumeExpansin(f *framework.Framework, client clientset.Interface, + pvclaim *v1.PersistentVolumeClaim, namespace string, pod *v1.Pod) error { + + ginkgo.By("Waiting for file system resize to finish") + expandedPVC, err := waitForFSResize(pvclaim, client) + if err != nil { + return fmt.Errorf("error waiting for file system resize: %v", err) + } + + pvcConditions := expandedPVC.Status.Conditions + expectEqual(len(pvcConditions), 0, "pvc should not have conditions") + + var fsSize int64 + + ginkgo.By("Verify filesystem size for mount point /mnt/volume1") + fsSize, err = getFSSizeMb(f, pod) + if err != nil { + return fmt.Errorf("error getting file system size: %v", err) + } + framework.Logf("File system size after expansion : %s", fsSize) + + if fsSize < diskSizeInMb { + return fmt.Errorf("error updating filesystem size for %q. 
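The offline-expansion helper grows the claim by 1Gi and then cross-checks the size both against the updated PVC and against the CapacityInMb reported by CNS. The resource.Quantity arithmetic it relies on, shown in isolation (the 2Gi starting size is a placeholder):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Same size arithmetic as performOfflineVolumeExpansin: grow the claim by 1Gi and
	// compare the requested size with what the API server reports back.
	currentPvcSize := resource.MustParse("2Gi")
	newSize := currentPvcSize.DeepCopy()
	newSize.Add(resource.MustParse("1Gi"))

	fmt.Printf("current=%s new=%s\n", currentPvcSize.String(), newSize.String())
	// Cmp returns 0 when the updated claim matches the requested size; after a
	// 2Gi+1Gi expansion, CNS is expected to report 3072 MiB for the backing disk.
	fmt.Println("sizes equal:", newSize.Cmp(resource.MustParse("3Gi")) == 0)
}
```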
Resulting filesystem size is %d", expandedPVC.Name, fsSize) + } + ginkgo.By("File system resize finished successfully") + + return nil +} + +/* +This util getClusterNameForMultiVC will return the cluster details of anyone VC passed to this util +*/ +func getClusterNameForMultiVC(ctx context.Context, vs *multiVCvSphere, + clientIndex int) ([]*object.ClusterComputeResource, + *VsanClient, error) { + + var vsanHealthClient *VsanClient + var err error + c := newClientForMultiVC(ctx, vs) + + datacenter := strings.Split(multiVCe2eVSphere.multivcConfig.Global.Datacenters, ",") + + for i, client := range c { + if clientIndex == i { + vsanHealthClient, err = newVsanHealthSvcClient(ctx, client.Client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + + finder := find.NewFinder(vsanHealthClient.vim25Client, false) + dc, err := finder.Datacenter(ctx, datacenter[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + finder.SetDatacenter(dc) + + clusterComputeResource, err := finder.ClusterComputeResourceList(ctx, "*") + framework.Logf("clusterComputeResource %v", clusterComputeResource) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + return clusterComputeResource, vsanHealthClient, err +} + +/* +This util verifyPreferredDatastoreMatchInMultiVC will compare the prefrence of datatsore with the +actual datatsore and expected datastore and will return a bool value if both actual and expected datatsore +gets matched else will return false +This util will basically be used to check where exactly the volume provisioning has happened +*/ +func (vs *multiVCvSphere) verifyPreferredDatastoreMatchInMultiVC(volumeID string, dsUrls []string) bool { + framework.Logf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volumeID) + queryResult, err := vs.queryCNSVolumeWithResultInMultiVC(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) + actualDatastoreUrl := queryResult.Volumes[0].DatastoreUrl + flag := false + for _, dsUrl := range dsUrls { + if actualDatastoreUrl == dsUrl { + flag = true + return flag + } + } + return flag +} + +/* +queryCNSVolumeSnapshotWithResultInMultiVC Call CnsQuerySnapshots and returns CnsSnapshotQueryResult +to client +*/ +func (vs *multiVCvSphere) queryCNSVolumeSnapshotWithResultInMultiVC(fcdID string, + snapshotId string) (*cnstypes.CnsSnapshotQueryResult, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var snapshotSpec []cnstypes.CnsSnapshotQuerySpec + var taskResult *cnstypes.CnsSnapshotQueryResult + snapshotSpec = append(snapshotSpec, cnstypes.CnsSnapshotQuerySpec{ + VolumeId: cnstypes.CnsVolumeId{ + Id: fcdID, + }, + SnapshotId: &cnstypes.CnsSnapshotId{ + Id: snapshotId, + }, + }) + + queryFilter := cnstypes.CnsSnapshotQueryFilter{ + SnapshotQuerySpecs: snapshotSpec, + Cursor: &cnstypes.CnsCursor{ + Offset: 0, + Limit: 100, + }, + } + + req := cnstypes.CnsQuerySnapshots{ + This: cnsVolumeManagerInstance, + SnapshotQueryFilter: queryFilter, + } + + for i := 0; i < len(vs.multiVcCnsClient); i++ { + res, err := cnsmethods.CnsQuerySnapshots(ctx, vs.multiVcCnsClient[i].Client, &req) + if err != nil { + return nil, err + } + + task, err := object.NewTask(vs.multiVcClient[i].Client, res.Returnval), nil + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + taskInfo, err := cns.GetTaskInfo(ctx, task) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + taskResult, err = cns.GetQuerySnapshotsTaskResult(ctx, taskInfo) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if taskResult.Entries[0].Snapshot.SnapshotId.Id == snapshotId { + return taskResult, nil + } + } + return taskResult, nil +} + +/* +getDatastoresListFromMultiVCs util will fetch the list of datastores available in all +the multi-vc setup +This util will return key-value combination of datastore-name:datastore-url of all the 3 VCs available +in a multi-vc setup +*/ +func getDatastoresListFromMultiVCs(masterIp string, sshClientConfig *ssh.ClientConfig, + cluster *object.ClusterComputeResource, isMultiVcSetup bool) (map[string]string, map[string]string, + map[string]string, error) { + ClusterdatastoreListMapVc1 := make(map[string]string) + ClusterdatastoreListMapVc2 := make(map[string]string) + ClusterdatastoreListMapVc3 := make(map[string]string) + + configvCenterHostname := strings.Split(multiVCe2eVSphere.multivcConfig.Global.VCenterHostname, ",") + for i := 0; i < len(configvCenterHostname); i++ { + datastoreListByVC := govcLoginCmdForMultiVC(i) + + "govc object.collect -s -d ' ' " + cluster.InventoryPath + " host | xargs govc datastore.info -H | " + + "grep 'Path\\|URL' | tr -s [:space:]" + + framework.Logf("cmd : %s ", datastoreListByVC) + result, err := sshExec(sshClientConfig, masterIp, datastoreListByVC) + if err != nil && result.Code != 0 { + fssh.LogResult(result) + return nil, nil, nil, fmt.Errorf("couldn't execute command: %s on host: %v , error: %s", + datastoreListByVC, masterIp, err) + } + + datastoreList := strings.Split(result.Stdout, "\n") + + ClusterdatastoreListMap := make(map[string]string) // Empty the map + + for i := 0; i < len(datastoreList)-1; i = i + 2 { + key := strings.ReplaceAll(datastoreList[i], " Path: ", "") + value := strings.ReplaceAll(datastoreList[i+1], " URL: ", "") + ClusterdatastoreListMap[key] = value + } + + if i == 0 { + ClusterdatastoreListMapVc1 = ClusterdatastoreListMap + } else if i == 1 { + ClusterdatastoreListMapVc2 = ClusterdatastoreListMap + } else if i == 2 { + ClusterdatastoreListMapVc3 = ClusterdatastoreListMap + } + } + + return ClusterdatastoreListMapVc1, ClusterdatastoreListMapVc2, ClusterdatastoreListMapVc3, nil +} + +/* +readVsphereConfCredentialsInMultiVCcSetup util accepts a vsphere conf parameter, reads all the values +of vsphere conf and returns a testConf file +*/ +func readVsphereConfCredentialsInMultiVcSetup(cfg string) (e2eTestConfig, error) { + var config e2eTestConfig + + virtualCenters := make([]string, 0) + userList := make([]string, 0) + passwordList := make([]string, 0) + portList := make([]string, 0) + dataCenterList := make([]string, 0) + + key, value := "", "" + lines := strings.Split(cfg, "\n") + for index, line := range lines { + if index == 0 { + // Skip [Global]. 
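getDatastoresListFromMultiVCs parses the alternating "Path:"/"URL:" lines printed by govc datastore.info into a name-to-URL map per vCenter. A trimmed-down version of that parsing with made-up command output:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Sample of what the govc pipeline in getDatastoresListFromMultiVCs emits
	// (datastore names and URLs are made up).
	stdout := " Path: vsanDatastore\n URL: ds:///vmfs/volumes/vsan:52aa/\n" +
		" Path: sharedVmfs-0\n URL: ds:///vmfs/volumes/61f0/\n"

	lines := strings.Split(stdout, "\n")
	datastores := make(map[string]string)
	for i := 0; i+1 < len(lines); i += 2 {
		name := strings.TrimSpace(strings.TrimPrefix(lines[i], " Path:"))
		url := strings.TrimSpace(strings.TrimPrefix(lines[i+1], " URL:"))
		if name != "" && url != "" {
			datastores[name] = url
		}
	}
	fmt.Println(datastores)
}
```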
+ continue + } + words := strings.Split(line, " = ") + if strings.Contains(words[0], "topology-categories=") { + words = strings.Split(line, "=") + } + + if len(words) == 1 { + if strings.Contains(words[0], "Snapshot") { + continue + } + if strings.Contains(words[0], "Labels") { + continue + } + words = strings.Split(line, " ") + if strings.Contains(words[0], "VirtualCenter") { + value = words[1] + value = strings.TrimSuffix(value, "]") + value = trimQuotes(value) + config.Global.VCenterHostname = value + virtualCenters = append(virtualCenters, value) + } + continue + } + key = words[0] + value = trimQuotes(words[1]) + var strconvErr error + switch key { + case "insecure-flag": + if strings.Contains(value, "true") { + config.Global.InsecureFlag = true + } else { + config.Global.InsecureFlag = false + } + case "cluster-id": + config.Global.ClusterID = value + case "cluster-distribution": + config.Global.ClusterDistribution = value + case "user": + config.Global.User = value + userList = append(userList, value) + case "password": + config.Global.Password = value + passwordList = append(passwordList, value) + case "datacenters": + config.Global.Datacenters = value + dataCenterList = append(dataCenterList, value) + case "port": + config.Global.VCenterPort = value + portList = append(portList, value) + case "cnsregistervolumes-cleanup-intervalinmin": + config.Global.CnsRegisterVolumesCleanupIntervalInMin, strconvErr = strconv.Atoi(value) + if strconvErr != nil { + return config, fmt.Errorf("invalid value for cnsregistervolumes-cleanup-intervalinmin: %s", value) + } + case "topology-categories": + config.Labels.TopologyCategories = value + case "global-max-snapshots-per-block-volume": + config.Snapshot.GlobalMaxSnapshotsPerBlockVolume, strconvErr = strconv.Atoi(value) + if strconvErr != nil { + return config, fmt.Errorf("invalid value for global-max-snapshots-per-block-volume: %s", value) + } + case "csi-fetch-preferred-datastores-intervalinmin": + config.Global.CSIFetchPreferredDatastoresIntervalInMin, strconvErr = strconv.Atoi(value) + if strconvErr != nil { + return config, fmt.Errorf("invalid value for csi-fetch-preferred-datastores-intervalinmin: %s", value) + } + case "targetvSANFileShareDatastoreURLs": + config.Global.TargetvSANFileShareDatastoreURLs = value + case "query-limit": + config.Global.QueryLimit, strconvErr = strconv.Atoi(value) + if strconvErr != nil { + return config, fmt.Errorf("invalid value for query-limit: %s", value) + } + case "list-volume-threshold": + config.Global.ListVolumeThreshold, strconvErr = strconv.Atoi(value) + if strconvErr != nil { + return config, fmt.Errorf("invalid value for list-volume-threshold: %s", value) + } + default: + return config, fmt.Errorf("unknown key %s in the input string", key) + } + } + + config.Global.VCenterHostname = strings.Join(virtualCenters, ",") + config.Global.User = strings.Join(userList, ",") + config.Global.Password = strings.Join(passwordList, ",") + config.Global.VCenterPort = strings.Join(portList, ",") + config.Global.Datacenters = strings.Join(dataCenterList, ",") + + return config, nil +} + +/* +writeNewDataAndUpdateVsphereConfSecret uitl edit the vsphere conf and returns the updated +vsphere config sceret +*/ +func writeNewDataAndUpdateVsphereConfSecret(client clientset.Interface, ctx context.Context, + csiNamespace string, cfg e2eTestConfig) error { + var result string + + // fetch current secret + currentSecret, err := client.CoreV1().Secrets(csiNamespace).Get(ctx, configSecret, metav1.GetOptions{}) + if err != nil { 
+ return err + } + + // modify vshere conf file + vCenterHostnames := strings.Split(cfg.Global.VCenterHostname, ",") + users := strings.Split(cfg.Global.User, ",") + passwords := strings.Split(cfg.Global.Password, ",") + dataCenters := strings.Split(cfg.Global.Datacenters, ",") + ports := strings.Split(cfg.Global.VCenterPort, ",") + + result += fmt.Sprintf("[Global]\ncluster-distribution = \"%s\"\n"+ + "csi-fetch-preferred-datastores-intervalinmin = %d\n"+ + "query-limit = %d\nlist-volume-threshold = %d\n\n", + cfg.Global.ClusterDistribution, cfg.Global.CSIFetchPreferredDatastoresIntervalInMin, cfg.Global.QueryLimit, + cfg.Global.ListVolumeThreshold) + for i := 0; i < len(vCenterHostnames); i++ { + result += fmt.Sprintf("[VirtualCenter \"%s\"]\ninsecure-flag = \"%t\"\nuser = \"%s\"\npassword = \"%s\"\n"+ + "port = \"%s\"\ndatacenters = \"%s\"\n\n", + vCenterHostnames[i], cfg.Global.InsecureFlag, users[i], passwords[i], ports[i], dataCenters[i]) + } + + result += fmt.Sprintf("[Snapshot]\nglobal-max-snapshots-per-block-volume = %d\n\n", + cfg.Snapshot.GlobalMaxSnapshotsPerBlockVolume) + result += fmt.Sprintf("[Labels]\ntopology-categories = \"%s\"\n", cfg.Labels.TopologyCategories) + + framework.Logf(result) + + // update config secret with newly updated vshere conf file + framework.Logf("Updating the secret to reflect new conf credentials") + currentSecret.Data[vSphereCSIConf] = []byte(result) + _, err = client.CoreV1().Secrets(csiNamespace).Update(ctx, currentSecret, metav1.UpdateOptions{}) + if err != nil { + return err + } + + return nil +} + +// readVsphereConfSecret method is used to read csi vsphere conf file +func readVsphereConfSecret(client clientset.Interface, ctx context.Context, + csiNamespace string) (e2eTestConfig, error) { + + // fetch current secret + currentSecret, err := client.CoreV1().Secrets(csiNamespace).Get(ctx, configSecret, metav1.GetOptions{}) + if err != nil { + return e2eTestConfig{}, err + } + + // read vsphere conf + originalConf := string(currentSecret.Data[vSphereCSIConf]) + vsphereCfg, err := readVsphereConfCredentialsInMultiVcSetup(originalConf) + if err != nil { + return e2eTestConfig{}, err + } + + return vsphereCfg, nil +} + +/* +setNewNameSpaceInCsiYaml util installs the csi yaml in new namespace +*/ +func setNewNameSpaceInCsiYaml(client clientset.Interface, sshClientConfig *ssh.ClientConfig, originalNS string, + newNS string, allMasterIps []string) error { + + var controlIp string + ignoreLabels := make(map[string]string) + + for _, masterIp := range allMasterIps { + deleteCsiYaml := "kubectl delete -f vsphere-csi-driver.yaml" + framework.Logf("Delete csi driver yaml: %s ", deleteCsiYaml) + deleteCsi, err := sshExec(sshClientConfig, masterIp, deleteCsiYaml) + if err != nil && deleteCsi.Code != 0 { + if strings.Contains(err.Error(), "does not exist") { + framework.Logf("Retry other master nodes") + continue + } else { + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "couldn't execute command on host: %v , error: %s", + masterIp, err) + } + } + if err == nil { + controlIp = masterIp + break + } + } + + findAndSetVal := "sed -i 's/" + originalNS + "/" + newNS + "/g' " + "vsphere-csi-driver.yaml" + framework.Logf("Set test namespace to csi yaml: %s ", findAndSetVal) + setVal, err := sshExec(sshClientConfig, controlIp, findAndSetVal) + if err != nil && setVal.Code != 0 { + fssh.LogResult(setVal) + return fmt.Errorf("couldn't execute command: %s on host: %v , error: %s", + findAndSetVal, controlIp, err) + } + + applyCsiYaml := "kubectl apply -f 
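writeNewDataAndUpdateVsphereConfSecret regenerates the multi-VC vsphere.conf as one [VirtualCenter "..."] block per vCenter plus the [Global], [Snapshot] and [Labels] sections that readVsphereConfCredentialsInMultiVcSetup understands. A standalone sketch of that formatting, with placeholder hosts, credentials and values (only a subset of the [Global] keys is shown):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	hosts := strings.Split("vc1.local,vc2.local", ",")
	users := strings.Split("user1,user2", ",")
	passwords := strings.Split("pwd1,pwd2", ",")
	ports := strings.Split("443,443", ",")
	datacenters := strings.Split("dc1,dc2", ",")

	conf := "[Global]\ncluster-distribution = \"CSI-Vanilla\"\n" +
		"csi-fetch-preferred-datastores-intervalinmin = 1\n\n"
	// One [VirtualCenter "..."] section per vCenter, as in the util above.
	for i := range hosts {
		conf += fmt.Sprintf("[VirtualCenter \"%s\"]\ninsecure-flag = \"true\"\nuser = \"%s\"\n"+
			"password = \"%s\"\nport = \"%s\"\ndatacenters = \"%s\"\n\n",
			hosts[i], users[i], passwords[i], ports[i], datacenters[i])
	}
	conf += "[Snapshot]\nglobal-max-snapshots-per-block-volume = 3\n\n"
	conf += "[Labels]\ntopology-categories = \"k8s-zone\"\n"
	fmt.Print(conf)
}
```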
vsphere-csi-driver.yaml" + framework.Logf("Apply updated csi yaml: %s ", applyCsiYaml) + applyCsi, err := sshExec(sshClientConfig, controlIp, applyCsiYaml) + if err != nil && applyCsi.Code != 0 { + fssh.LogResult(applyCsi) + return fmt.Errorf("couldn't execute command: %s on host: %v , error: %s", + applyCsiYaml, controlIp, err) + } + + // Wait for the CSI Pods to be up and Running + list_of_pods, err := fpod.GetPodsInNamespace(client, newNS, ignoreLabels) + if err != nil { + return err + } + num_csi_pods := len(list_of_pods) + err = fpod.WaitForPodsRunningReady(client, newNS, int32(num_csi_pods), 0, + pollTimeout, ignoreLabels) + if err != nil { + return err + } + return nil +} + +/* +deleteVsphereConfigSecret deletes vsphere config secret +*/ +func deleteVsphereConfigSecret(client clientset.Interface, ctx context.Context, + originalNS string) error { + + // get current secret + currentSecret, err := client.CoreV1().Secrets(originalNS).Get(ctx, configSecret, metav1.GetOptions{}) + if err != nil { + return err + } + + // delete current secret + err = client.CoreV1().Secrets(originalNS).Delete(ctx, currentSecret.Name, metav1.DeleteOptions{}) + if err != nil { + return err + } + + return nil +} + +/* +createNamespaceSpec util creates a spec required for creating a namespace +*/ +func createNamespaceSpec(nsName string) *v1.Namespace { + var namespace = &v1.Namespace{ + TypeMeta: metav1.TypeMeta{ + Kind: "Namespace", + }, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: nsName, + }, + } + return namespace +} + +/* +createNamespace creates a namespace +*/ +func createNamespace(client clientset.Interface, ctx context.Context, nsName string) (*v1.Namespace, error) { + + framework.Logf("Create namespace") + namespaceSpec := createNamespaceSpec(nsName) + namespace, err := client.CoreV1().Namespaces().Create(ctx, namespaceSpec, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + return namespace, nil +} + +/* +createVsphereConfigSecret util creates csi vsphere conf and later creates a new config secret +*/ +func createVsphereConfigSecret(namespace string, cfg e2eTestConfig, sshClientConfig *ssh.ClientConfig, + allMasterIps []string) error { + + var conf string + var controlIp string + + for _, masterIp := range allMasterIps { + readCsiYaml := "ls -l vsphere-csi-driver.yaml" + framework.Logf("list csi driver yaml: %s ", readCsiYaml) + grepCsiNs, err := sshExec(sshClientConfig, masterIp, readCsiYaml) + if err != nil && grepCsiNs.Code != 0 { + if strings.Contains(err.Error(), "No such file or directory") { + framework.Logf("Retry other master nodes") + continue + } else { + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "couldn't execute command on host: %v , error: %s", + masterIp, err) + } + } + if err == nil { + controlIp = masterIp + break + } + } + + vCenterHostnames := strings.Split(cfg.Global.VCenterHostname, ",") + users := strings.Split(cfg.Global.User, ",") + passwords := strings.Split(cfg.Global.Password, ",") + dataCenters := strings.Split(cfg.Global.Datacenters, ",") + ports := strings.Split(cfg.Global.VCenterPort, ",") + + conf = fmt.Sprintf("tee csi-vsphere.conf >/dev/null < 0 { + for _, snap := range snaps { + framework.Logf("Delete volume snapshot %v", snap.Name) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snap.Name, pandoraSyncWaitTime) + } + for i, snapshotId := range snapIDs { + framework.Logf("Verify snapshot entry %v is deleted from CNS for volume %v", snapshotId, volIds[i]) + err = waitForCNSSnapshotToBeDeleted(volIds[i], snapshotId) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + }() + + ginkgo.By("Change volume allocation of pol1 and pol2 to EZT and apply the changes") + ginkgo.By("Verify the volume allocation type changes fail") + for i, volId := range volIds { + framework.Logf("updating policy %v with %v allocation type", policyNames[i/2], eztAllocType) + err = updateVmfsPolicyAlloctype(ctx, pc, eztAllocType, policyNames[i/2], policyIds[i/2]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf( + "trying to reconfigure volume %v with policy %v which is expected to fail", volId, policyNames[i/2]) + err = e2eVSphere.reconfigPolicy(ctx, volId, policyIds[i/2].UniqueId) + framework.Logf("reconfigure volume %v with policy %v errored out with:\n%v", volId, policyNames[i/2], err) + gomega.Expect(err).To(gomega.HaveOccurred()) + } + + ginkgo.By("Delete snapshots created in step 6") + for _, snap := range snaps { + framework.Logf("Delete volume snapshot %v", snap.Name) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, snap.Name, pandoraSyncWaitTime) + } + for i, snapshotId := range snapIDs { + framework.Logf("Verify snapshot entry %v is deleted from CNS for volume %v", snapshotId, volIds[i]) + err = waitForCNSSnapshotToBeDeleted(volIds[i], snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + snaps = []*snapV1.VolumeSnapshot{} + ginkgo.By("Apply volume allocation type changes again and verify that it is successful this time") + for i, volId := range volIds { + framework.Logf("trying to reconfigure volume %v with policy %v", volId, policyNames[i/2]) + err = e2eVSphere.reconfigPolicy(ctx, volIds[i], policyIds[i/2].UniqueId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + }) + + /* + Verify expansion during Thin -> EZT, LZT -> EZT conversion (should take >vpxd task timeout) + Steps for offline volumes: + 1 Create 2 SPBM policies with thin, and LZT volume allocation respectively, say pol1, pol2 + 2 Create 2 SCs each with a SPBM policy created from step 1 + 3 Create a PVC of 10g using each of the SCs created from step 2 + 4 Wait for PVCs created in step 3 to be bound + 5 Change volume allocation of pol1 and pol2 to EZT and opt for immediate updation + 6 While updation in step 5 is still running, expand all PVCs created in step 3 such that resize should take + more than vpxd task timeout for PVCs updated to EZT allocation + 7 Wait for all PVCs created in step 3 to reach FileSystemResizePending state + 8 Delete the PVCs created in step 3 + 9 Delete the SCs created in step 2 + 10 Deleted the SPBM policies created in step 1 + + Steps for online volumes: + 1 Create 2 SPBM policies with thin, and LZT volume allocation respectively, say pol1, pol2 + 2 Create 2 SCs each with a SPBM policy created from step 1 + 3 Create a PVC of 10g using each of the SCs created from step 2 + 4 Wait for PVCs created in step 3 to be bound + 5 Create pods using PVCs created in step 4 + 6 Change volume allocation of pol1 and pol2 to EZT and opt for immediate updation + 7 While updation in step 5 is still running, expand all PVCs created in step 3 such that resize should take + more than vpxd task timeout for PVCs updated to EZT allocation + 8 Wait for file system resize to complete on all PVCs created in step 3 + 9 Delete pods created in step 4 + 10 Delete the PVCs created in step 3 + 11 Delete the SCs created in step 2 + 12 Deleted the SPBM policies created in step 1 + */ + ginkgo.It("[csi-block-vanilla][csi-guest][csi-supervisor] Verify expansion during Thin -> EZT, LZT -> EZT"+ + " conversion 
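Because each storage policy in this test backs two PVCs, the code above repeatedly maps volume index i back to its policy with integer division (policyNames[i/2], policyIds[i/2]). A tiny illustration of that pairing with placeholder names:

```go
package main

import "fmt"

func main() {
	// Two PVCs (and hence two volumes) are created per storage policy, so volume i
	// maps back to policy i/2.
	policyNames := []string{"pol1", "pol2"}
	volIds := []string{"vol-a", "vol-b", "vol-c", "vol-d"}
	for i, vol := range volIds {
		fmt.Printf("%s -> %s\n", vol, policyNames[i/2])
	}
}
```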
(should take >vpxd task timeout)", func() { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sharedvmfsURL := os.Getenv(envSharedVMFSDatastoreURL) + if sharedvmfsURL == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastoreURL)) + } + + scParameters := make(map[string]string) + policyNames := []string{} + policyIds := []*pbmtypes.PbmProfileId{} + pvcs := []*v1.PersistentVolumeClaim{} + scs := []*storagev1.StorageClass{} + pvcs2d := [][]*v1.PersistentVolumeClaim{} + largeSize := os.Getenv(envDiskSizeLarge) + pvc10g := "10Gi" + if largeSize == "" { + largeSize = diskSizeLarge + } + + rand.New(rand.NewSource(time.Now().UnixNano())) + suffix := fmt.Sprintf("-%v-%v", time.Now().UnixNano(), rand.Intn(10000)) + categoryName := "category" + suffix + tagName := "tag" + suffix + + catID, tagID := createCategoryNTag(ctx, categoryName, tagName) + defer func() { + deleteCategoryNTag(ctx, catID, tagID) + }() + + attachTagToDS(ctx, tagID, sharedvmfsURL) + defer func() { + detachTagFromDS(ctx, tagID, sharedvmfsURL) + }() + + allocationTypes := []string{ + thinAllocType, + lztAllocType, + } + + ginkgo.By("create 2 SPBM policies with thin, LZT volume allocation respectively") + for _, at := range allocationTypes { + policyID, policyName := createVmfsStoragePolicy( + ctx, pc, at, map[string]string{categoryName: tagName}) + defer func() { + deleteStoragePolicy(ctx, pc, policyID) + }() + scParameters[scParamStoragePolicyName] = policyName + policyNames = append(policyNames, policyName) + policyIds = append(policyIds, policyID) + } + + defer func() { + for _, policyID := range policyIds { + deleteStoragePolicy(ctx, pc, policyID) + } + }() + + ginkgo.By("Create 2 SCs each with a SPBM policy created from step 1") + if vanillaCluster { + for i, policyName := range policyNames { + scParameters[scParamStoragePolicyName] = policyName + policyNames = append(policyNames, policyName) + policyIds = append(policyIds, policyIds[i]) + storageclass, err := createStorageClass(client, + scParameters, nil, "", "", true, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + scs = append(scs, storageclass) + } + } else if supervisorCluster { + assignPolicyToWcpNamespace(client, ctx, namespace, policyNames) + for _, policyName := range policyNames { + createResourceQuota(client, namespace, rqLimit, policyName) + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + scs = append(scs, storageclass) + } + } else if guestCluster { + svcClient, svNamespace := getSvcClientAndNamespace() + assignPolicyToWcpNamespace(svcClient, ctx, svNamespace, policyNames) + for _, policyName := range policyNames { + createResourceQuota(svcClient, svNamespace, rqLimit, policyName) + storageclass, err := svcClient.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + scs = append(scs, storageclass) + } + } + + defer func() { + ginkgo.By("Delete the SCs created in step 2") + if vanillaCluster { + for _, sc := range scs { + err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + }() + + ginkgo.By("Create two PVCs of 10g using each of the SCs created from step 2") + for _, sc := range scs { + pvclaim, err := createPVC(client, namespace, nil, pvc10g, sc, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvcs = append(pvcs, pvclaim) + 
pvclaim2, err := createPVC(client, namespace, nil, pvc10g, sc, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvcs = append(pvcs, pvclaim2) + // one pvc for online case and one more pvc for offline case + pvcs2d = append(pvcs2d, []*v1.PersistentVolumeClaim{pvclaim}) + } + + ginkgo.By("Verify the PVCs created in step 3 are bound") + pvs, err := fpv.WaitForPVClaimBoundPhase(client, pvcs, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + volIds := []string{} + ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id") + for i, pv := range pvs { + volumeID := pv.Spec.CSI.VolumeHandle + if guestCluster { + volumeID = getVolumeIDFromSupervisorCluster(pvs[0].Spec.CSI.VolumeHandle) + gomega.Expect(volumeID).NotTo(gomega.BeEmpty()) + } + storagePolicyMatches, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyNames[i/2]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed") + e2eVSphere.verifyVolumeCompliance(volumeID, true) + e2eVSphere.verifyDatastoreMatch(volumeID, []string{sharedvmfsURL}) + volIds = append(volIds, volumeID) + } + + defer func() { + ginkgo.By("Delete the PVCs created in step 3") + for _, pvc := range pvcs { + err := fpv.DeletePersistentVolumeClaim(client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + for _, volId := range volIds { + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + // Create a Pod to use this PVC, and verify volume has been attached + ginkgo.By("Creating 2 pods to attach 2 PVCs") + pods := createMultiplePods(ctx, client, pvcs2d, true) + + defer func() { + ginkgo.By("Delete pods") + deletePodsAndWaitForVolsToDetach(ctx, client, pods, true) + }() + + fsSizes := []int64{} + for _, pod := range pods { + originalSizeInMb, err := getFSSizeMb(f, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + fsSizes = append(fsSizes, originalSizeInMb) + } + + ginkgo.By("Change volume allocation of pol1 and pol2 to EZT") + for i, policyId := range policyIds { + err = updateVmfsPolicyAlloctype(ctx, pc, eztAllocType, policyNames[i], policyId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Apply the volume allocation changes and while it is still running, expand PVCs created to a" + + " large size such that the resize takes more than vpxd task timeout") + var wg sync.WaitGroup + wg.Add(len(volIds)) + n := resource.MustParse(largeSize) + sizeInInt, b := n.AsInt64() + gomega.Expect(b).To(gomega.BeTrue()) + // since we are creating 4 volumes reducing the size of each expansion by half to start with + newSize := *(resource.NewQuantity(sizeInInt/2, resource.BinarySI)) + start := time.Now() + for i, volId := range volIds { + go reconfigPolicyParallel(ctx, volId, policyIds[i/2].UniqueId, &wg) + } + wg.Wait() + + wg.Add(len(volIds)) + for _, pvc := range pvcs { + go resize(client, pvc, pvc.Spec.Resources.Requests[v1.ResourceStorage], newSize, &wg) + } + wg.Wait() + for i := range pvcs { + err = waitForPvResize(pvs[i], client, newSize, totalResizeWaitPeriod*2) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "resize exceeded timeout") + } + elapsed := time.Since(start) + + ginkgo.By("Verify PVC expansion took longer than vpxd timeout") + gomega.Expect(elapsed > time.Second*time.Duration(vpxdReducedTaskTimeoutSecsInt)).To( + gomega.BeTrue(), "PVC expansion was faster than vpxd timeout") + + 
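// Note: EZT (eager zeroed thick) allocation zeroes out newly added blocks up front, so expanding
+ // these volumes by a sufficiently large amount is expected to outlast the reduced vpxd task timeout,
+ // which appears to be the long-running CNS task behaviour this check exercises.
+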
ginkgo.By("Wait and verify the file system resize on pvcs") + for i := range pods { + framework.Logf("Waiting for file system resize to finish for pvc %v", pvcs[i*2].Name) + pvcs[i*2], err = waitForFSResize(pvcs[i*2], client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + fsSize, err := getFSSizeMb(f, pods[i]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("File system size after expansion : %v", fsSize) + gomega.Expect(fsSize > fsSizes[i]).To(gomega.BeTrue(), + fmt.Sprintf( + "filesystem size %v is not > than before expansion %v for pvc %q", + fsSize, fsSizes[i], pvcs[i*2].Name)) + + framework.Logf("File system resize finished successfully for pvc %v", pvcs[i*2].Name) + } + + }) + + /* + Relocate from vsand datastore to vmfs datastore and vice versa + Steps: + 1. Create a SPBM policy with EZT volume allocation on vmfs datastore. + 2. Create a SC, say sc1 + 3. Create a large PVC pvc1 using SC created in step 2, this should take more than 40 mins + 4. Verify that pvc1 is bound and backend fcd is on vsand datastore. + 5. Create a pod, say pod1 using pvc1. + 6. write some data to the volume. + 7. delete pod1. + 8. Relocate fcd to vmfs ds. + 9. Recreate pod1 + 10.Verify pod1 is running and pvc1 is accessible and verify the data written in step 6 + 11.Delete pod1 + 12. Relocate fcd to vsanDirect ds. + 13. Recreate pod1 + 14.Verify pod1 is running and pvc1 is accessible and verify the data written in step 6 + 15. Delete pod1 + 16. Delete pvc1, sc1 + 17. Delete SPBM policies created + */ + ginkgo.It("[csi-wcp-vsan-direct] Relocate from vmfs datastore to vsand datastore "+ + "and vice versa", func() { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sharedvmfsURL := os.Getenv(envSharedVMFSDatastoreURL) + if sharedvmfsURL == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastoreURL)) + } + + vsanDDatstoreURL := os.Getenv(envVsanDDatastoreURL) + if sharedvmfsURL == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envVsanDDatastoreURL)) + } + + datastoreUrls := []string{sharedvmfsURL, vsanDDatstoreURL} + policyNames := []string{} + pvcs := []*v1.PersistentVolumeClaim{} + scs := []*storagev1.StorageClass{} + + rand.New(rand.NewSource(time.Now().UnixNano())) + + suffix := fmt.Sprintf("-%v-%v", time.Now().UnixNano(), rand.Intn(10000)) + categoryName := "category" + suffix + tagName := "tag" + suffix + + catID, tagID := createCategoryNTag(ctx, categoryName, tagName) + defer func() { + deleteCategoryNTag(ctx, catID, tagID) + }() + + attachTagToDS(ctx, tagID, sharedvmfsURL) + defer func() { + detachTagFromDS(ctx, tagID, sharedvmfsURL) + }() + + attachTagToDS(ctx, tagID, vsanDDatstoreURL) + defer func() { + detachTagFromDS(ctx, tagID, vsanDDatstoreURL) + }() + + ginkgo.By("create SPBM policy with EZT volume allocation") + ginkgo.By("create a storage class with a SPBM policy created from step 1") + ginkgo.By("create a PVC each using the storage policy created from step 2") + var pvclaim *v1.PersistentVolumeClaim + var pod *v1.Pod + + policyID, policyName := createStoragePolicyWithSharedVmfsNVsand( + ctx, pc, eztAllocType, map[string]string{categoryName: tagName}) + defer func() { + deleteStoragePolicy(ctx, pc, policyID) + }() + policyNames = append(policyNames, policyName) + + assignPolicyToWcpNamespace(client, ctx, namespace, policyNames) + createResourceQuota(client, namespace, rqLimit, policyName) + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, policyName, metav1.GetOptions{}) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvclaim, pod = createVsanDPvcAndPod(client, ctx, + namespace, storageclass, eztVsandPvcName, eztVsandPodName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvcs = append(pvcs, pvclaim) + scs = append(scs, storageclass) + + defer func() { + ginkgo.By("Delete the SCs created in step 2") + for _, sc := range scs { + err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Verify the PVCs created in step 3 are bound") + pvs, err := fpv.WaitForPVClaimBoundPhase(client, pvcs, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id") + + volumeID := pvs[0].Spec.CSI.VolumeHandle + storagePolicyExists, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyNames[0]) + e2eVSphere.verifyVolumeCompliance(volumeID, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyExists).To(gomega.BeTrue(), "storage policy verification failed") + + defer func() { + ginkgo.By("Delete the pod created") + err = fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Delete the PVCs created in step 3") + for i, pvc := range pvcs { + err := fpv.DeletePersistentVolumeClaim(client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(pvs[i].Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + err = fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify if VolumeID is created on the given datastores") + dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID) + framework.Logf("Volume is present on %s", dsUrlWhereVolumeIsPresent) + srcVsandDsUrl := dsUrlWhereVolumeIsPresent + e2eVSphere.verifyDatastoreMatch(volumeID, datastoreUrls) + + // Get the destination ds url where the volume will get relocated + destDsUrl := "" + for _, dsurl := range datastoreUrls { + if dsurl != dsUrlWhereVolumeIsPresent { + destDsUrl = dsurl + } + } + + dsRefDest := getDsMoRefFromURL(ctx, destDsUrl) + _, err = e2eVSphere.cnsRelocateVolume(e2eVSphere, ctx, volumeID, dsRefDest) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + e2eVSphere.verifyDatastoreMatch(volumeID, []string{destDsUrl}) + + storagePolicyExists, err = e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyNames[0]) + e2eVSphere.verifyVolumeCompliance(volumeID, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyExists).To(gomega.BeTrue(), "storage policy verification failed") + + ginkgo.By("Creating a pod") + podSpec := getVsanDPodSpec(namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execCommand, eztVsandPodName) + pod, err = client.CoreV1().Pods(namespace).Create(ctx, podSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = fpod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Relocate back to vsand datstore") + + err = fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + dsRefDest = getDsMoRefFromURL(ctx, srcVsandDsUrl) + _, err = e2eVSphere.cnsRelocateVolume(e2eVSphere, ctx, volumeID, dsRefDest) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + 
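// After relocating back, the checks below assert that the volume again resides only on the original
+ // vsand datastore and still complies with its SPBM policy.
+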
e2eVSphere.verifyDatastoreMatch(volumeID, []string{srcVsandDsUrl})
+
+ storagePolicyExists, err = e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyNames[0])
+ e2eVSphere.verifyVolumeCompliance(volumeID, true)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ gomega.Expect(storagePolicyExists).To(gomega.BeTrue(), "storage policy verification failed")
+
+ ginkgo.By("Creating a pod")
+ podSpec = getVsanDPodSpec(namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execCommand, eztVsandPodName)
+ pod, err = client.CoreV1().Pods(namespace).Create(ctx, podSpec, metav1.CreateOptions{})
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+ err = fpod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+ ginkgo.By("Delete pods created in step 5")
+ deletePodsAndWaitForVolsToDetach(ctx, client, []*v1.Pod{pod}, true)
+
+ })
+
+ /*
+ Start attached volume's conversion and relocation in parallel
+ Steps for offline volumes:
+ 1. Create a SPBM policy with lzt volume allocation for vmfs datastore.
+ 2. Create SC using policy created in step 1
+ 3. Create PVC using SC created in step 2
+ 4. Verify that pvc created in step 3 is bound
+ 5. Create a pod, say pod1, using pvc created in step 3.
+ 6. Start writing some IO to pod.
+ 7. Delete pod1.
+ 8. Relocate CNS volume corresponding to pvc from step 3 to a different datastore.
+ 9. While relocation is running perform volume conversion.
+ 10. Verify relocation was successful.
+ 11. Verify offline volume conversion is successful.
+ 12. Delete all the objects created during the test.
+
+ Steps for online volumes:
+ 1. Create a SPBM policy with lzt volume allocation for vmfs datastore.
+ 2. Create SC using policy created in step 1
+ 3. Create PVC using SC created in step 2
+ 4. Verify that pvc created in step 3 is bound
+ 5. Create a pod, say pod1, using pvc created in step 3.
+ 6. Start writing some IO to pod; this runs in parallel with steps 7-8.
+ 7. Relocate CNS volume corresponding to pvc from step 3 to a different datastore.
+ 8. While relocation is running perform volume conversion.
+ 9. Verify the IO written so far.
+ 10. Verify relocation was successful.
+ 11. Verify online volume conversion is successful.
+ 12. Delete all the objects created during the test.
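+
+ Note: in the two flows above, "online" refers to a volume that stays attached to a running pod while
+ the relocation and conversion are in progress, whereas "offline" refers to a volume whose pod is
+ deleted before the relocation is started.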
+ */
+ ginkgo.It("[csi-block-vanilla][csi-block-vanilla-parallelized]"+
+ " Start attached volume's conversion and relocation in parallel", func() {
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ sharedvmfsURL, sharedvmfs2URL := "", ""
+ var datastoreUrls []string
+ var policyName string
+ volIdToDsUrlMap := make(map[string]string)
+ volIdToCnsRelocateVolTask := make(map[string]*object.Task)
+
+ sharedvmfsURL = os.Getenv(envSharedVMFSDatastoreURL)
+ if sharedvmfsURL == "" {
+ ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastoreURL))
+ }
+
+ sharedvmfs2URL = os.Getenv(envSharedVMFSDatastore2URL)
+ if sharedvmfs2URL == "" {
+ ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastore2URL))
+ }
+ datastoreUrls = append(datastoreUrls, sharedvmfsURL, sharedvmfs2URL)
+
+ scParameters := make(map[string]string)
+ policyNames := []string{}
+ pvcs := []*v1.PersistentVolumeClaim{}
+ pvclaims2d := [][]*v1.PersistentVolumeClaim{}
+
+ rand.New(rand.NewSource(time.Now().UnixNano()))
+ suffix := fmt.Sprintf("-%v-%v", time.Now().UnixNano(), rand.Intn(10000))
+ categoryName := "category" + suffix
+ tagName := "tag" + suffix
+
+ catID, tagID := createCategoryNTag(ctx, categoryName, tagName)
+ defer func() {
+ deleteCategoryNTag(ctx, catID, tagID)
+ }()
+
+ attachTagToDS(ctx, tagID, sharedvmfsURL)
+ defer func() {
+ detachTagFromDS(ctx, tagID, sharedvmfsURL)
+ }()
+
+ attachTagToDS(ctx, tagID, sharedvmfs2URL)
+ defer func() {
+ detachTagFromDS(ctx, tagID, sharedvmfs2URL)
+ }()
+
+ ginkgo.By("create SPBM policy with lzt volume allocation")
+ ginkgo.By("create a storage class with a SPBM policy created from step 1")
+ ginkgo.By("create a PVC each using the storage policy created from step 2")
+ var storageclass *storagev1.StorageClass
+ var pvclaim *v1.PersistentVolumeClaim
+ var err error
+ var policyID *pbmtypes.PbmProfileId
+
+ policyID, policyName = createVmfsStoragePolicy(
+ ctx, pc, lztAllocType, map[string]string{categoryName: tagName})
+ defer func() {
+ deleteStoragePolicy(ctx, pc, policyID)
+ }()
+ policyNames = append(policyNames, policyName)
+
+ framework.Logf("CNS_TEST: Running for vanilla k8s setup")
+ scParameters[scParamStoragePolicyName] = policyName
+ storageclass, pvclaim, err = createPVCAndStorageClass(client,
+ namespace, nil, scParameters, "", nil, "", true, "")
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ pvclaim2, err := createPVC(client, namespace, nil, "", storageclass, "")
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ pvcs = append(pvcs, pvclaim, pvclaim2)
+ pvclaims2d = append(pvclaims2d, []*v1.PersistentVolumeClaim{pvclaim})
+ pvclaims2d = append(pvclaims2d, []*v1.PersistentVolumeClaim{pvclaim2})
+
+ defer func() {
+ if vanillaCluster {
+ ginkgo.By("Delete the SCs created in step 2")
+ err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ }
+ }()
+
+ ginkgo.By("Verify the PVCs created in step 3 are bound")
+ pvs, err := fpv.WaitForPVClaimBoundPhase(client, pvcs, framework.ClaimProvisionTimeout)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+ volIds := []string{}
+ ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id")
+ for i, pv := range pvs {
+ volumeID := pv.Spec.CSI.VolumeHandle
+ if guestCluster {
+ volumeID = getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle)
+ gomega.Expect(volumeID).NotTo(gomega.BeEmpty())
+ }
+ storagePolicyMatches, err :=
e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyNames[i/2]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed") + e2eVSphere.verifyVolumeCompliance(volumeID, true) + volIds = append(volIds, volumeID) + } + + defer func() { + ginkgo.By("Delete the PVCs created in step 3") + for i, pvc := range pvcs { + err := fpv.DeletePersistentVolumeClaim(client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volIds[i]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + }() + + ginkgo.By("Create pods with using the PVCs created in step 3 and wait for them to be ready") + ginkgo.By("verify we can read and write on the PVCs") + pods := createMultiplePods(ctx, client, pvclaims2d, true) + defer func() { + ginkgo.By("Delete the pod created") + deletePodsAndWaitForVolsToDetach(ctx, client, pods, true) + }() + + ginkgo.By("Verify if VolumeID is created on the given datastores") + // Get the destination ds url where the volume will get relocated + destDsUrl := "" + for _, volId := range volIds { + dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volId) + framework.Logf("Volume is present on %s for volume: %s", dsUrlWhereVolumeIsPresent, volId) + e2eVSphere.verifyDatastoreMatch(volId, datastoreUrls) + for _, dsurl := range datastoreUrls { + if dsurl != dsUrlWhereVolumeIsPresent { + destDsUrl = dsurl + } + } + framework.Logf("dest url: %s", destDsUrl) + volIdToDsUrlMap[volId] = destDsUrl + } + + rand.New(rand.NewSource(time.Now().Unix())) + testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) + ginkgo.By(fmt.Sprintf("Creating a 100mb test data file %v", testdataFile)) + op, err := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=100").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + op, err = exec.Command("rm", "-f", testdataFile).Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("delete pod1") + deletePodsAndWaitForVolsToDetach(ctx, client, []*v1.Pod{pods[1]}, true) + + ginkgo.By("Updating policy volume allocation from lzt -> ezt") + err = updateVmfsPolicyAlloctype(ctx, pc, eztAllocType, policyName, policyID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Start relocation of volume to a different datastore") + for _, volId := range volIds { + dsRefDest := getDsMoRefFromURL(ctx, volIdToDsUrlMap[volId]) + task, err := e2eVSphere.cnsRelocateVolume(e2eVSphere, ctx, volId, dsRefDest, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volIdToCnsRelocateVolTask[volId] = task + framework.Logf("Waiting for a few seconds for relocation to be started properly on VC") + time.Sleep(time.Duration(10) * time.Second) + } + + ginkgo.By("Perform volume conversion and write IO to pod while relocate volume to different datastore") + var wg sync.WaitGroup + wg.Add(1 + len(volIds)) + go writeKnownData2PodInParallel(f, pods[0], testdataFile, &wg) + for _, volId := range volIds { + go reconfigPolicyParallel(ctx, volId, policyID.UniqueId, &wg) + } + wg.Wait() + + ginkgo.By("Verify the data on the PVCs match what was written in step 7") + verifyKnownDataInPod(f, pods[0], testdataFile) + + for _, volId := range volIds { + ginkgo.By(fmt.Sprintf("Wait for relocation task to complete for volumeID: %s", volId)) + waitForCNSTaskToComplete(ctx, 
volIdToCnsRelocateVolTask[volId]) + ginkgo.By("Verify relocation of volume is successful") + e2eVSphere.verifyDatastoreMatch(volId, []string{volIdToDsUrlMap[volId]}) + storagePolicyExists, err := e2eVSphere.VerifySpbmPolicyOfVolume(volId, policyNames[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyExists).To(gomega.BeTrue(), "storage policy verification failed") + e2eVSphere.verifyVolumeCompliance(volId, true) + } + + ginkgo.By("Delete the pod created") + err = fpod.DeletePodWithWait(client, pods[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }) + + /* + Start attached volume's conversion and relocation of volume with updation of its metadata in parallel + Steps for offline volumes: + 1. Create a SPBM policy with lzt volume allocation for vmfs datastore. + 2. Create SC using policy created in step 1 + 3. Create PVC using SC created in step 2 + 4. Verify that pvc created in step 3 are bound + 5. Create a pod, say pod1 using pvc created in step 4. + 6. Start writing some IO to pod. + 7. Delete pod1. + 8. Relocate CNS volume corresponding to pvc from step 3 to a different datastore. + 9. While relocation is running add labels to PV and PVC + in parallel with volume conversion. + 10. Verify relocation was successful. + 11. Verify online volume conversion is successful. + 12. Delete all the objects created during the test. + + Steps for online volumes: + 1. Create a SPBM policy with lzt volume allocation for vmfs datastore. + 2. Create SC using policy created in step 1 + 3. Create PVC using SC created in step 2 + 4. Verify that pvc created in step 3 are bound + 5. Create a pod, say pod1 using pvc created in step 4. + 6. Start writing some IO to pod which run in parallel to steps 6-7. + 7. Relocate CNS volume corresponding to pvc from step 3 to a different datastore. + 8. While relocation is running add labels to PV and PVC + in parallel with volume conversion. + 9. Verify the IO written so far. + 10. Verify relocation was successful. + 11. Verify online volume conversion is successful. + 12. Delete all the objects created during the test. 
+ */
+ ginkgo.It("[csi-block-vanilla][csi-block-vanilla-parallelized]"+
+ " Start attached volume's conversion and relocation of volume"+
+ " with updation of its metadata in parallel", func() {
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ sharedvmfsURL, sharedvmfs2URL := "", ""
+ var datastoreUrls []string
+ var policyName string
+ volIdToDsUrlMap := make(map[string]string)
+ labels := make(map[string]string)
+ labels[labelKey] = labelValue
+ volIdToCnsRelocateVolTask := make(map[string]*object.Task)
+
+ sharedvmfsURL = os.Getenv(envSharedVMFSDatastoreURL)
+ if sharedvmfsURL == "" {
+ ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastoreURL))
+ }
+
+ sharedvmfs2URL = os.Getenv(envSharedVMFSDatastore2URL)
+ if sharedvmfs2URL == "" {
+ ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastore2URL))
+ }
+ datastoreUrls = append(datastoreUrls, sharedvmfsURL, sharedvmfs2URL)
+
+ scParameters := make(map[string]string)
+ policyNames := []string{}
+ pvcs := []*v1.PersistentVolumeClaim{}
+ pvclaims2d := [][]*v1.PersistentVolumeClaim{}
+
+ rand.New(rand.NewSource(time.Now().UnixNano()))
+ suffix := fmt.Sprintf("-%v-%v", time.Now().UnixNano(), rand.Intn(10000))
+ categoryName := "category" + suffix
+ tagName := "tag" + suffix
+
+ catID, tagID := createCategoryNTag(ctx, categoryName, tagName)
+ defer func() {
+ deleteCategoryNTag(ctx, catID, tagID)
+ }()
+
+ attachTagToDS(ctx, tagID, sharedvmfsURL)
+ defer func() {
+ detachTagFromDS(ctx, tagID, sharedvmfsURL)
+ }()
+
+ attachTagToDS(ctx, tagID, sharedvmfs2URL)
+ defer func() {
+ detachTagFromDS(ctx, tagID, sharedvmfs2URL)
+ }()
+
+ ginkgo.By("create SPBM policy with lzt volume allocation")
+ ginkgo.By("create a storage class with a SPBM policy created from step 1")
+ ginkgo.By("create a PVC each using the storage policy created from step 2")
+ var storageclass *storagev1.StorageClass
+ var pvclaim *v1.PersistentVolumeClaim
+ var err error
+ var policyID *pbmtypes.PbmProfileId
+
+ policyID, policyName = createVmfsStoragePolicy(
+ ctx, pc, lztAllocType, map[string]string{categoryName: tagName})
+ defer func() {
+ deleteStoragePolicy(ctx, pc, policyID)
+ }()
+ policyNames = append(policyNames, policyName)
+
+ framework.Logf("CNS_TEST: Running for vanilla k8s setup")
+ scParameters[scParamStoragePolicyName] = policyName
+ storageclass, pvclaim, err = createPVCAndStorageClass(client,
+ namespace, nil, scParameters, "", nil, "", true, "")
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ pvclaim2, err := createPVC(client, namespace, nil, "", storageclass, "")
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ pvcs = append(pvcs, pvclaim, pvclaim2)
+ pvclaims2d = append(pvclaims2d, []*v1.PersistentVolumeClaim{pvclaim})
+ pvclaims2d = append(pvclaims2d, []*v1.PersistentVolumeClaim{pvclaim2})
+
+ defer func() {
+ if vanillaCluster {
+ ginkgo.By("Delete the SCs created in step 2")
+ err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ }
+ }()
+
+ ginkgo.By("Verify the PVCs created in step 3 are bound")
+ pvs, err := fpv.WaitForPVClaimBoundPhase(client, pvcs, framework.ClaimProvisionTimeout)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+ volIds := []string{}
+ ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id")
+ for i, pv := range pvs {
+ volumeID := pv.Spec.CSI.VolumeHandle
+ if guestCluster {
+ volumeID =
getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle) + gomega.Expect(volumeID).NotTo(gomega.BeEmpty()) + } + storagePolicyMatches, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyNames[i/2]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed") + e2eVSphere.verifyVolumeCompliance(volumeID, true) + volIds = append(volIds, volumeID) + } + + defer func() { + ginkgo.By("Delete the PVCs created in step 3") + for i, pvc := range pvcs { + err := fpv.DeletePersistentVolumeClaim(client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volIds[i]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + }() + + ginkgo.By("Create pods with using the PVCs created in step 3 and wait for them to be ready") + ginkgo.By("verify we can read and write on the PVCs") + pods := createMultiplePods(ctx, client, pvclaims2d, true) + defer func() { + ginkgo.By("Delete the pod created") + deletePodsAndWaitForVolsToDetach(ctx, client, pods, true) + }() + + ginkgo.By("Verify if VolumeID is created on the given datastores") + // Get the destination ds url where the volume will get relocated + destDsUrl := "" + for _, volId := range volIds { + dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volId) + framework.Logf("Volume is present on %s for volume: %s", dsUrlWhereVolumeIsPresent, volId) + e2eVSphere.verifyDatastoreMatch(volId, datastoreUrls) + for _, dsurl := range datastoreUrls { + if dsurl != dsUrlWhereVolumeIsPresent { + destDsUrl = dsurl + } + } + framework.Logf("dest url: %s", destDsUrl) + volIdToDsUrlMap[volId] = destDsUrl + } + + rand.New(rand.NewSource(time.Now().Unix())) + testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) + ginkgo.By(fmt.Sprintf("Creating a 100mb test data file %v", testdataFile)) + op, err := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=100").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + op, err = exec.Command("rm", "-f", testdataFile).Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("delete pod1") + deletePodsAndWaitForVolsToDetach(ctx, client, []*v1.Pod{pods[1]}, true) + + ginkgo.By("Start relocation of volume to a different datastore") + for _, volId := range volIds { + dsRefDest := getDsMoRefFromURL(ctx, volIdToDsUrlMap[volId]) + task, err := e2eVSphere.cnsRelocateVolume(e2eVSphere, ctx, volId, dsRefDest, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volIdToCnsRelocateVolTask[volId] = task + framework.Logf("Waiting for a few seconds for relocation to be started properly on VC") + time.Sleep(time.Duration(10) * time.Second) + } + + ginkgo.By("Add labels to volumes and write IO to pod while relocating volume to different datastore") + err = updateVmfsPolicyAlloctype(ctx, pc, eztAllocType, policyName, policyID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + var wg sync.WaitGroup + wg.Add(3 + len(volIds)) + go writeKnownData2PodInParallel(f, pods[0], testdataFile, &wg) + go updatePvcLabelsInParallel(ctx, client, namespace, labels, pvcs, &wg) + go updatePvLabelsInParallel(ctx, client, namespace, labels, pvs, &wg) + for _, volId := range volIds { + go reconfigPolicyParallel(ctx, volId, policyID.UniqueId, &wg) + + } + wg.Wait() + + for _, pvclaim := range pvcs { + ginkgo.By(fmt.Sprintf("Waiting for labels %+v 
to be updated for pvc %s in namespace %s", + labels, pvclaim.Name, namespace)) + pv := getPvFromClaim(client, namespace, pvclaim.Name) + err = e2eVSphere.waitForLabelsToBeUpdated(pv.Spec.CSI.VolumeHandle, labels, + string(cnstypes.CnsKubernetesEntityTypePVC), pvclaim.Name, pvclaim.Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Waiting for labels %+v to be updated for pv %s", + labels, pv.Name)) + err = e2eVSphere.waitForLabelsToBeUpdated(pv.Spec.CSI.VolumeHandle, labels, + string(cnstypes.CnsKubernetesEntityTypePV), pv.Name, pv.Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Verify the data on the PVCs match what was written in step 7") + verifyKnownDataInPod(f, pods[0], testdataFile) + + for _, volId := range volIds { + ginkgo.By(fmt.Sprintf("Wait for relocation task to complete for volumeID: %s", volId)) + waitForCNSTaskToComplete(ctx, volIdToCnsRelocateVolTask[volId]) + ginkgo.By("Verify relocation of volume is successful") + e2eVSphere.verifyDatastoreMatch(volId, []string{volIdToDsUrlMap[volId]}) + storagePolicyExists, err := e2eVSphere.VerifySpbmPolicyOfVolume(volId, policyNames[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyExists).To(gomega.BeTrue(), "storage policy verification failed") + e2eVSphere.verifyVolumeCompliance(volId, true) + } + + ginkgo.By("Delete the pod created") + err = fpod.DeletePodWithWait(client, pods[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* + Start attached volume's conversion while creation of snapshot and + relocation of volume in parallel + Steps for offline volumes: + 1. Create a SPBM policy with lzt volume allocation for vmfs datastore. + 2. Create SC using policy created in step 1 + 3. Create PVC using SC created in step 2 + 4. Verify that pvc created in step 3 are bound + 5. Create a pod, say pod1 using pvc created in step 4. + 6. Start writing some IO to pod. + 7. Delete pod1. + 8. Relocate CNS volume corresponding to pvc from step 3 to a different datastore. + 9. While relocation is running perform volume conversion + and create a snapshot in parallel. + 10. Verify relocation was successful. + 11. Verify online volume conversion is successful. + 12. Delete all the objects created during the test. + + Steps for online volumes: + 1. Create a SPBM policy with lzt volume allocation for vmfs datastore. + 2. Create SC using policy created in step 1 + 3. Create PVC using SC created in step 2 + 4. Verify that pvc created in step 3 are bound + 5. Create a pod, say pod1 using pvc created in step 4. + 6. Start writing some IO to pod which run in parallel to steps 6-7. + 7. Relocate CNS volume corresponding to pvc from step 3 to a different datastore. + 8. While relocation is running perform volume conversion + and create a snapshot in parallel. + 9. Verify the IO written so far. + 10. Verify relocation was successful. + 11. Verify online volume conversion is successful. + 12. Delete all the objects created during the test. 
+ */
+ ginkgo.It("[csi-block-vanilla][csi-block-vanilla-parallelized]"+
+ " Start attached volume's conversion while creation of snapshot and"+
+ " relocation of volume in parallel", func() {
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ sharedvmfsURL, sharedvmfs2URL := "", ""
+ var datastoreUrls []string
+ var policyName string
+ volIdToDsUrlMap := make(map[string]string)
+ volIdToCnsRelocateVolTask := make(map[string]*object.Task)
+ snapToVolIdMap := make(map[string]string)
+
+ sharedvmfsURL = os.Getenv(envSharedVMFSDatastoreURL)
+ if sharedvmfsURL == "" {
+ ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastoreURL))
+ }
+
+ sharedvmfs2URL = os.Getenv(envSharedVMFSDatastore2URL)
+ if sharedvmfs2URL == "" {
+ ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastore2URL))
+ }
+ datastoreUrls = append(datastoreUrls, sharedvmfsURL, sharedvmfs2URL)
+
+ scParameters := make(map[string]string)
+ policyNames := []string{}
+ pvcs := []*v1.PersistentVolumeClaim{}
+ pvclaims2d := [][]*v1.PersistentVolumeClaim{}
+
+ rand.New(rand.NewSource(time.Now().UnixNano()))
+ suffix := fmt.Sprintf("-%v-%v", time.Now().UnixNano(), rand.Intn(10000))
+ categoryName := "category" + suffix
+ tagName := "tag" + suffix
+
+ catID, tagID := createCategoryNTag(ctx, categoryName, tagName)
+ defer func() {
+ deleteCategoryNTag(ctx, catID, tagID)
+ }()
+
+ attachTagToDS(ctx, tagID, sharedvmfsURL)
+ defer func() {
+ detachTagFromDS(ctx, tagID, sharedvmfsURL)
+ }()
+
+ attachTagToDS(ctx, tagID, sharedvmfs2URL)
+ defer func() {
+ detachTagFromDS(ctx, tagID, sharedvmfs2URL)
+ }()
+
+ ginkgo.By("create SPBM policy with lzt volume allocation")
+ ginkgo.By("create a storage class with a SPBM policy created from step 1")
+ ginkgo.By("create a PVC each using the storage policy created from step 2")
+ var storageclass *storagev1.StorageClass
+ var pvclaim *v1.PersistentVolumeClaim
+ var err error
+ var policyID *pbmtypes.PbmProfileId
+
+ policyID, policyName = createVmfsStoragePolicy(
+ ctx, pc, lztAllocType, map[string]string{categoryName: tagName})
+ defer func() {
+ deleteStoragePolicy(ctx, pc, policyID)
+ }()
+ policyNames = append(policyNames, policyName)
+
+ framework.Logf("CNS_TEST: Running for vanilla k8s setup")
+ scParameters[scParamStoragePolicyName] = policyName
+ storageclass, pvclaim, err = createPVCAndStorageClass(client,
+ namespace, nil, scParameters, "", nil, "", true, "")
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ pvclaim2, err := createPVC(client, namespace, nil, "", storageclass, "")
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ pvcs = append(pvcs, pvclaim, pvclaim2)
+ pvclaims2d = append(pvclaims2d, []*v1.PersistentVolumeClaim{pvclaim})
+ pvclaims2d = append(pvclaims2d, []*v1.PersistentVolumeClaim{pvclaim2})
+
+ defer func() {
+ if vanillaCluster {
+ ginkgo.By("Delete the SCs created in step 2")
+ err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ }
+ }()
+
+ ginkgo.By("Verify the PVCs created in step 3 are bound")
+ pvs, err := fpv.WaitForPVClaimBoundPhase(client, pvcs, framework.ClaimProvisionTimeout)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+ volIds := []string{}
+ ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id")
+ for i, pv := range pvs {
+ volumeID := pv.Spec.CSI.VolumeHandle
+ if guestCluster {
+ volumeID = getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle)
+
gomega.Expect(volumeID).NotTo(gomega.BeEmpty()) + } + storagePolicyMatches, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyNames[i/2]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed") + e2eVSphere.verifyVolumeCompliance(volumeID, true) + volIds = append(volIds, volumeID) + } + + defer func() { + ginkgo.By("Delete the PVCs created in step 3") + for i, pvc := range pvcs { + err := fpv.DeletePersistentVolumeClaim(client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volIds[i]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + }() + + ginkgo.By("Create pods with using the PVCs created in step 3 and wait for them to be ready") + ginkgo.By("verify we can read and write on the PVCs") + pods := createMultiplePods(ctx, client, pvclaims2d, true) + defer func() { + ginkgo.By("Delete the pod created") + deletePodsAndWaitForVolsToDetach(ctx, client, pods, true) + }() + + ginkgo.By("Verify if VolumeID is created on the given datastores") + // Get the destination ds url where the volume will get relocated + destDsUrl := "" + for _, volId := range volIds { + dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volId) + framework.Logf("Volume is present on %s for volume: %s", dsUrlWhereVolumeIsPresent, volId) + e2eVSphere.verifyDatastoreMatch(volId, datastoreUrls) + for _, dsurl := range datastoreUrls { + if dsurl != dsUrlWhereVolumeIsPresent { + destDsUrl = dsurl + } + } + framework.Logf("dest url: %s", destDsUrl) + volIdToDsUrlMap[volId] = destDsUrl + } + + snaps := []*snapV1.VolumeSnapshot{} + //Get snapshot client using the rest config + restConfig := getRestConfigClient() + snapc, err := snapclient.NewForConfig(restConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, + getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Volume snapshot class with name %q created", volumeSnapshotClass.Name) + + defer func() { + err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete( + ctx, volumeSnapshotClass.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + rand.New(rand.NewSource(time.Now().Unix())) + testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) + ginkgo.By(fmt.Sprintf("Creating a 100mb test data file %v", testdataFile)) + op, err := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=100").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + op, err = exec.Command("rm", "-f", testdataFile).Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("delete pod1") + deletePodsAndWaitForVolsToDetach(ctx, client, []*v1.Pod{pods[1]}, true) + + ginkgo.By("Start relocation of volume to a different datastore") + for _, volId := range volIds { + dsRefDest := getDsMoRefFromURL(ctx, volIdToDsUrlMap[volId]) + task, err := e2eVSphere.cnsRelocateVolume(e2eVSphere, ctx, volId, dsRefDest, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volIdToCnsRelocateVolTask[volId] = task + framework.Logf("Waiting for a few seconds for relocation to be started properly on VC") + 
time.Sleep(time.Duration(10) * time.Second) + } + + ginkgo.By("Perform volume conversion and write IO to pod and" + + " create a snapshot while relocating volume to different datastore") + err = updateVmfsPolicyAlloctype(ctx, pc, eztAllocType, policyName, policyID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + var wg sync.WaitGroup + ch := make(chan *snapV1.VolumeSnapshot) + lock := &sync.Mutex{} + wg.Add(1 + 2*len(volIds)) + go writeKnownData2PodInParallel(f, pods[0], testdataFile, &wg) + for i := range volIds { + go reconfigPolicyParallel(ctx, volIds[i], policyID.UniqueId, &wg) + go createSnapshotInParallel(ctx, namespace, snapc, pvcs[i].Name, volumeSnapshotClass.Name, + ch, lock, &wg) + go func(volID string) { + for v := range ch { + snaps = append(snaps, v) + snapToVolIdMap[v.Name] = volID + } + }(volIds[i]) + } + wg.Wait() + + ginkgo.By("Verify the data on the PVCs match what was written in step 7") + verifyKnownDataInPod(f, pods[0], testdataFile) + + for _, snap := range snaps { + volumeSnapshot := snap + ginkgo.By("Verify volume snapshot is created") + volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("snapshot restore size is : %s", volumeSnapshot.Status.RestoreSize.String()) + gomega.Expect(volumeSnapshot.Status.RestoreSize.Cmp(pvclaim.Spec.Resources.Requests[v1.ResourceStorage])).To( + gomega.BeZero()) + ginkgo.By("Verify volume snapshot content is created") + snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(*snapshotContent.Status.ReadyToUse).To(gomega.BeTrue()) + + framework.Logf("Get volume snapshot ID from snapshot handle") + snapshothandle := *snapshotContent.Status.SnapshotHandle + snapshotId := strings.Split(snapshothandle, "+")[1] + + defer func() { + framework.Logf("Delete volume snapshot %v", volumeSnapshot.Name) + err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete( + ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + framework.Logf("Verify snapshot entry %v is deleted from CNS for volume %v", snapshotId, + snapToVolIdMap[volumeSnapshot.Name]) + err = waitForCNSSnapshotToBeDeleted(snapToVolIdMap[volumeSnapshot.Name], snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Query CNS and check the volume snapshot entry") + err = waitForCNSSnapshotToBeCreated(snapToVolIdMap[volumeSnapshot.Name], snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + for _, volId := range volIds { + ginkgo.By(fmt.Sprintf("Wait for relocation task to complete for volumeID: %s", volId)) + waitForCNSTaskToComplete(ctx, volIdToCnsRelocateVolTask[volId]) + ginkgo.By("Verify relocation of volume is successful") + e2eVSphere.verifyDatastoreMatch(volId, []string{volIdToDsUrlMap[volId]}) + storagePolicyExists, err := e2eVSphere.VerifySpbmPolicyOfVolume(volId, policyNames[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyExists).To(gomega.BeTrue(), "storage policy verification failed") + e2eVSphere.verifyVolumeCompliance(volId, true) + } + + ginkgo.By("Delete the pod created") + err = fpod.DeletePodWithWait(client, pods[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }) + +}) + +// fillVolumesInPods fills the volumes in pods after leaving 100m for FS metadata +func 
fillVolumeInPods(f *framework.Framework, pods []*v1.Pod) { + for _, pod := range pods { + size, err := getFSSizeMb(f, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + writeRandomDataOnPod(pod, size-100) // leaving 100m for FS metadata + } +} + +// writeRandomDataOnPod runs dd on the given pod and write count in Mib +func writeRandomDataOnPod(pod *v1.Pod, count int64) { + cmd := []string{"--namespace=" + pod.Namespace, "-c", pod.Spec.Containers[0].Name, "exec", pod.Name, "--", + "/bin/sh", "-c", "dd if=/dev/urandom of=/mnt/volume1/f1 bs=1M count=" + strconv.FormatInt(count, 10)} + _ = e2ekubectl.RunKubectlOrDie(pod.Namespace, cmd...) } // setVpxdTaskTimeout sets vpxd task timeout to given number of seconds @@ -1474,13 +3123,14 @@ func setVpxdTaskTimeout(ctx context.Context, taskTimeout int) { func writeKnownData2PodInParallel( f *framework.Framework, pod *v1.Pod, testdataFile string, wg *sync.WaitGroup, size ...int64) { + defer ginkgo.GinkgoRecover() defer wg.Done() writeKnownData2Pod(f, pod, testdataFile, size...) } // writeKnownData2Pod writes known 1mb data to a file in given pod's volume until 200mb is left in the volume func writeKnownData2Pod(f *framework.Framework, pod *v1.Pod, testdataFile string, size ...int64) { - _ = framework.RunKubectlOrDie(pod.Namespace, "cp", testdataFile, fmt.Sprintf( + _ = e2ekubectl.RunKubectlOrDie(pod.Namespace, "cp", testdataFile, fmt.Sprintf( "%v/%v:/mnt/volume1/testdata", pod.Namespace, pod.Name)) fsSize, err := getFSSizeMb(f, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1494,11 +3144,11 @@ func writeKnownData2Pod(f *framework.Framework, pod *v1.Pod, testdataFile string seek := fmt.Sprintf("%v", i) cmd := []string{"--namespace=" + pod.Namespace, "-c", pod.Spec.Containers[0].Name, "exec", pod.Name, "--", "/bin/sh", "-c", "dd if=/mnt/volume1/testdata of=/mnt/volume1/f1 bs=1M count=100 seek=" + seek} - _ = framework.RunKubectlOrDie(pod.Namespace, cmd...) + _ = e2ekubectl.RunKubectlOrDie(pod.Namespace, cmd...) } cmd := []string{"--namespace=" + pod.Namespace, "-c", pod.Spec.Containers[0].Name, "exec", pod.Name, "--", "/bin/sh", "-c", "rm /mnt/volume1/testdata"} - _ = framework.RunKubectlOrDie(pod.Namespace, cmd...) + _ = e2ekubectl.RunKubectlOrDie(pod.Namespace, cmd...) } // verifyKnownDataInPod verify known data on a file in given pod's volume in 100mb loop @@ -1515,9 +3165,9 @@ func verifyKnownDataInPod(f *framework.Framework, pod *v1.Pod, testdataFile stri skip := fmt.Sprintf("%v", i) cmd := []string{"--namespace=" + pod.Namespace, "-c", pod.Spec.Containers[0].Name, "exec", pod.Name, "--", "/bin/sh", "-c", "dd if=/mnt/volume1/f1 of=/mnt/volume1/testdata bs=1M count=100 skip=" + skip} - _ = framework.RunKubectlOrDie(pod.Namespace, cmd...) - _ = framework.RunKubectlOrDie(pod.Namespace, "cp", - fmt.Sprintf("%v/%v:/mnt/volume1/testdata", pod.Namespace, pod.Name), + _ = e2ekubectl.RunKubectlOrDie(pod.Namespace, cmd...) 
+ _ = e2ekubectl.RunKubectlOrDie(pod.Namespace, "cp", + fmt.Sprintf("%v/%v:mnt/volume1/testdata", pod.Namespace, pod.Name), testdataFile+pod.Name) framework.Logf("Running diff with source file and file from pod %v for 100M starting %vM", pod.Name, skip) op, err := exec.Command("diff", testdataFile, testdataFile+pod.Name).Output() @@ -1526,3 +3176,10 @@ func verifyKnownDataInPod(f *framework.Framework, pod *v1.Pod, testdataFile stri gomega.Expect(len(op)).To(gomega.BeZero()) } } + +func reconfigPolicyParallel(ctx context.Context, volID string, policyId string, wg *sync.WaitGroup) { + defer ginkgo.GinkgoRecover() + defer wg.Done() + err := e2eVSphere.reconfigPolicy(ctx, volID, policyId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) +} diff --git a/tests/e2e/preferential_topology.go b/tests/e2e/preferential_topology.go index 3bf78831d4..ae2e1e2d01 100644 --- a/tests/e2e/preferential_topology.go +++ b/tests/e2e/preferential_topology.go @@ -78,6 +78,7 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision vcAddress string sshClientConfig *ssh.ClientConfig nimbusGeneratedK8sVmPwd string + clientIndex int ) ginkgo.BeforeEach(func() { @@ -172,7 +173,9 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision } //set preferred datatsore time interval - setPreferredDatastoreTimeInterval(client, ctx, csiNamespace, namespace, csiReplicas) + setPreferredDatastoreTimeInterval(client, ctx, csiNamespace, csiReplicas, false) + + clientIndex = 0 }) @@ -187,11 +190,11 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision gomega.Expect(err).NotTo(gomega.HaveOccurred()) } framework.Logf("Perform preferred datastore tags cleanup after test completion") - err = deleteTagCreatedForPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks) + err = deleteTagCreatedForPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Recreate preferred datastore tags post cleanup") - err = createTagForPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks) + err = createTagForPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if isSPSServiceStopped { @@ -230,7 +233,8 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision 18. Perform cleanup. Delete StatefulSet, PVC, PV and SC. 19. Remove datastore preference tags as part of cleanup. 
*/ - ginkgo.It("Tag single preferred datastore each in rack-1 and rack-2 and verify it is honored", func() { + ginkgo.It("Tag single preferred datastore each in rack-1 and rack-2 "+ + "and verify it is honored", ginkgo.Label(p0, topology, preferential, block, vanilla, level5), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() preferredDatastoreChosen = 1 @@ -239,13 +243,13 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Tag preferred datastore for volume provisioning in rack-2(cluster-2))") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastoreRack2 := preferredDatastorePaths[0] defer func() { ginkgo.By("Remove preferred datastore tag") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastoreRack2, - allowedTopologyRacks[1]) + allowedTopologyRacks[1], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -292,23 +296,24 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - nonShareddatastoreListMapRack2, false, false) + nonShareddatastoreListMapRack2, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologyForRack2, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologyForRack2, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // choose preferred datastore in rack-1 ginkgo.By("Tag preferred datatstore for volume provisioning in rack-1(cluster-1))") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, nonShareddatastoreListMapRack1, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack1, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { ginkgo.By("Remove preferred datastore tag") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[0]) + allowedTopologyRacks[0], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -365,12 +370,13 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") verifyVolumeProvisioningForStandalonePods(ctx, client, pod, namespace, preferredDatastorePaths, - nonShareddatastoreListMapRack1) + nonShareddatastoreListMapRack1, false, nil) ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologyForRack1) + err = 
verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + allowedTopologyForRack1, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -407,19 +413,19 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Tag preferred datastore for volume provisioning in rack-3(cluster-3))") preferredDatastorePaths, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[2], - preferredDatastoreChosen, nonShareddatastoreListMapRack3, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack3, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastore, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[2], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastore...) defer func() { ginkgo.By("Remove preferred datatsore tag") for i := 0; i < len(preferredDatastorePaths); i++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[i], - allowedTopologyRacks[2]) + allowedTopologyRacks[2], false, clientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() framework.Logf("Waiting for %v for preferred datastore to get refreshed in the environment", @@ -465,29 +471,32 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - rack3DatastoreListMap, false, false) + rack3DatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologyForRack3, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologyForRack3, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // perform statefulset scaleup replicas = 10 ginkgo.By("Scale up statefulset replica count from 3 to 10") - scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - rack3DatastoreListMap, false, false) + rack3DatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologyForRack3, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologyForRack3, false, false) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -527,18 +536,18 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Tag preferred datatsore for volume provisioning in rack-2(cluster-2))") preferredDatastorePaths, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastoreChosen = 1 preferredDatastore, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastore...) defer func() { ginkgo.By("Remove preferred datastore tag") for i := 0; i < len(preferredDatastorePaths); i++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[i], - allowedTopologyRacks[1]) + allowedTopologyRacks[1], false, clientIndex) } gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -587,34 +596,38 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - rack2DatastoreListMap, false, false) + rack2DatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologyForRack2, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologyForRack2, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // perform statefulset scaleup replicas = 10 ginkgo.By("Scale up statefulset replica count from 3 to 10") - scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - rack2DatastoreListMap, false, false) + rack2DatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologyForRack2, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologyForRack2, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // perform statefulset scaledown replicas = 5 ginkgo.By("Scale down statefulset replica count from 10 to 5") - scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = 
scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -645,12 +658,12 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Tag preferred datatsore for volume provisioning in rack-1(cluster-1))") preferredDatastorePaths, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { ginkgo.By("Remove preferred datatsore tag") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[0]) + allowedTopologyRacks[0], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -698,13 +711,14 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - shareddatastoreListMap, true, false) + shareddatastoreListMap, true, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -735,7 +749,7 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Tag preferred datatsore for volume provisioning in rack-2(cluster-2))") preferredDatastorePaths, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = tagSameDatastoreAsPreferenceToDifferentRacks(masterIp, sshClientConfig, allowedTopologyRacks[1], preferredDatastoreChosen, preferredDatastorePaths) @@ -744,7 +758,7 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision ginkgo.By("Remove preferred datatsore tag") for j := 0; j < len(allowedTopologyRacks)-1; j++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[j]) + allowedTopologyRacks[j], false, clientIndex) } gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -792,13 +806,14 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - shareddatastoreListMap, false, false) + shareddatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - 
verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologyForRack2, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologyForRack2, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -832,7 +847,7 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Tag preferred datatsore for volume provisioning") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) for i := 1; i < len(allowedTopologyRacks); i++ { err = tagSameDatastoreAsPreferenceToDifferentRacks(masterIp, sshClientConfig, allowedTopologyRacks[i], @@ -843,7 +858,7 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision ginkgo.By("Remove preferred datatsore tag") for j := 0; j < len(allowedTopologyRacks); j++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[j]) + allowedTopologyRacks[j], false, clientIndex) } gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -889,13 +904,14 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - shareddatastoreListMap, true, false) + shareddatastoreListMap, true, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -925,12 +941,12 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Tag preferred datastore for volume provisioning") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[2], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { ginkgo.By("Remove the preferred datastore tag") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[2]) + allowedTopologyRacks[2], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -975,13 +991,14 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - shareddatastoreListMap, true, false) + shareddatastoreListMap, true, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the 
PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -1019,7 +1036,7 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Tag preferred datastore for volume provisioning in rack-1(cluster-1))") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, nonShareddatastoreListMapRack1, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack1, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Waiting for %v for preferred datastore to get refreshed in the environment", @@ -1064,27 +1081,28 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, sts1, namespace, preferredDatastorePaths, - nonShareddatastoreListMapRack1, false, false) + nonShareddatastoreListMapRack1, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, - namespace, allowedTopologyForRack1, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, + namespace, allowedTopologyForRack1, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Remove preferred datatsore tag which is chosen for volume provisioning") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[0]) + allowedTopologyRacks[0], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Tag new preferred datastore from rack-1 for volume provisioning") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { ginkgo.By("Remove the datastore preference chosen for volume provisioning") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[0]) + allowedTopologyRacks[0], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -1137,12 +1155,13 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") verifyVolumeProvisioningForStandalonePods(ctx, client, pod, namespace, preferredDatastorePaths, - shareddatastoreListMap) + shareddatastoreListMap, false, nil) ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologyForRack1) + err = 
verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + allowedTopologyForRack1, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -1187,7 +1206,7 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Tag preferred datastore for volume provisioning in rack-3(cluster-3))") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[2], - preferredDatastoreChosen, nonShareddatastoreListMapRack3, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack3, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastore := preferredDatastorePaths @@ -1233,28 +1252,29 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, sts1, namespace, preferredDatastorePaths, - nonShareddatastoreListMapRack3, false, false) + nonShareddatastoreListMapRack3, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, - namespace, allowedTopologyForRack3, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, + namespace, allowedTopologyForRack3, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Remove preferred datatsore tag chosen for volume provisioning") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[2]) + allowedTopologyRacks[2], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Tag new preferred datatsore for volume provisioning in rack-3") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[2], - preferredDatastoreChosen, nonShareddatastoreListMapRack3, preferredDatastorePaths) + preferredDatastoreChosen, nonShareddatastoreListMapRack3, preferredDatastorePaths, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { ginkgo.By("Remove preferred datatsore tag") for i := 0; i < len(preferredDatastorePaths); i++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[i], - allowedTopologyRacks[2]) + allowedTopologyRacks[2], false, clientIndex) } gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -1308,29 +1328,32 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") verifyVolumeProvisioningForStandalonePods(ctx, client, pod, namespace, preferredDatastorePaths, - nonShareddatastoreListMapRack3) + nonShareddatastoreListMapRack3, false, nil) ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologyForRack3) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + allowedTopologyForRack3, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // perform statefulset scaleup 
sts1Replicas = 10 ginkgo.By("Scale up statefulset replica count from 3 to 10") preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastore...) - scaleUpStatefulSetPod(ctx, client, sts1, namespace, sts1Replicas, false) + err = scaleUpStatefulSetPod(ctx, client, sts1, namespace, sts1Replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) //verifying volume is provisioned on the preferred datastore ginkgo.By("Verify volume is provisioned on the specified datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, sts1, namespace, preferredDatastorePaths, - nonShareddatastoreListMapRack3, false, true) + nonShareddatastoreListMapRack3, false, true, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, - namespace, allowedTopologyForRack3, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, + namespace, allowedTopologyForRack3, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -1380,7 +1403,7 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Tag preferred datastore for volume provisioning in rack-1(cluster-1))") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Waiting for %v for preferred datastore to get refreshed in the environment", @@ -1425,22 +1448,23 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // verify volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, sts1, namespace, preferredDatastorePaths, - shareddatastoreListMap, false, false) + shareddatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, - namespace, allowedTopologyForRack1, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, + namespace, allowedTopologyForRack1, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Remove preferred datastore tag chosen for volume provisioning") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[0]) + allowedTopologyRacks[0], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Tag new preferred datatsore for volume provisioning in rack-1(cluster-1)") preferredDatastore, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, nonShareddatastoreListMapRack1, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack1, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastore...) 
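Note on the recurring change in the hunks above: helpers such as scaleUpStatefulSetPod, scaleDownStatefulSetPod and the verifyPVnodeAffinityAndPODnodedetails* functions now return an error (and take an extra trailing flag), so every call site gains a gomega.Expect(err).NotTo(gomega.HaveOccurred()) assertion. The declarations themselves are not part of this diff; the snippet below is only a minimal sketch of the assumed pattern, with made-up parameter names (parallel, multiVC) and the real scaling logic elided.

```go
// Sketch only: names mirror the e2e helpers touched in this patch, but the
// parameter names and body are assumptions -- the diff shows call sites, not
// the declarations.
package main

import (
	"context"
	"fmt"
)

// scaleUpStatefulSetPod stands in for the refactored helper: instead of
// failing the test internally, it now returns an error for the caller to
// assert at the call site.
func scaleUpStatefulSetPod(ctx context.Context, name string, replicas int32,
	parallel bool, multiVC bool) error {
	if replicas <= 0 {
		return fmt.Errorf("invalid replica count %d for statefulset %q", replicas, name)
	}
	// ...actual scale-up and pod readiness checks elided...
	return nil
}

func main() {
	ctx := context.Background()
	// In the tests the returned error is asserted with
	//   gomega.Expect(err).NotTo(gomega.HaveOccurred())
	// immediately after each call, which is what the added lines in this
	// patch do.
	if err := scaleUpStatefulSetPod(ctx, "web", 10, false, false); err != nil {
		fmt.Println("scale up failed:", err)
	}
}
```

Returning the error keeps the pass/fail decision at the call site, which is why each converted call in this patch is now followed by its own assertion.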
@@ -1451,41 +1475,44 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // perform statefulset scaleup sts1Replicas = 13 ginkgo.By("Scale up statefulset replica count from 3 to 13") - scaleUpStatefulSetPod(ctx, client, sts1, namespace, sts1Replicas, false) + err = scaleUpStatefulSetPod(ctx, client, sts1, namespace, sts1Replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // verifying volume provisioning ginkgo.By("Verify volume is provisioned on the specified datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, sts1, namespace, preferredDatastorePaths, - rack1DatastoreListMap, false, false) + rack1DatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, - namespace, allowedTopologyForRack1, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, + namespace, allowedTopologyForRack1, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // perform statefulset scaledown sts1Replicas = 6 ginkgo.By("Scale down statefulset replica count from 13 to 6") - scaleDownStatefulSetPod(ctx, client, sts1, namespace, sts1Replicas, false) + err = scaleDownStatefulSetPod(ctx, client, sts1, namespace, sts1Replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Remove preferred datastore tag chosen for volume provisioning") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[1], - allowedTopologyRacks[0]) + allowedTopologyRacks[0], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Tag new datastore chosen for volume provisioning") preferredDatastore, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, nonShareddatastoreListMapRack1, preferredDatastorePaths) + preferredDatastoreChosen, nonShareddatastoreListMapRack1, preferredDatastorePaths, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastore...) 
defer func() { ginkgo.By("Remove preferred datastore tags chosen for volume provisioning") for i := 0; i < len(preferredDatastorePaths); i++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[i], - allowedTopologyRacks[0]) + allowedTopologyRacks[0], false, clientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() framework.Logf("Waiting for %v for preferred datastore to get refreshed in the environment", @@ -1495,18 +1522,20 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // perform statefulset scaleup sts1Replicas = 20 ginkgo.By("Scale up statefulset replica count from 6 to 20") - scaleUpStatefulSetPod(ctx, client, sts1, namespace, sts1Replicas, false) + err = scaleUpStatefulSetPod(ctx, client, sts1, namespace, sts1Replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the specified datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, sts1, namespace, preferredDatastorePaths, - rack1DatastoreListMap, false, false) + rack1DatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, - namespace, allowedTopologyForRack1, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, + namespace, allowedTopologyForRack1, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -1566,7 +1595,7 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Tag preferred datatsore for volume provisioning in rack-1(cluster-1)") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, nonShareddatastoreListMapRack1, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack1, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Waiting for %v for preferred datastore to get refreshed in the environment", @@ -1610,22 +1639,23 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // verify volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, sts1, namespace, preferredDatastorePaths, - nonShareddatastoreListMapRack1, true, false) + nonShareddatastoreListMapRack1, true, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Remove preferred datatsore tag which was chosen for volume provisioning in rack-1") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[0]) + allowedTopologyRacks[0], false, clientIndex) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Tag new preferred datatsore for volume provisioning in rack-2(cluster-2)") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Waiting for %v for preferred datastore to get refreshed in the environment", @@ -1635,32 +1665,34 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // perform statefulset scaleup sts1Replicas = 7 ginkgo.By("Scale up statefulset replica count from 3 to 7") - scaleUpStatefulSetPod(ctx, client, sts1, namespace, sts1Replicas, false) + err = scaleUpStatefulSetPod(ctx, client, sts1, namespace, sts1Replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) //verify volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, sts1, namespace, preferredDatastorePaths, - nonShareddatastoreListMapRack2, true, false) + nonShareddatastoreListMapRack2, true, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Remove the datastore preference chosen for volume provisioning") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[1]) + allowedTopologyRacks[1], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Tag new preferred datatsore for volume provisioning in rack-3(cluster-3)") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[2], - preferredDatastoreChosen, nonShareddatastoreListMapRack3, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack3, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { ginkgo.By("Remove the datastore preference chosen for volume provisioning") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[2]) + allowedTopologyRacks[2], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -1672,18 +1704,20 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // perform statefulset scaleup sts1Replicas = 13 ginkgo.By("Scale up statefulset replica count from 7 to 13") - scaleUpStatefulSetPod(ctx, client, sts1, namespace, sts1Replicas, false) + err = scaleUpStatefulSetPod(ctx, client, sts1, namespace, sts1Replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // verify volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, sts1, namespace, preferredDatastorePaths, - nonShareddatastoreListMapRack3, true, false) + nonShareddatastoreListMapRack3, true, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node 
affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -1720,20 +1754,20 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Tag preferred datatsore for volume provisioning in rack-2(cluster-2)") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastoreChosen = 1 preferredDatastore, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastore...) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { ginkgo.By("Remove preferred datastore tag") for i := 0; i < len(preferredDatastorePaths); i++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[i], - allowedTopologyRacks[1]) + allowedTopologyRacks[1], false, clientIndex) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() framework.Logf("Waiting for %v for preferred datastore to get refreshed in the environment", @@ -1777,13 +1811,14 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // verify volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, sts1, namespace, preferredDatastorePaths, - rack2DatastoreListMap, false, false) + rack2DatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Verify affinity details ginkgo.By("Verify node and pv topology affinity details") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, - namespace, allowedTopologyForRack2, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, + namespace, allowedTopologyForRack2, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -1816,17 +1851,17 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Tag preferred datatsore for volume provisioning in rack-2(cluster-2)") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastore, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastore...) 
defer func() { ginkgo.By("Remove preferred datastore tag") for i := 0; i < len(preferredDatastorePaths); i++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[i], - allowedTopologyRacks[1]) + allowedTopologyRacks[1], false, clientIndex) } gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -1895,12 +1930,12 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Tag preferred datatsore for volume provisioning in rack-2(cluster-2)") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { ginkgo.By("Remove preferred datastore tag") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[1]) + allowedTopologyRacks[1], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -1956,12 +1991,13 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") verifyVolumeProvisioningForStandalonePods(ctx, client, pod, namespace, storagePolicyDs, - nonShareddatastoreListMapRack2) + nonShareddatastoreListMapRack2, false, nil) ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologyForRack2) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + allowedTopologyForRack2, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -1999,24 +2035,24 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Tag different preferred datatsores in different racks") preferredDatastore1, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, nonShareddatastoreListMapRack1, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack1, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastore1...) preferredDatastore2, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastore2...) preferredDatastore3, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[2], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastore3...) 
defer func() { for i := 0; i < len(allowedTopologyRacks); i++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[i], - allowedTopologyRacks[i]) + allowedTopologyRacks[i], false, clientIndex) } gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -2079,29 +2115,32 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision //verify volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, sts1, namespace, preferredDatastorePaths, - allDatastoresListMap, true, false) + allDatastoresListMap, true, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Verify affinity details ginkgo.By("Verify node and pv topology affinity details") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // perform statefulset scaleup sts1Replicas = 13 ginkgo.By("Scale up statefulset replica count from 7 to 13") - scaleUpStatefulSetPod(ctx, client, sts1, namespace, sts1Replicas, false) + err = scaleUpStatefulSetPod(ctx, client, sts1, namespace, sts1Replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, sts1, namespace, preferredDatastorePaths, - allDatastoresListMap, true, false) + allDatastoresListMap, true, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Verify affinity details ginkgo.By("Verify node and pv topology affinity details") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, sts1, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -2130,17 +2169,17 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Tag rack-1 to preferred datatsore which is accessible only on rack-2") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastoreChosen = 1 preferredDatastore, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastore...) 
defer func() { for i := 0; i < len(preferredDatastorePaths); i++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[i], - allowedTopologyRacks[0]) + allowedTopologyRacks[0], false, clientIndex) } gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -2203,12 +2242,13 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") verifyVolumeProvisioningForStandalonePods(ctx, client, pod, namespace, preferredDatastorePaths, - rack2DatastoreListMap) + rack2DatastoreListMap, false, nil) ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -2253,7 +2293,7 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Assign Tags to preferred datatsore for volume provisioning in rack-2(cluster-2)") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) for i := 1; i < len(allowedTopologyRacks); i++ { err = tagSameDatastoreAsPreferenceToDifferentRacks(masterIp, sshClientConfig, allowedTopologyRacks[i], @@ -2263,7 +2303,7 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision defer func() { for i := 0; i < len(allowedTopologyRacks); i++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[i]) + allowedTopologyRacks[i], false, clientIndex) } gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -2320,12 +2360,13 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") verifyVolumeProvisioningForStandalonePods(ctx, client, pod, namespace, storagePolicyDs, - nonShareddatastoreListMapRack2) + nonShareddatastoreListMapRack2, false, nil) ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologyForRack2) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + allowedTopologyForRack2, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -2354,11 +2395,11 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Assign Tags to preferred datatsore for volume provisioning") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, nonShareddatastoreListMapRack1, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack1, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { err = detachTagCreatedOnPreferredDatastore(masterIp, 
sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[0]) + allowedTopologyRacks[0], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -2380,7 +2421,7 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision ginkgo.By("Expect claim to fail provisioning volume") framework.ExpectError(fpv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, pollTimeoutShort, framework.PollShortTimeout)) - expectedErrMsg := "failed to get shared datastores for topology requirement" + expectedErrMsg := "failed to create volume" err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) }) @@ -2419,12 +2460,12 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision // choose preferred datastore ginkgo.By("Assign Tag to preferred datatsore for volume provisioning in rack-2(cluster-2)") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { for i := 0; i < len(preferredDatastorePaths); i++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[i], - allowedTopologyRacks[1]) + allowedTopologyRacks[1], false, clientIndex) } gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -2552,13 +2593,14 @@ var _ = ginkgo.Describe("[Preferential-Topology] Preferential-Topology-Provision ginkgo.By("Verify volume provisioning for Pod-1/Pod-2") for i := 0; i < len(podList); i++ { verifyVolumeProvisioningForStandalonePods(ctx, client, podList[i], namespace, - preferredDatastorePaths, nonShareddatastoreListMapRack2) + preferredDatastorePaths, nonShareddatastoreListMapRack2, false, nil) } ginkgo.By("Verify pv and pod node affinity details for pv-1/pod-1 and pv-2/pod-2") for i := 0; i < len(podList); i++ { - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, podList[i], - namespace, allowedTopologyForRack2) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], + namespace, allowedTopologyForRack2, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }) diff --git a/tests/e2e/preferential_topology_disruptive.go b/tests/e2e/preferential_topology_disruptive.go index ca0ef87d08..3693c8681b 100644 --- a/tests/e2e/preferential_topology_disruptive.go +++ b/tests/e2e/preferential_topology_disruptive.go @@ -73,6 +73,7 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog csiNamespace string sshClientConfig *ssh.ClientConfig nimbusGeneratedK8sVmPwd string + clientIndex int ) ginkgo.BeforeEach(func() { var cancel context.CancelFunc @@ -167,7 +168,9 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog topologyLength, leafNode, leafNodeTag1) //set preferred datatsore time interval - setPreferredDatastoreTimeInterval(client, ctx, csiNamespace, namespace, csiReplicas) + setPreferredDatastoreTimeInterval(client, ctx, csiNamespace, csiReplicas, false) + + clientIndex = 0 }) @@ -182,11 +185,11 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog gomega.Expect(err).NotTo(gomega.HaveOccurred()) } framework.Logf("Perform preferred datastore tags cleanup 
after test completion") - err = deleteTagCreatedForPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks) + err = deleteTagCreatedForPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Recreate preferred datastore tags post cleanup") - err = createTagForPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks) + err = createTagForPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -250,7 +253,7 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog ginkgo.By("Tag preferred datastore for volume provisioning") for i := 0; i < len(allowedTopologyRacks); i++ { preferredDatastorePath, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[i], - preferredDatastoreChosen, datastorestMap[i], nil) + preferredDatastoreChosen, datastorestMap[i], nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastorePath...) } @@ -302,25 +305,26 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - allDatastoresListMap, true, false) + allDatastoresListMap, true, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Remove preferred datastore tag chosen for volume provisioning") for i := 0; i < len(allowedTopologyRacks); i++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[i], - allowedTopologyRacks[i]) + allowedTopologyRacks[i], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } ginkgo.By("Tag new preferred datatsore for volume provisioning") for i := 0; i < len(allowedTopologyRacks); i++ { preferredDatastorePath, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[i], - preferredDatastoreChosen, datastorestMap[i], preferredDatastorePaths) + preferredDatastoreChosen, datastorestMap[i], preferredDatastorePaths, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastorePath...) preferredDatastorePathsToDel = append(preferredDatastorePathsToDel, preferredDatastorePath...) 
@@ -329,7 +333,7 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog ginkgo.By("Preferred datastore tags cleanup") for i := 0; i < len(allowedTopologyRacks); i++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePathsToDel[i], - allowedTopologyRacks[i]) + allowedTopologyRacks[i], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -373,7 +377,8 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog ginkgo.By("Scale up statefulset replica and verify the replica count") replicas = 10 - scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, true) + err = scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulset) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(replicas)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -381,35 +386,37 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - allDatastoresListMap, true, false) + allDatastoresListMap, true, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Remove preferred datastore tag in rack-2(cluster-2)") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[4], - allowedTopologyRacks[1]) + allowedTopologyRacks[1], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Tag new preferred datatsore for volume provisioning") preferredDatastoreRack2New, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastoreRack2New...) 
defer func() { ginkgo.By("Remove preferred datastore tag") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastoreRack2New[0], - allowedTopologyRacks[1]) + allowedTopologyRacks[1], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() // Scale down statefulSets replica count replicas = 5 ginkgo.By("Scale down statefulset replica and verify the replica count") - scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, true) + err = scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown = GetListOfPodsInSts(client, statefulset) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(replicas)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -436,7 +443,8 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog // Scale up statefulSets replicas count ginkgo.By("Scale up statefulset replica and verify the replica count") replicas = 13 - scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, true) + err = scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown = GetListOfPodsInSts(client, statefulset) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(replicas)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -444,13 +452,14 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - allDatastoresListMap, true, false) + allDatastoresListMap, true, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -494,14 +503,14 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog ginkgo.By("Tag preferred datastore for volume provisioning in rack-2(cluster-2)") preferredDatastore1, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if !strings.Contains(preferredDatastore1[0], "nfs") { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastore1[0], - allowedTopologyRacks[1]) + allowedTopologyRacks[1], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastore1, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, nonShareddatastoreListMapRack2, preferredDatastore1) + preferredDatastoreChosen, nonShareddatastoreListMapRack2, preferredDatastore1, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = 
append(preferredDatastorePaths, preferredDatastore1...) } else { @@ -509,14 +518,14 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog } preferredDatastore2, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastore2...) defer func() { ginkgo.By("Remove preferred datastore tag") for i := 0; i < len(preferredDatastorePaths); i++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[i], - allowedTopologyRacks[1]) + allowedTopologyRacks[1], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -557,13 +566,14 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - rack2DatastoreListMap, false, false) + rack2DatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologyForRack2, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologyForRack2, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Migrate the worker vms residing on the nfs datatsore before " + "making datastore inaccessible") @@ -615,56 +625,62 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog // verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") verifyVolumeProvisioningForStandalonePods(ctx, client, pod, namespace, preferredDatastorePaths, - rack2DatastoreListMap) + rack2DatastoreListMap, false, nil) ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologyForRack2) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + allowedTopologyForRack2, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // perform statefulset scaleup replicas = 15 ginkgo.By("Scale up statefulset replica count from 10 to 15") - scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - rack2DatastoreListMap, false, false) + rack2DatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - 
verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologyForRack2, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologyForRack2, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Power on the inaccessible datastore") datastoreOp = "on" powerOnPreferredDatastore(datastoreName, datastoreOp) ginkgo.By("Remove preferred datastore tag chosen for volume provisioning") - err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastore1[0], allowedTopologyRacks[1]) + err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastore1[0], + allowedTopologyRacks[1], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) newPreferredDatastore, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, nonShareddatastoreListMapRack2, preferredDatastorePaths) + preferredDatastoreChosen, nonShareddatastoreListMapRack2, preferredDatastorePaths, false, clientIndex) preferredDatastorePaths = append(preferredDatastorePaths, newPreferredDatastore...) // perform statefulset scaleup replicas = 20 ginkgo.By("Scale up statefulset replica count from 15 to 20") - scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - rack2DatastoreListMap, false, false) + rack2DatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologyForRack2, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologyForRack2, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -708,14 +724,14 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog ginkgo.By("Tag preferred datastore for volume provisioning in rack-1(cluster-1)") preferredDatastore1, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, nonShareddatastoreListMapRack1, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack1, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if !strings.Contains(preferredDatastore1[0], "nfs") { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastore1[0], - allowedTopologyRacks[0]) + allowedTopologyRacks[0], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastore1, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, nonShareddatastoreListMapRack1, preferredDatastore1) + preferredDatastoreChosen, nonShareddatastoreListMapRack1, preferredDatastore1, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastore1...) 
} else { @@ -723,14 +739,14 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog } preferredDatastore2, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastore2...) defer func() { ginkgo.By("Remove preferred datastore tag") for i := 0; i < len(preferredDatastorePaths); i++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[i], - allowedTopologyRacks[0]) + allowedTopologyRacks[0], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -772,13 +788,14 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - rack1DatastoreListMap, false, false) + rack1DatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologyForRack1, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologyForRack1, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Migrate all the worker vms residing on the preferred datatsore before " + "putting it into maintenance mode") @@ -835,28 +852,31 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog // verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") verifyVolumeProvisioningForStandalonePods(ctx, client, pod, namespace, preferredDatastorePaths, - rack1DatastoreListMap) + rack1DatastoreListMap, false, nil) ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologyForRack1) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + allowedTopologyForRack1, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // perform statefulset scaleup replicas = 15 ginkgo.By("Scale up statefulset replica count from 10 to 15") - scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - rack1DatastoreListMap, false, false) + rack1DatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - 
namespace, allowedTopologyForRack1, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologyForRack1, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Exit datastore from Maintenance mode") err = exitDatastoreFromMaintenanceMode(masterIp, sshClientConfig, dataCenters, preferredDatastore1[0]) @@ -864,28 +884,31 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog isDatastoreInMaintenanceMode = false ginkgo.By("Remove preferred datastore tag chosen for volume provisioning") - err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastore1[0], allowedTopologyRacks[0]) + err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastore1[0], + allowedTopologyRacks[0], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) newPreferredDatastore, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, nonShareddatastoreListMapRack1, preferredDatastorePaths) + preferredDatastoreChosen, nonShareddatastoreListMapRack1, preferredDatastorePaths, false, clientIndex) preferredDatastorePaths = append(preferredDatastorePaths, newPreferredDatastore...) // perform statefulset scaleup replicas = 20 ginkgo.By("Scale up statefulset replica count from 15 to 20") - scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - rack1DatastoreListMap, false, false) + rack1DatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologyForRack1, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologyForRack1, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -929,14 +952,14 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog ginkgo.By("Tag preferred datastore for volume provisioning in rack-2(cluster-2)") preferredDatastore1, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if !strings.Contains(preferredDatastore1[0], "nfs") { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastore1[0], - allowedTopologyRacks[1]) + allowedTopologyRacks[1], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastore1, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, nonShareddatastoreListMapRack2, preferredDatastore1) + preferredDatastoreChosen, nonShareddatastoreListMapRack2, preferredDatastore1, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = 
append(preferredDatastorePaths, preferredDatastore1...) } else { @@ -944,7 +967,7 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog } preferredDatastore2, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastore2...) @@ -952,7 +975,7 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog ginkgo.By("Remove preferred datastore tag") for i := 0; i < len(preferredDatastorePaths); i++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[i], - allowedTopologyRacks[1]) + allowedTopologyRacks[1], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -995,13 +1018,14 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - rack2DatastoreListMap, false, false) + rack2DatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologyForRack2, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologyForRack2, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Migrate all the worker vms residing on the nfs datatsore before " + "making datastore inaccessible") @@ -1054,55 +1078,61 @@ var _ = ginkgo.Describe("[Disruptive-Preferential-Topology] Preferential-Topolog // verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") verifyVolumeProvisioningForStandalonePods(ctx, client, pod, namespace, preferredDatastorePaths, - rack2DatastoreListMap) + rack2DatastoreListMap, false, nil) ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologyForRack2) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + allowedTopologyForRack2, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // perform statefulset scaleup replicas = 15 ginkgo.By("Scale up statefulset replica count from 10 to 15") - scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - rack2DatastoreListMap, false, false) + rack2DatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified 
in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologyForRack2, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologyForRack2, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Power on the suspended datastore") datastoreOp = "on" powerOnPreferredDatastore(datastoreName, datastoreOp) ginkgo.By("Remove preferred datastore tag chosen for volume provisioning") - err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastore1[0], allowedTopologyRacks[1]) + err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastore1[0], + allowedTopologyRacks[1], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) newPreferredDatastore, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, nonShareddatastoreListMapRack2, preferredDatastorePaths) + preferredDatastoreChosen, nonShareddatastoreListMapRack2, preferredDatastorePaths, false, clientIndex) preferredDatastorePaths = append(preferredDatastorePaths, newPreferredDatastore...) // perform statefulset scaleup replicas = 20 ginkgo.By("Scale up statefulset replica count from 15 to 20") - scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - rack2DatastoreListMap, false, false) + rack2DatastoreListMap, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologyForRack2, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologyForRack2, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) }) diff --git a/tests/e2e/preferential_topology_snapshot.go b/tests/e2e/preferential_topology_snapshot.go index 4231e8cbf5..1dd73787b9 100644 --- a/tests/e2e/preferential_topology_snapshot.go +++ b/tests/e2e/preferential_topology_snapshot.go @@ -38,7 +38,7 @@ import ( fss "k8s.io/kubernetes/test/e2e/framework/statefulset" admissionapi "k8s.io/pod-security-admission/api" - snapclient "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" + snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" ) var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology Volume Snapshot tests", func() { @@ -81,6 +81,7 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology csiNamespace string sshClientConfig *ssh.ClientConfig nimbusGeneratedK8sVmPwd string + clientIndex int ) ginkgo.BeforeEach(func() { @@ -187,7 +188,9 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology csiReplicas = *csiDeployment.Spec.Replicas //set preferred datatsore time interval - setPreferredDatastoreTimeInterval(client, ctx, csiNamespace, namespace, 
csiReplicas) + setPreferredDatastoreTimeInterval(client, ctx, csiNamespace, csiReplicas, false) + + clientIndex = 0 }) ginkgo.AfterEach(func() { @@ -201,11 +204,11 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology gomega.Expect(err).NotTo(gomega.HaveOccurred()) } framework.Logf("Perform preferred datastore tags cleanup after test completion") - err = deleteTagCreatedForPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks) + err = deleteTagCreatedForPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Recreate preferred datastore tags post cleanup") - err = createTagForPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks) + err = createTagForPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -242,12 +245,12 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology // choose preferred datastore ginkgo.By("Tag preferred datastore for volume provisioning in rack-1(cluster-1))") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, nonShareddatastoreListMapRack1, nil) + preferredDatastoreChosen, nonShareddatastoreListMapRack1, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { ginkgo.By("Remove preferred datastore tag") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[0]) + allowedTopologyRacks[0], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -287,11 +290,11 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology ginkgo.By("Create volume snapshot class, volume snapshot") volumeSnapshot, volumeSnapshotClass, snapshotId := createSnapshotClassAndVolSnapshot(ctx, snapc, namespace, - pvclaim, volHandle, false) + pvclaim, volHandle, false, false) defer func() { ginkgo.By("Perform cleanup of snapshot created") performCleanUpForSnapshotCreated(ctx, snapc, namespace, volHandle, volumeSnapshot, snapshotId, - volumeSnapshotClass) + volumeSnapshotClass, pandoraSyncWaitTime, false) }() ginkgo.By("Create PVC from snapshot") @@ -332,12 +335,13 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology // verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") verifyVolumeProvisioningForStandalonePods(ctx, client, pod, namespace, preferredDatastorePaths, - nonShareddatastoreListMapRack1) + nonShareddatastoreListMapRack1, false, nil) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologyForRack1) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + allowedTopologyForRack1, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -369,7 +373,7 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology // choose preferred datastore ginkgo.By("Tag preferred datastore for volume provisioning in rack-2(cluster-2))") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil) + 
preferredDatastoreChosen, nonShareddatastoreListMapRack2, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Waiting for %v for preferred datastore to get refreshed in the environment", @@ -409,26 +413,26 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology ginkgo.By("Create volume snapshot class, volume snapshot") volumeSnapshot, volumeSnapshotClass, snapshotId := createSnapshotClassAndVolSnapshot(ctx, snapc, namespace, - pvclaim, volHandle, false) + pvclaim, volHandle, false, false) defer func() { ginkgo.By("Perform cleanup of snapshot created") performCleanUpForSnapshotCreated(ctx, snapc, namespace, volHandle, volumeSnapshot, snapshotId, - volumeSnapshotClass) + volumeSnapshotClass, pandoraSyncWaitTime, false) }() ginkgo.By("Remove preferred datastore tag chosen for volume provisioning") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[1]) + allowedTopologyRacks[1], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Tag new preferred datatsore for volume provisioning") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[1], - preferredDatastoreChosen, nonShareddatastoreListMapRack2, preferredDatastorePaths) + preferredDatastoreChosen, nonShareddatastoreListMapRack2, preferredDatastorePaths, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { ginkgo.By("Remove preferred datastore tag") err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[1]) + allowedTopologyRacks[1], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -443,9 +447,10 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Expect claim to fail provisioning volume within the topology") - framework.ExpectError(fpv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, - client, pvclaim2.Namespace, pvclaim2.Name, pollTimeoutShort, framework.PollShortTimeout)) - expectedErrMsg := "failed to get the compatible datastore for create volume from snapshot" + err = fpv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, + client, pvclaim2.Namespace, pvclaim2.Name, framework.Poll, framework.ClaimProvisionTimeout) + gomega.Expect(err).To(gomega.HaveOccurred()) + expectedErrMsg := "failed to get the compatible shared datastore for create volume from snapshot" err = waitForEvent(ctx, client, namespace, expectedErrMsg, pvclaim2.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Expected error : %q", expectedErrMsg)) defer func() { @@ -527,13 +532,13 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology ginkgo.By("Tag different preferred datastore for volume provisioning in different racks") for i := 0; i < len(allowedTopologyRacks); i++ { preferredDatastorePath, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[i], - preferredDatastoreChosen, datastorestMap[i], nil) + preferredDatastoreChosen, datastorestMap[i], nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, preferredDatastorePath...) 
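The recurring change across these tagging hunks is that the datastore-tagging helpers (tagPreferredDatastore, attachTagToPreferredDatastore, detachTagCreatedOnPreferredDatastore) gain an isMultiVC/isMultiVcSetup flag plus a clientIndex, and choose the govc login command accordingly: govcLoginCmd() on a single-vCenter setup, govcLoginCmdForMultiVC(clientIndex) otherwise. The sketch below is illustrative only and not part of this diff; the login prefix, datastore, tag, and category strings are placeholders standing in for the suite's govcLoginCmd helpers and the preferredDSCat constant.

```go
package main

import "fmt"

// loginCmd is a placeholder standing in for the suite's govcLoginCmd /
// govcLoginCmdForMultiVC helpers: the flag decides which vCenter login
// prefix is used, and clientIndex selects the vCenter on a multi-VC setup.
func loginCmd(isMultiVC bool, clientIndex int) string {
	if isMultiVC {
		return fmt.Sprintf("<govc login for vCenter %d> && ", clientIndex)
	}
	return "<govc login for the single vCenter> && "
}

// buildAttachTagCmd mirrors the command construction used by
// attachTagToPreferredDatastore: a single code path, with only the login
// prefix chosen by the multi-VC flag.
func buildAttachTagCmd(datastore, tagName, category string, isMultiVC bool, clientIndex int) string {
	return loginCmd(isMultiVC, clientIndex) +
		"govc tags.attach -c " + category + " " + tagName + " '" + datastore + "'"
}

func main() {
	// Hypothetical datastore, tag, and category names, for illustration only.
	fmt.Println(buildAttachTagCmd("vsanDatastore", "rack-1", "preferred-ds-category", false, 0))
	fmt.Println(buildAttachTagCmd("nfs-ds-2", "rack-2", "preferred-ds-category", true, 1))
}
```

Branching only on the login prefix keeps the govc invocation itself in one place; the detach, create, and delete helpers in the later preferential_topology_utils.go hunks take that approach, while the refactored attach helper currently repeats the full invocation in both arms.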
} sharedPreferredDatastorePaths, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[0], - preferredDatastoreChosen, shareddatastoreListMap, nil) + preferredDatastoreChosen, shareddatastoreListMap, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePaths = append(preferredDatastorePaths, sharedPreferredDatastorePaths...) for i := 1; i < len(allowedTopologyRacks); i++ { @@ -544,7 +549,7 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology defer func() { for i := 0; i < len(allowedTopologyRacks); i++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, sharedPreferredDatastorePaths[0], - allowedTopologyRacks[i]) + allowedTopologyRacks[i], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -672,23 +677,24 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology ginkgo.By("Verify volume is provisioned on the preferred datatsore") for i := 0; i < len(podList); i++ { verifyVolumeProvisioningForStandalonePods(ctx, client, podList[i], namespace, - preferredDatastorePaths, allDatastoresListMap) + preferredDatastorePaths, allDatastoresListMap, false, nil) } ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") for i := 0; i < len(podList); i++ { - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, podList[i], - namespace, allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], + namespace, allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } ginkgo.By("Create volume snapshot class, volume snapshot") volumeSnapshot1, volumeSnapshotClass1, snapshotId1 := createSnapshotClassAndVolSnapshot(ctx, snapc, namespace, - pvclaim1, volHandle1, false) + pvclaim1, volHandle1, false, false) defer func() { ginkgo.By("Perform cleanup of snapshot created") performCleanUpForSnapshotCreated(ctx, snapc, namespace, volHandle1, volumeSnapshot1, snapshotId1, - volumeSnapshotClass1) + volumeSnapshotClass1, pandoraSyncWaitTime, false) }() ginkgo.By("Create PVC-3 from snapshot") @@ -709,7 +715,7 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle3, snapshotId1) + err = verifySnapshotIsDeletedInCNS(volHandle3, snapshotId1, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -733,17 +739,18 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology // verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") verifyVolumeProvisioningForStandalonePods(ctx, client, pod3, namespace, preferredDatastorePaths, - allDatastoresListMap) + allDatastoresListMap, false, nil) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod3, namespace, - allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod3, namespace, + allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Remove preferred datastore tag chosen for volume provisioning") for i := 0; i < len(allowedTopologyRacks); i++ { err = 
detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[i], - allowedTopologyRacks[i]) + allowedTopologyRacks[i], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -751,14 +758,14 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology ginkgo.By("Assign new preferred datastore tags for volume provisioning in different racks") for i := 0; i < len(allowedTopologyRacks); i++ { preferredDatastorePath, err := tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[i], - preferredDatastoreChosen, datastorestMap[i], preferredDatastorePaths) + preferredDatastoreChosen, datastorestMap[i], preferredDatastorePaths, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) preferredDatastorePathsNew = append(preferredDatastorePathsNew, preferredDatastorePath...) } defer func() { for i := 0; i < len(allowedTopologyRacks); i++ { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePathsNew[i], - allowedTopologyRacks[i]) + allowedTopologyRacks[i], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() @@ -827,18 +834,19 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology // verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") verifyVolumeProvisioningForStandalonePods(ctx, client, pod4, namespace, preferredDatastorePathsNew, - allDatastoresListMap) + allDatastoresListMap, false, nil) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod4, namespace, - allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod4, namespace, + allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) volumeSnapshot2, volumeSnapshotClass2, snapshotId2 := createSnapshotClassAndVolSnapshot(ctx, snapc, namespace, - pvclaim4, volHandle4, false) + pvclaim4, volHandle4, false, false) defer func() { performCleanUpForSnapshotCreated(ctx, snapc, namespace, pv4.Spec.CSI.VolumeHandle, volumeSnapshot2, - snapshotId2, volumeSnapshotClass2) + snapshotId2, volumeSnapshotClass2, pandoraSyncWaitTime, false) }() ginkgo.By("Create PVC-5 from snapshot") @@ -879,12 +887,13 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology // verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") verifyVolumeProvisioningForStandalonePods(ctx, client, pod5, namespace, preferredDatastorePathsNew, - allDatastoresListMap) + allDatastoresListMap, false, nil) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod5, namespace, - allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod5, namespace, + allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -920,11 +929,11 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology ginkgo.By("Tag preferred datastore for volume provisioning in rack-3(cluster-3)") preferredDatastorePaths, err = tagPreferredDatastore(masterIp, sshClientConfig, allowedTopologyRacks[2], - preferredDatastoreChosen, nonShareddatastoreListMapRack3, nil) + 
preferredDatastoreChosen, nonShareddatastoreListMapRack3, nil, false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { err = detachTagCreatedOnPreferredDatastore(masterIp, sshClientConfig, preferredDatastorePaths[0], - allowedTopologyRacks[2]) + allowedTopologyRacks[2], false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -969,13 +978,14 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - nonShareddatastoreListMapRack3, false, false) + nonShareddatastoreListMapRack3, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologyForRack3, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologyForRack3, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Fetching pod 3, pvc3 and pv3 details") pod3, err := client.CoreV1().Pods(namespace).Get(ctx, @@ -995,11 +1005,11 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology ginkgo.By("Create volume snapshot class, volume snapshot") volumeSnapshot, volumeSnapshotClass, snapshotId := createSnapshotClassAndVolSnapshot(ctx, snapc, namespace, - pvclaim3, volHandle3, true) + pvclaim3, volHandle3, true, false) defer func() { ginkgo.By("Perform cleanup of snapshot created") performCleanUpForSnapshotCreated(ctx, snapc, namespace, volHandle3, volumeSnapshot, snapshotId, - volumeSnapshotClass) + volumeSnapshotClass, pandoraSyncWaitTime, false) }() ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1)) @@ -1043,12 +1053,13 @@ var _ = ginkgo.Describe("[Preferential-Topology-Snapshot] Preferential Topology //verifying volume provisioning ginkgo.By("Verify volume is provisioned on the preferred datatsore") err = verifyVolumeProvisioningForStatefulSet(ctx, client, statefulset, namespace, preferredDatastorePaths, - nonShareddatastoreListMapRack3, false, false) + nonShareddatastoreListMapRack3, false, false, false, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologyForRack3, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologyForRack3, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) }) diff --git a/tests/e2e/preferential_topology_utils.go b/tests/e2e/preferential_topology_utils.go index 9210995ffb..949ab1d30e 100644 --- a/tests/e2e/preferential_topology_utils.go +++ b/tests/e2e/preferential_topology_utils.go @@ -21,15 +21,14 @@ import ( "fmt" "strings" - snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - snapclient "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" + snapV1 
"github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/vmware/govmomi/object" "golang.org/x/crypto/ssh" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -110,24 +109,45 @@ attachTagToPreferredDatastore method is used to attach the preferred tag to the datastore chosen for volume provisioning */ func attachTagToPreferredDatastore(masterIp string, sshClientConfig *ssh.ClientConfig, - datastore string, tagName string) error { - attachTagCat := govcLoginCmd() + - "govc tags.attach -c " + preferredDSCat + " " + tagName + " " + "'" + datastore + "'" - framework.Logf("cmd to attach tag to preferred datastore: %s ", attachTagCat) - attachTagCatRes, err := sshExec(sshClientConfig, masterIp, attachTagCat) - if err != nil && attachTagCatRes.Code != 0 { - fssh.LogResult(attachTagCatRes) - return fmt.Errorf("couldn't execute command: %s on host: %v , error: %s", - attachTagCat, masterIp, err) + datastore string, tagName string, isMultiVC bool, clientIndexForMultiVC int) error { + var attachTagCat string + if !isMultiVC { + attachTagCat = govcLoginCmd() + + "govc tags.attach -c " + preferredDSCat + " " + tagName + " " + "'" + datastore + "'" + framework.Logf("cmd to attach tag to preferred datastore: %s ", attachTagCat) + attachTagCatRes, err := sshExec(sshClientConfig, masterIp, attachTagCat) + if err != nil && attachTagCatRes.Code != 0 { + fssh.LogResult(attachTagCatRes) + return fmt.Errorf("couldn't execute command: %s on host: %v , error: %s", + attachTagCat, masterIp, err) + } + return nil + } else { + attachTagCat = govcLoginCmdForMultiVC(clientIndexForMultiVC) + + "govc tags.attach -c " + preferredDSCat + " " + tagName + " " + "'" + datastore + "'" + framework.Logf("cmd to attach tag to preferred datastore: %s ", attachTagCat) + attachTagCatRes, err := sshExec(sshClientConfig, masterIp, attachTagCat) + if err != nil && attachTagCatRes.Code != 0 { + fssh.LogResult(attachTagCatRes) + return fmt.Errorf("couldn't execute command: %s on host: %v , error: %s", + attachTagCat, masterIp, err) + } + return nil } - return nil } /* detachTagCreatedOnPreferredDatastore is used to detach the tag created on preferred datastore */ func detachTagCreatedOnPreferredDatastore(masterIp string, sshClientConfig *ssh.ClientConfig, - datastore string, tagName string) error { - detachTagCat := govcLoginCmd() + - "govc tags.detach -c " + preferredDSCat + " " + tagName + " " + "'" + datastore + "'" + datastore string, tagName string, isMultiVcSetup bool, clientIndex int) error { + var detachTagCat string + if !isMultiVcSetup { + detachTagCat = govcLoginCmd() + + "govc tags.detach -c " + preferredDSCat + " " + tagName + " " + "'" + datastore + "'" + + } else { + detachTagCat = govcLoginCmdForMultiVC(clientIndex) + + "govc tags.detach -c " + preferredDSCat + " " + tagName + " " + "'" + datastore + "'" + } framework.Logf("cmd to detach the tag assigned to preferred datastore: %s ", detachTagCat) detachTagCatRes, err := sshExec(sshClientConfig, masterIp, detachTagCat) if err != nil && detachTagCatRes.Code != 0 { @@ -219,7 +239,8 @@ chosen preferred datastore or not for statefulsets func verifyVolumeProvisioningForStatefulSet(ctx context.Context, client 
clientset.Interface, statefulset *appsv1.StatefulSet, namespace string, datastoreNames []string, datastoreListMap map[string]string, - multipleAllowedTopology bool, parallelStatefulSetCreation bool) error { + multipleAllowedTopology bool, parallelStatefulSetCreation bool, + isMultiVcSetup bool, multiVCDsUrls []string) error { counter := 0 stsPodCount := 0 var dsUrls []string @@ -230,22 +251,36 @@ func verifyVolumeProvisioningForStatefulSet(ctx context.Context, ssPodsBeforeScaleDown = fss.GetPodList(client, statefulset) } stsPodCount = len(ssPodsBeforeScaleDown.Items) - for i := 0; i < len(datastoreNames); i++ { - if val, ok := datastoreListMap[datastoreNames[i]]; ok { - dsUrls = append(dsUrls, val) + if !isMultiVcSetup { + for i := 0; i < len(datastoreNames); i++ { + if val, ok := datastoreListMap[datastoreNames[i]]; ok { + dsUrls = append(dsUrls, val) + } } } for _, sspod := range ssPodsBeforeScaleDown.Items { _, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, volumespec := range sspod.Spec.Volumes { + var isPreferred bool if volumespec.PersistentVolumeClaim != nil { pv := getPvFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) - isPreferred := e2eVSphere.verifyPreferredDatastoreMatch(pv.Spec.CSI.VolumeHandle, dsUrls) - if isPreferred { - framework.Logf("volume %s is created on preferred datastore %v", pv.Spec.CSI.VolumeHandle, dsUrls) - counter = counter + 1 + if !isMultiVcSetup { + isPreferred = e2eVSphere.verifyPreferredDatastoreMatch(pv.Spec.CSI.VolumeHandle, dsUrls) + if isPreferred { + framework.Logf("volume %s is created on preferred datastore %v", pv.Spec.CSI.VolumeHandle, dsUrls) + counter = counter + 1 + } + } else { + isPreferred = multiVCe2eVSphere.verifyPreferredDatastoreMatchInMultiVC(pv.Spec.CSI.VolumeHandle, + multiVCDsUrls) + if isPreferred { + framework.Logf("volume %s is created on preferred datastore %v", pv.Spec.CSI.VolumeHandle, + multiVCDsUrls) + counter = counter + 1 + } } + } } } @@ -270,7 +305,8 @@ chosen preferred datastore or not for standalone pods */ func verifyVolumeProvisioningForStandalonePods(ctx context.Context, client clientset.Interface, pod *v1.Pod, - namespace string, datastoreNames []string, datastoreListMap map[string]string) { + namespace string, datastoreNames []string, datastoreListMap map[string]string, + isMultiVcSetup bool, multiVCDsUrls []string) { var flag bool = false var dsUrls []string for i := 0; i < len(datastoreNames); i++ { @@ -281,10 +317,18 @@ func verifyVolumeProvisioningForStandalonePods(ctx context.Context, for _, volumespec := range pod.Spec.Volumes { if volumespec.PersistentVolumeClaim != nil { pv := getPvFromClaim(client, pod.Namespace, volumespec.PersistentVolumeClaim.ClaimName) - isPreferred := e2eVSphere.verifyPreferredDatastoreMatch(pv.Spec.CSI.VolumeHandle, dsUrls) - if isPreferred { - framework.Logf("volume %s is created on preferred datastore %v", pv.Spec.CSI.VolumeHandle, dsUrls) - flag = true + if !isMultiVcSetup { + isPreferred := e2eVSphere.verifyPreferredDatastoreMatch(pv.Spec.CSI.VolumeHandle, dsUrls) + if isPreferred { + framework.Logf("volume %s is created on preferred datastore %v", pv.Spec.CSI.VolumeHandle, dsUrls) + flag = true + } + } else { + isPreferred := multiVCe2eVSphere.verifyPreferredDatastoreMatchInMultiVC(pv.Spec.CSI.VolumeHandle, multiVCDsUrls) + if isPreferred { + framework.Logf("volume %s is created on preferred datastore %v", pv.Spec.CSI.VolumeHandle, multiVCDsUrls) + flag = 
true + } } } } @@ -302,7 +346,7 @@ func tagSameDatastoreAsPreferenceToDifferentRacks(masterIp string, sshClientConf i := 0 for j := 0; j < len(datastoreNames); j++ { i = i + 1 - err := attachTagToPreferredDatastore(masterIp, sshClientConfig, datastoreNames[j], zoneValue) + err := attachTagToPreferredDatastore(masterIp, sshClientConfig, datastoreNames[j], zoneValue, false, 0) if err != nil { return err } @@ -317,14 +361,22 @@ func tagSameDatastoreAsPreferenceToDifferentRacks(masterIp string, sshClientConf tagPreferredDatastore method is used to tag the datastore which is chosen for volume provisioning */ func tagPreferredDatastore(masterIp string, sshClientConfig *ssh.ClientConfig, zoneValue string, itr int, - datastoreListMap map[string]string, datastoreNames []string) ([]string, error) { + datastoreListMap map[string]string, datastoreNames []string, + isMultiVcSetup bool, clientIndex int) ([]string, error) { var preferredDatastorePaths []string + var err error i := 0 if datastoreNames == nil { for dsName := range datastoreListMap { i = i + 1 preferredDatastorePaths = append(preferredDatastorePaths, dsName) - err := attachTagToPreferredDatastore(masterIp, sshClientConfig, dsName, zoneValue) + if !isMultiVcSetup { + err = attachTagToPreferredDatastore(masterIp, sshClientConfig, dsName, zoneValue, + false, clientIndex) + } else { + err = attachTagToPreferredDatastore(masterIp, sshClientConfig, dsName, zoneValue, + true, clientIndex) + } if err != nil { return preferredDatastorePaths, err } @@ -337,7 +389,13 @@ func tagPreferredDatastore(masterIp string, sshClientConfig *ssh.ClientConfig, z for dsName := range datastoreListMap { if !slices.Contains(datastoreNames, dsName) { preferredDatastorePaths = append(preferredDatastorePaths, dsName) - err := attachTagToPreferredDatastore(masterIp, sshClientConfig, dsName, zoneValue) + if !isMultiVcSetup { + err = attachTagToPreferredDatastore(masterIp, sshClientConfig, dsName, zoneValue, + false, clientIndex) + } else { + err = attachTagToPreferredDatastore(masterIp, sshClientConfig, dsName, zoneValue, + true, clientIndex) + } if err != nil { return preferredDatastorePaths, err } @@ -354,12 +412,11 @@ func tagPreferredDatastore(masterIp string, sshClientConfig *ssh.ClientConfig, z // restartCSIDriver method restarts the csi driver func restartCSIDriver(ctx context.Context, client clientset.Interface, namespace string, csiReplicas int32) (bool, error) { - isServiceStopped, err := stopCSIPods(ctx, client) + isServiceStopped, err := stopCSIPods(ctx, client, namespace) if err != nil { return isServiceStopped, err } - isServiceStarted, err := startCSIPods(ctx, client, csiReplicas) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isServiceStarted, err := startCSIPods(ctx, client, csiReplicas, namespace) if err != nil { return isServiceStarted, err } @@ -371,20 +428,40 @@ setPreferredDatastoreTimeInterval method is used to set the time interval at whi datastores are refreshed in the environment */ func setPreferredDatastoreTimeInterval(client clientset.Interface, ctx context.Context, - csiNamespace string, namespace string, csiReplicas int32) { + csiNamespace string, csiReplicas int32, isMultiVcSetup bool) { + + var modifiedConf string + var vsphereCfg e2eTestConfig + + // read current secret currentSecret, err := client.CoreV1().Secrets(csiNamespace).Get(ctx, configSecret, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // read original conf originalConf := string(currentSecret.Data[vSphereCSIConf]) - vsphereCfg, err := 
readConfigFromSecretString(originalConf) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - if vsphereCfg.Global.CSIFetchPreferredDatastoresIntervalInMin == 0 { - vsphereCfg.Global.CSIFetchPreferredDatastoresIntervalInMin = preferredDatastoreRefreshTimeInterval - modifiedConf, err := writeConfigToSecretString(vsphereCfg) + + if !isMultiVcSetup { + vsphereCfg, err = readConfigFromSecretString(originalConf) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Updating the secret to reflect new changes") - currentSecret.Data[vSphereCSIConf] = []byte(modifiedConf) - _, err = client.CoreV1().Secrets(csiNamespace).Update(ctx, currentSecret, metav1.UpdateOptions{}) + } else { + vsphereCfg, err = readVsphereConfCredentialsInMultiVcSetup(originalConf) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if vsphereCfg.Global.CSIFetchPreferredDatastoresIntervalInMin == 0 { + vsphereCfg.Global.CSIFetchPreferredDatastoresIntervalInMin = preferredDatastoreRefreshTimeInterval + if !isMultiVcSetup { + modifiedConf, err = writeConfigToSecretString(vsphereCfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Updating the secret to reflect new changes") + currentSecret.Data[vSphereCSIConf] = []byte(modifiedConf) + _, err = client.CoreV1().Secrets(csiNamespace).Update(ctx, currentSecret, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + err = writeNewDataAndUpdateVsphereConfSecret(client, ctx, csiNamespace, vsphereCfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + // restart csi driver restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") @@ -409,10 +486,15 @@ func getNonSharedDatastoresInCluster(ClusterdatastoreListMap map[string]string, } // deleteTagCreatedForPreferredDatastore method is used to delete the tag created on preferred datastore -func deleteTagCreatedForPreferredDatastore(masterIp string, sshClientConfig *ssh.ClientConfig, tagName []string) error { +func deleteTagCreatedForPreferredDatastore(masterIp string, sshClientConfig *ssh.ClientConfig, + tagName []string, isMultiVcSetup bool) error { + var deleteTagCat string for i := 0; i < len(tagName); i++ { - deleteTagCat := govcLoginCmd() + - "govc tags.rm -f -c " + preferredDSCat + " " + tagName[i] + if !isMultiVcSetup { + deleteTagCat = govcLoginCmd() + "govc tags.rm -f -c " + preferredDSCat + " " + tagName[i] + } else { + deleteTagCat = govcLoginCmdForMultiVC(i) + "govc tags.rm -f -c " + preferredDSCat + " " + tagName[i] + } framework.Logf("Deleting tag created for preferred datastore: %s ", deleteTagCat) deleteTagCatRes, err := sshExec(sshClientConfig, masterIp, deleteTagCat) if err != nil && deleteTagCatRes.Code != 0 { @@ -425,10 +507,18 @@ func deleteTagCreatedForPreferredDatastore(masterIp string, sshClientConfig *ssh } // createTagForPreferredDatastore method is used to create tag required for choosing preferred datastore -func createTagForPreferredDatastore(masterIp string, sshClientConfig *ssh.ClientConfig, tagName []string) error { +func createTagForPreferredDatastore(masterIp string, sshClientConfig *ssh.ClientConfig, + tagName []string, isMultiVcSetup bool) error { + var createTagCat string for i := 0; i < len(tagName); i++ { - createTagCat := govcLoginCmd() + - "govc tags.create -d '" + preferredTagDesc + "' -c " + preferredDSCat + " " + tagName[i] + if !isMultiVcSetup { + createTagCat = govcLoginCmd() + + "govc tags.create -d '" + preferredTagDesc + "' 
-c " + preferredDSCat + " " + tagName[i] + } else { + createTagCat = govcLoginCmdForMultiVC(i) + + "govc tags.create -d '" + preferredTagDesc + "' -c " + preferredDSCat + " " + tagName[i] + framework.Logf(createTagCat) + } framework.Logf("Creating tag for preferred datastore: %s ", createTagCat) createTagCatRes, err := sshExec(sshClientConfig, masterIp, createTagCat) if err != nil && createTagCatRes.Code != 0 { @@ -446,7 +536,7 @@ volume snapshot and to verify if volume snapshot has created or not */ func createSnapshotClassAndVolSnapshot(ctx context.Context, snapc *snapclient.Clientset, namespace string, pvclaim *v1.PersistentVolumeClaim, - volHandle string, stsPvc bool) (*snapV1.VolumeSnapshot, *snapV1.VolumeSnapshotClass, string) { + volHandle string, stsPvc bool, isMultiVcSetup bool) (*snapV1.VolumeSnapshot, *snapV1.VolumeSnapshotClass, string) { framework.Logf("Create volume snapshot class") volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, @@ -479,8 +569,13 @@ func createSnapshotClassAndVolSnapshot(ctx context.Context, snapc *snapclient.Cl snapshotId := strings.Split(snapshothandle, "+")[1] framework.Logf("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if !isMultiVcSetup { + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } return volumeSnapshot, volumeSnapshotClass, snapshotId } @@ -491,26 +586,28 @@ snapshot class, volume snapshot created for pvc post testcase completion */ func performCleanUpForSnapshotCreated(ctx context.Context, snapc *snapclient.Clientset, namespace string, volHandle string, volumeSnapshot *snapV1.VolumeSnapshot, snapshotId string, - volumeSnapshotClass *snapV1.VolumeSnapshotClass) { + volumeSnapshotClass *snapV1.VolumeSnapshotClass, pandoraSyncWaitTime int, isMultiVcSetup bool) { framework.Logf("Delete volume snapshot and verify the snapshot content is deleted") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) framework.Logf("Wait till the volume snapshot is deleted") - err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) + err := waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - framework.Logf("Deleting volume snapshot Again to check Not found error") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) - if !apierrors.IsNotFound(err) { + if !isMultiVcSetup { + framework.Logf("Verify snapshot entry is deleted from CNS") + err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + framework.Logf("Verify snapshot entry is deleted from CNS") + err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId, true) gomega.Expect(err).NotTo(gomega.HaveOccurred()) 
} + framework.Logf("Deleting volume snapshot Again to check Not found error") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + framework.Logf("Deleting volume snapshot class") err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, metav1.DeleteOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/tests/e2e/preupgrade_datasetup.go b/tests/e2e/preupgrade_datasetup.go index c75957180a..b7a1e51564 100644 --- a/tests/e2e/preupgrade_datasetup.go +++ b/tests/e2e/preupgrade_datasetup.go @@ -92,7 +92,7 @@ var _ = ginkgo.Describe("PreUpgrade datasetup Test", func() { statefulset := GetStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storageClassName + Spec.StorageClassName = &storageClassName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) // Waiting for pods status to be Ready diff --git a/tests/e2e/prevent_duplicate_cluster_ids.go b/tests/e2e/prevent_duplicate_cluster_ids.go new file mode 100644 index 0000000000..a780c85537 --- /dev/null +++ b/tests/e2e/prevent_duplicate_cluster_ids.go @@ -0,0 +1,949 @@ +/* + Copyright 2023 The Kubernetes Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package e2e + +import ( + "context" + "fmt" + "math/rand" + "strings" + + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "golang.org/x/crypto/ssh" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + fnodes "k8s.io/kubernetes/test/e2e/framework/node" + fpod "k8s.io/kubernetes/test/e2e/framework/pod" + fpv "k8s.io/kubernetes/test/e2e/framework/pv" + fssh "k8s.io/kubernetes/test/e2e/framework/ssh" + admissionapi "k8s.io/pod-security-admission/api" +) + +var _ = ginkgo.Describe("Prevent duplicate cluster ID", func() { + f := framework.NewDefaultFramework("cluster-id-test") + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + var ( + client clientset.Interface + namespace string + csiNamespace string + csiReplicas int32 + vCenterUIUser string + vCenterUIPassword string + clusterId string + revertToOriginalVsphereSecret bool + vCenterIP string + vCenterPort string + dataCenter string + scParameters map[string]string + accessMode v1.PersistentVolumeAccessMode + sshClientConfig *ssh.ClientConfig + nimbusGeneratedK8sVmPwd string + ) + + ginkgo.BeforeEach(func() { + var cancel context.CancelFunc + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + client = f.ClientSet + namespace = f.Namespace.Name + bootstrap() + nodeList, err := fnodes.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + if !(len(nodeList.Items) > 0) { + framework.Failf("Unable to find ready and schedulable Node") + } + scParameters = make(map[string]string) + accessMode = v1.ReadWriteOnce + // fetching required parameters + + csiNamespace = GetAndExpectStringEnvVar(envCSINamespace) + csiDeployment, err := client.AppsV1().Deployments(csiNamespace).Get( + ctx, vSphereCSIControllerPodNamePrefix, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + csiReplicas = *csiDeployment.Spec.Replicas + + vsphereCfg := getCSIConfigSecretData(client, ctx, csiNamespace) + vCenterUIUser = vsphereCfg.Global.User + vCenterUIPassword = vsphereCfg.Global.Password + dataCenter = vsphereCfg.Global.Datacenters + clusterId = vsphereCfg.Global.ClusterID + vCenterIP = e2eVSphere.Config.Global.VCenterHostname + vCenterPort = e2eVSphere.Config.Global.VCenterPort + framework.Logf("clusterId: %v", clusterId) + revertToOriginalVsphereSecret = false + nimbusGeneratedK8sVmPwd = GetAndExpectStringEnvVar(nimbusK8sVmPwd) + + sshClientConfig = &ssh.ClientConfig{ + User: "root", + Auth: []ssh.AuthMethod{ + ssh.Password(nimbusGeneratedK8sVmPwd), + }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + }) + + ginkgo.AfterEach(func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if !revertToOriginalVsphereSecret { + ginkgo.By("Delete vsphere-csi-cluster-id configmap if it exists") + _, err := client.CoreV1().ConfigMaps(csiNamespace).Get(ctx, + vsphereClusterIdConfigMapName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + err = client.CoreV1().ConfigMaps(csiNamespace).Delete(ctx, + vsphereClusterIdConfigMapName, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By("Reverting back to original vsphere secret") + framework.Logf("clusterId: %v", clusterId) + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + clusterId, vCenterPort, dataCenter, 
csiReplicas) + } + }) + + /* + Generate unique cluster id through configmap and create workloads + 1. Create vsphere config secret with no cluster id field. + 2. Validate that "vsphere-csi-cluster-id" configmap is generated with a unique cluster id. + 3. Create statefulset with replica 3 and a deployment. + 4. Verify all PVCs are in bound state and pods are in running state. + 5. Scale sts replica to 5. + 6. Verify cns metadata and check if cluster id is populated in cns metadata. + 7. Clean up the sts, deployment, pods and PVCs. + + */ + ginkgo.It("[csi-config-secret-block][csi-config-secret-file] Generate unique cluster id through configmap"+ + " and create workloads", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + framework.Logf("CNS_TEST: Running for vanilla k8s setup") + + ginkgo.By("Creating csi config secret with no cluster id field set") + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + "", vCenterPort, dataCenter, csiReplicas) + + defer func() { + if !revertToOriginalVsphereSecret { + ginkgo.By("Delete vsphere-csi-cluster-id configmap if it exists") + _, err := client.CoreV1().ConfigMaps(csiNamespace).Get(ctx, + vsphereClusterIdConfigMapName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + err = client.CoreV1().ConfigMaps(csiNamespace).Delete(ctx, + vsphereClusterIdConfigMapName, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By("Reverting back to original vsphere secret") + framework.Logf("clusterId: %v", clusterId) + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + clusterId, vCenterPort, dataCenter, csiReplicas) + } + revertToOriginalVsphereSecret = true + }() + + ginkgo.By("Verify cluster id configmap is auto generated by csi driver") + verifyClusterIdConfigMapGeneration(client, ctx, csiNamespace, true) + clusterID := fetchClusterIdFromConfigmap(client, ctx, csiNamespace) + framework.Logf("clusterID: %v", clusterID) + + ginkgo.By("Creating Storage Class") + if rwxAccessMode { + scParameters[scParamFsType] = nfs4FSType + } + sc, err := createStorageClass(client, scParameters, nil, "", "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By("Delete Storage Class") + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + // Check if it is file volumes setups + if rwxAccessMode { + accessMode = v1.ReadWriteMany + } + ginkgo.By("Creating statefulset with replica 3 and a deployment") + statefulset, deployment, _ := createStsDeployment(ctx, client, namespace, sc, true, + false, 0, "", accessMode) + replicas := *(statefulset.Spec.Replicas) + + defer func() { + scaleDownNDeleteStsDeploymentsInNamespace(ctx, client, namespace) + pvcs, err := client.CoreV1().PersistentVolumeClaims(namespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, claim := range pvcs.Items { + pv := getPvFromClaim(client, namespace, claim.Name) + err := fpv.DeletePersistentVolumeClaim(client, claim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Verify it's PV and corresponding volumes are deleted from CNS") + err = fpv.WaitForPersistentVolumeDeleted(client, pv.Name, 
poll, + pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeHandle := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the CNS after it is deleted from "+ + "kubernetes", volumeHandle)) + } + }() + + // Scale up replicas of statefulset and verify CNS entries for volumes + scaleUpStsAndVerifyPodMetadata(ctx, client, namespace, statefulset, + replicas+2, true, true) + verifyVolumeMetadataOnDeployments(ctx, client, deployment, namespace, nil, nil, + nil, "") + err = checkClusterIdValueOnWorkloads(&e2eVSphere, client, ctx, namespace, clusterID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + scaleDownNDeleteStsDeploymentsInNamespace(ctx, client, namespace) + + }) + + /* + Modify unique cluster id value in Configmap + 1. Create vsphere config secret with no cluster id field. + 2. Validate that "vsphere-csi-cluster-id" configmap is generated with a unique cluster id. + 3. Change the cluster id value in "vsphere-csi-cluster-id" configmap, which should throw a proper error. + + */ + ginkgo.It("[csi-config-secret-block][csi-config-secret-file] Modify unique cluster id value in"+ + " Configmap", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + framework.Logf("CNS_TEST: Running for vanilla k8s setup") + + ginkgo.By("Creating csi config secret with no cluster id field set") + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + "", vCenterPort, dataCenter, csiReplicas) + + defer func() { + if !revertToOriginalVsphereSecret { + ginkgo.By("Delete vsphere-csi-cluster-id configmap if it exists") + _, err := client.CoreV1().ConfigMaps(csiNamespace).Get(ctx, + vsphereClusterIdConfigMapName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + err = client.CoreV1().ConfigMaps(csiNamespace).Delete(ctx, + vsphereClusterIdConfigMapName, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By("Reverting back to original vsphere secret") + framework.Logf("clusterId: %v", clusterId) + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + clusterId, vCenterPort, dataCenter, csiReplicas) + } + revertToOriginalVsphereSecret = true + }() + + ginkgo.By("Verify cluster id configmap is auto generated by csi driver") + verifyClusterIdConfigMapGeneration(client, ctx, csiNamespace, true) + + ginkgo.By("Modify unique cluster id value in Configmap") + clusterIdCm, err := client.CoreV1().ConfigMaps(csiNamespace).Get(ctx, vsphereClusterIdConfigMapName, + metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + clusterIdCm.Data["clusterID"] = "cluster1" + _, err = client.CoreV1().ConfigMaps(csiNamespace).Update(ctx, clusterIdCm, + metav1.UpdateOptions{}) + framework.Logf("Error from updating cluster id value in configmap is : %v", err.Error()) + gomega.Expect(err).To(gomega.HaveOccurred()) + + }) + + /* + Generate cluster id and then set cluster id in vsphere config secret + and remove cluster id field in vsphere config secret + 1. Create vsphere config secret with no cluster id field. + 2. Validate that "vsphere-csi-cluster-id" configmap is generated with a unique cluster id. + 3. Create statefulset with replica 3 and a deployment. + 4. Verify all PVCs are in bound state and pods are in running state. + 5. 
Delete config secret and create config secret with cluster id set and restart csi driver. + 6. Check if "vsphere-csi-cluster-id" configmap still exists. + 7. Verify CSI pods go into crashing state with a proper error message. + 8. Remove cluster id field from vsphere config secret and restart csi driver. + 9. Verify csi pods are in running state. + 10. Scale sts replica to 5. + 11. Verify cns metadata and check if cluster id is populated in cns metadata. + 12. Clean up the sts, deployment, pods and PVCs. + + */ + ginkgo.It("[csi-config-secret-block][csi-config-secret-file] Generate cluster id and then set cluster id "+ + "in vsphere config secret and remove cluster id field in vsphere config secret", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + framework.Logf("CNS_TEST: Running for vanilla k8s setup") + + ginkgo.By("Creating csi config secret with no cluster id field set") + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + "", vCenterPort, dataCenter, csiReplicas) + defer func() { + if !revertToOriginalVsphereSecret { + ginkgo.By("Delete vsphere-csi-cluster-id configmap if it exists") + _, err := client.CoreV1().ConfigMaps(csiNamespace).Get(ctx, + vsphereClusterIdConfigMapName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + err = client.CoreV1().ConfigMaps(csiNamespace).Delete(ctx, + vsphereClusterIdConfigMapName, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By("Reverting back to original vsphere secret") + framework.Logf("clusterId: %v", clusterId) + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + clusterId, vCenterPort, dataCenter, csiReplicas) + } + revertToOriginalVsphereSecret = true + }() + + ginkgo.By("Verify cluster id configmap is auto generated by csi driver") + verifyClusterIdConfigMapGeneration(client, ctx, csiNamespace, true) + clusterID := fetchClusterIdFromConfigmap(client, ctx, csiNamespace) + framework.Logf("clusterID: %v", clusterID) + + ginkgo.By("Creating Storage Class") + if rwxAccessMode { + scParameters[scParamFsType] = nfs4FSType + } + sc, err := createStorageClass(client, scParameters, nil, "", "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By("Delete Storage Class") + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + // Check if it is file volumes setups + if rwxAccessMode { + accessMode = v1.ReadWriteMany + } + ginkgo.By("Creating statefulset with replica 3 and a deployment") + statefulset, deployment, _ := createStsDeployment(ctx, client, namespace, sc, true, + false, 0, "", accessMode) + replicas := *(statefulset.Spec.Replicas) + + defer func() { + scaleDownNDeleteStsDeploymentsInNamespace(ctx, client, namespace) + pvcs, err := client.CoreV1().PersistentVolumeClaims(namespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, claim := range pvcs.Items { + pv := getPvFromClaim(client, namespace, claim.Name) + err := fpv.DeletePersistentVolumeClaim(client, claim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Verify it's PV and corresponding volumes are deleted from CNS") + err = 
fpv.WaitForPersistentVolumeDeleted(client, pv.Name, poll, + pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeHandle := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the CNS after it is deleted from "+ + "kubernetes", volumeHandle)) + } + }() + + ginkgo.By("Creating csi config secret with cluster id field set") + createCsiVsphereSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, + vCenterIP, vCenterPort, dataCenter, "", "cluster1") + + ginkgo.By("Restart CSI driver") + _, err = restartCSIDriver(ctx, client, csiNamespace, csiReplicas) + gomega.Expect(err).To(gomega.HaveOccurred()) + ginkgo.By("Verify that cluster id configmap still exists") + verifyClusterIdConfigMapGeneration(client, ctx, csiNamespace, true) + + ginkgo.By("Check if csi pods are in crashing state after recreation of secret with proper message") + csipods, err := client.CoreV1().Pods(csiNamespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + k8sMasterIPs := getK8sMasterIPs(ctx, client) + k8sMasterIP := k8sMasterIPs[0] + var csiPodName string + for _, csiPod := range csipods.Items { + if strings.Contains(csiPod.Name, vSphereCSIControllerPodNamePrefix) { + csiPodName = csiPod.Name + break + } + } + errMessage := "Please remove the cluster ID from vSphere Config Secret." + grepCmdForErrMsg := "echo `kubectl logs " + csiPodName + " -n " + + csiSystemNamespace + " --allContainers" + " | grep " + "'" + errMessage + + framework.Logf("Invoking command '%v' on host %v", grepCmdForErrMsg, + k8sMasterIP) + result, err := sshExec(sshClientConfig, k8sMasterIP, + grepCmdForErrMsg) + if err != nil || result.Code != 0 { + fssh.LogResult(result) + gomega.Expect(err).To(gomega.HaveOccurred(), fmt.Sprintf("couldn't execute command: %s on host: %v , error: %s", + grepCmdForErrMsg, k8sMasterIP, err)) + } + if result.Stdout != "" { + framework.Logf("CSI pods are in crashing state with proper error message") + } else { + framework.Logf("CSI pods are in crashing state with improper error message") + gomega.Expect(err).To(gomega.HaveOccurred()) + } + + ginkgo.By("Remove cluster id field from vsphere config secret and verify csi pods are in running state") + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + "", vCenterPort, dataCenter, csiReplicas) + newclusterID := fetchClusterIdFromConfigmap(client, ctx, csiNamespace) + if clusterID != newclusterID { + framework.Failf("New clusterID should not be generated") + } + csipods, err = client.CoreV1().Pods(csiNamespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpod.WaitForPodsRunningReady(client, csiNamespace, int32(csipods.Size()), 0, pollTimeout, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Scale up replicas of statefulset and verify CNS entries for volumes") + scaleUpStsAndVerifyPodMetadata(ctx, client, namespace, statefulset, + replicas+2, true, true) + verifyVolumeMetadataOnDeployments(ctx, client, deployment, namespace, nil, nil, + nil, "") + err = checkClusterIdValueOnWorkloads(&e2eVSphere, client, ctx, namespace, clusterID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + scaleDownNDeleteStsDeploymentsInNamespace(ctx, client, namespace) + + }) + + /* + Recreate vsphere config secret multiple times + 1. Create vsphere config secret with no cluster id field. 
+ 2. Validate that "vsphere-csi-cluster-id" configmap is generated with a unique cluster id. + 3. Create statefulset with replica 3 and a deployment. + 4. Verify all PVCs are in bound state and pods are in running state. + 5. Delete and create vsphere config secret multiple times(3-4 times atleast continuously). + 6. Verify "vsphere-csi-cluster-id" configmap still exists with same cluster id. + 7. Scale sts replica to 5. + 8. Verify cns metadata and check if cluster id is populated in cns metadata. + 9. Clean up the sts, deployment, pods and PVCs. + + */ + ginkgo.It("[csi-config-secret-block][csi-config-secret-file] Recreate vsphere config secret multiple"+ + " times", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + framework.Logf("CNS_TEST: Running for vanilla k8s setup") + + ginkgo.By("Creating csi config secret with no cluster id field set") + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + "", vCenterPort, dataCenter, csiReplicas) + + defer func() { + if !revertToOriginalVsphereSecret { + ginkgo.By("Delete vsphere-csi-cluster-id configmap if it exists") + _, err := client.CoreV1().ConfigMaps(csiNamespace).Get(ctx, + vsphereClusterIdConfigMapName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + err = client.CoreV1().ConfigMaps(csiNamespace).Delete(ctx, + vsphereClusterIdConfigMapName, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By("Reverting back to original vsphere secret") + framework.Logf("clusterId: %v", clusterId) + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + clusterId, vCenterPort, dataCenter, csiReplicas) + } + revertToOriginalVsphereSecret = true + }() + + ginkgo.By("Verify cluster id configmap is auto generated by csi driver") + verifyClusterIdConfigMapGeneration(client, ctx, csiNamespace, true) + clusterID := fetchClusterIdFromConfigmap(client, ctx, csiNamespace) + framework.Logf("clusterID: %v", clusterID) + + ginkgo.By("Creating Storage Class") + if rwxAccessMode { + scParameters[scParamFsType] = nfs4FSType + } + sc, err := createStorageClass(client, scParameters, nil, "", "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By("Delete Storage Class") + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + // Check if it is file volumes setups + if rwxAccessMode { + accessMode = v1.ReadWriteMany + } + ginkgo.By("Creating statefulset with replica 3 and a deployment") + statefulset, deployment, _ := createStsDeployment(ctx, client, namespace, sc, true, + false, 0, "", accessMode) + replicas := *(statefulset.Spec.Replicas) + + defer func() { + scaleDownNDeleteStsDeploymentsInNamespace(ctx, client, namespace) + pvcs, err := client.CoreV1().PersistentVolumeClaims(namespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, claim := range pvcs.Items { + pv := getPvFromClaim(client, namespace, claim.Name) + err := fpv.DeletePersistentVolumeClaim(client, claim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Verify it's PV and corresponding volumes are deleted from CNS") + err = fpv.WaitForPersistentVolumeDeleted(client, pv.Name, poll, 
+ pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeHandle := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the CNS after it is deleted from "+ + "kubernetes", volumeHandle)) + } + }() + + ginkgo.By("Recreating CSI config secret multiple times to verify if a" + + "new cluster id configmap gets auto generated") + for i := 0; i < 3; i++ { + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + "", vCenterPort, dataCenter, csiReplicas) + } + newclusterID := fetchClusterIdFromConfigmap(client, ctx, csiNamespace) + if clusterID != newclusterID { + framework.Failf("New clusterID should not be generated") + } + + ginkgo.By("Scale up replicas of statefulset and verify CNS entries for volumes") + scaleUpStsAndVerifyPodMetadata(ctx, client, namespace, statefulset, + replicas+2, true, true) + verifyVolumeMetadataOnDeployments(ctx, client, deployment, namespace, nil, nil, + nil, "") + err = checkClusterIdValueOnWorkloads(&e2eVSphere, client, ctx, namespace, clusterID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + scaleDownNDeleteStsDeploymentsInNamespace(ctx, client, namespace) + + }) + + /* + Create vsphere config secret with cluster id value set with special characters + 1. Special characters in the user-provided value for cluster-id key in vsphere config secret and try to create PVC + a. cluster-id" key and value including special characters - example "#1$k8s" + b. "cluster-id" key and value as maximum length of characters + 2. The CNS metadata for the PVC should have a cluster id value set. + */ + ginkgo.It("[csi-config-secret-block][csi-config-secret-file] Create vsphere config secret with cluster "+ + "id value set with special characters", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + framework.Logf("CNS_TEST: Running for vanilla k8s setup") + ginkgo.By("Delete vsphere-csi-cluster-id configmap if it exists") + _, err := client.CoreV1().ConfigMaps(csiNamespace).Get(ctx, + vsphereClusterIdConfigMapName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + err = client.CoreV1().ConfigMaps(csiNamespace).Delete(ctx, + vsphereClusterIdConfigMapName, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Creating csi config secret with cluster id value with some special characters field set") + clusterID := "#1$k8s" + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + clusterID, vCenterPort, dataCenter, csiReplicas) + + defer func() { + if !revertToOriginalVsphereSecret { + ginkgo.By("Delete vsphere-csi-cluster-id configmap if it exists") + _, err := client.CoreV1().ConfigMaps(csiNamespace).Get(ctx, + vsphereClusterIdConfigMapName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + err = client.CoreV1().ConfigMaps(csiNamespace).Delete(ctx, + vsphereClusterIdConfigMapName, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By("Reverting back to original vsphere secret") + framework.Logf("clusterId: %v", clusterId) + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + clusterId, vCenterPort, dataCenter, csiReplicas) + } + revertToOriginalVsphereSecret = true + }() + + ginkgo.By("Verify cluster id configmap is not auto generated by csi driver") + 
verifyClusterIdConfigMapGeneration(client, ctx, csiNamespace, false) + + ginkgo.By("Creating Storage Class and PVC") + if rwxAccessMode { + scParameters[scParamFsType] = nfs4FSType + accessMode = v1.ReadWriteMany + } + sc, pvclaim, err := createPVCAndStorageClass(client, + namespace, nil, scParameters, diskSize, nil, "", false, accessMode) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete Storage Class") + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + pvs, err := fpv.WaitForPVClaimBoundPhase(client, []*v1.PersistentVolumeClaim{pvclaim}, + framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + queryResult1, err := e2eVSphere.queryCNSVolumeWithResult(pvs[0].Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(queryResult1.Volumes) > 0).To(gomega.BeTrue()) + + framework.Logf("Cluster ID value on CNS is %s", + queryResult1.Volumes[0].Metadata.ContainerClusterArray[0].ClusterId) + gomega.Expect(queryResult1.Volumes[0].Metadata.ContainerClusterArray[0].ClusterId).Should( + gomega.Equal(clusterID), "Wrong/empty cluster id name present") + + defer func() { + pvcs, err := client.CoreV1().PersistentVolumeClaims(namespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, claim := range pvcs.Items { + pv := getPvFromClaim(client, namespace, claim.Name) + err := fpv.DeletePersistentVolumeClaim(client, claim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Verify it's PV and corresponding volumes are deleted from CNS") + err = fpv.WaitForPersistentVolumeDeleted(client, pvs[0].Name, poll, + pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeHandle := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the CNS after it is deleted from "+ + "kubernetes", volumeHandle)) + } + }() + + err = fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Verify it's PV and corresponding volumes are deleted from CNS") + err = fpv.WaitForPersistentVolumeDeleted(client, pvs[0].Name, poll, + pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeHandle := pvs[0].Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the CNS after it is deleted from "+ + "kubernetes", volumeHandle)) + + letters := []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + // 64 is the max length supported for cluster id value + n := 64 + c := make([]rune, n) + for i := range c { + c[i] = letters[rand.Intn(len(letters))] + } + clusterID = string(c) + + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + clusterID, vCenterPort, dataCenter, csiReplicas) + + ginkgo.By("Verify cluster id configmap is not auto generated by csi driver") + verifyClusterIdConfigMapGeneration(client, ctx, csiNamespace, false) + + sc, pvclaim, err = createPVCAndStorageClass(client, + namespace, nil, scParameters, diskSize, nil, "", false, accessMode) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pvs, err = fpv.WaitForPVClaimBoundPhase(client, 
[]*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + queryResult2, err := e2eVSphere.queryCNSVolumeWithResult(pvs[0].Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(queryResult2.Volumes) > 0).To(gomega.BeTrue()) + + framework.Logf("Cluster ID value on CNS is %s", + queryResult2.Volumes[0].Metadata.ContainerClusterArray[0].ClusterId) + gomega.Expect(queryResult2.Volumes[0].Metadata.ContainerClusterArray[0].ClusterId).Should( + gomega.Equal(clusterID), "Wrong/empty cluster id name present") + + }) + + /* + Restart CSI pods multiple times + 1. Create vsphere config secret with no cluster id field. + 2. Validate that "vsphere-csi-cluster-id" configmap is generated with a unique cluster id. + 3. Create statefulset with replica 3 and a deployment. + 4. Verify all PVCs are in bound state and pods are in running state. + 5. Delete and create vsphere config secret multiple times(3-4 times atleast continuously). + 6. Verify "vsphere-csi-cluster-id" configmap still exists with same cluster id. + 7. Scale sts replica to 5. + 8. Verify cns metadata and check if cluster id is populated in cns metadata. + 9. Clean up the sts, deployment, pods and PVCs. + */ + ginkgo.It("[csi-config-secret-block][csi-config-secret-file] Restart CSI pods multiple"+ + " times", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + framework.Logf("CNS_TEST: Running for vanilla k8s setup") + ginkgo.By("Creating csi config secret with no cluster id field set") + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + "", vCenterPort, dataCenter, csiReplicas) + defer func() { + if !revertToOriginalVsphereSecret { + ginkgo.By("Delete vsphere-csi-cluster-id configmap if it exists") + _, err := client.CoreV1().ConfigMaps(csiNamespace).Get(ctx, + vsphereClusterIdConfigMapName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + err = client.CoreV1().ConfigMaps(csiNamespace).Delete(ctx, + vsphereClusterIdConfigMapName, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By("Reverting back to original vsphere secret") + framework.Logf("clusterId: %v", clusterId) + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + clusterId, vCenterPort, dataCenter, csiReplicas) + } + revertToOriginalVsphereSecret = true + }() + + ginkgo.By("Verify cluster id configmap is auto generated by csi driver") + verifyClusterIdConfigMapGeneration(client, ctx, csiNamespace, true) + clusterID := fetchClusterIdFromConfigmap(client, ctx, csiNamespace) + framework.Logf("clusterID: %v", clusterID) + + ginkgo.By("Creating Storage Class") + if rwxAccessMode { + scParameters[scParamFsType] = nfs4FSType + } + sc, err := createStorageClass(client, scParameters, nil, "", "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By("Delete Storage Class") + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + // Check if it is file volumes setups + if rwxAccessMode { + accessMode = v1.ReadWriteMany + } + ginkgo.By("Creating statefulset with replica 3 and a deployment") + statefulset, deployment, _ := 
createStsDeployment(ctx, client, namespace, sc, true, + false, 0, "", accessMode) + replicas := *(statefulset.Spec.Replicas) + + defer func() { + scaleDownNDeleteStsDeploymentsInNamespace(ctx, client, namespace) + pvcs, err := client.CoreV1().PersistentVolumeClaims(namespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, claim := range pvcs.Items { + pv := getPvFromClaim(client, namespace, claim.Name) + err := fpv.DeletePersistentVolumeClaim(client, claim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Verify it's PV and corresponding volumes are deleted from CNS") + err = fpv.WaitForPersistentVolumeDeleted(client, pv.Name, poll, + pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeHandle := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the CNS after it is deleted from "+ + "kubernetes", volumeHandle)) + } + }() + for i := 0; i < 3; i++ { + restartSuccess, err := restartCSIDriver(ctx, client, csiNamespace, csiReplicas) + gomega.Expect(restartSuccess).To(gomega.BeTrue(), "csi driver restart not successful") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Scale up replicas of statefulset and verify CNS entries for volumes") + scaleUpStsAndVerifyPodMetadata(ctx, client, namespace, statefulset, + replicas+2, true, true) + verifyVolumeMetadataOnDeployments(ctx, client, deployment, namespace, nil, nil, + nil, "") + err = checkClusterIdValueOnWorkloads(&e2eVSphere, client, ctx, namespace, clusterID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + scaleDownNDeleteStsDeploymentsInNamespace(ctx, client, namespace) + + }) + + /* + Delete CSI driver + 1. Create vsphere config secret with no cluster id field. + 2. Validate that "vsphere-csi-cluster-id" configmap is generated with a unique cluster id. + 3. Create statefulset with replica 3 and a deployment. + 4. Verify all PVCs are in bound state and pods are in running state. + 5. Delete and create vsphere config secret multiple times(3-4 times atleast continuously). + 6. Verify "vsphere-csi-cluster-id" configmap still exists with same cluster id. + 7. Scale sts replica to 5. + 8. Verify cns metadata and check if cluster id is populated in cns metadata. + 9. Clean up the sts, deployment, pods and PVCs. 
+ */ + ginkgo.It("[csi-config-secret-block][csi-config-secret-file] Delete CSI"+ + " driver", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + framework.Logf("CNS_TEST: Running for vanilla k8s setup") + + ginkgo.By("Creating csi config secret with no cluster id field set") + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + "", vCenterPort, dataCenter, csiReplicas) + + defer func() { + if !revertToOriginalVsphereSecret { + ginkgo.By("Delete vsphere-csi-cluster-id configmap if it exists") + _, err := client.CoreV1().ConfigMaps(csiNamespace).Get(ctx, + vsphereClusterIdConfigMapName, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + err = client.CoreV1().ConfigMaps(csiNamespace).Delete(ctx, + vsphereClusterIdConfigMapName, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By("Reverting back to original vsphere secret") + framework.Logf("clusterId: %v", clusterId) + recreateVsphereConfigSecret(client, ctx, vCenterUIUser, vCenterUIPassword, csiNamespace, vCenterIP, + clusterId, vCenterPort, dataCenter, csiReplicas) + } + revertToOriginalVsphereSecret = true + }() + ginkgo.By("Verify cluster id configmap is auto generated by csi driver") + verifyClusterIdConfigMapGeneration(client, ctx, csiNamespace, true) + clusterID := fetchClusterIdFromConfigmap(client, ctx, csiNamespace) + framework.Logf("clusterID: %v", clusterID) + + ginkgo.By("Creating Storage Class") + if rwxAccessMode { + scParameters[scParamFsType] = nfs4FSType + } + sc, err := createStorageClass(client, scParameters, nil, "", "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + ginkgo.By("Delete Storage Class") + err = client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + // Check if it is file volumes setups + if rwxAccessMode { + accessMode = v1.ReadWriteMany + } + ginkgo.By("Creating statefulset with replica 3 and a deployment") + statefulset, deployment, _ := createStsDeployment(ctx, client, namespace, sc, true, + false, 0, "", accessMode) + replicas := *(statefulset.Spec.Replicas) + + defer func() { + scaleDownNDeleteStsDeploymentsInNamespace(ctx, client, namespace) + pvcs, err := client.CoreV1().PersistentVolumeClaims(namespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, claim := range pvcs.Items { + pv := getPvFromClaim(client, namespace, claim.Name) + err := fpv.DeletePersistentVolumeClaim(client, claim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Verify it's PV and corresponding volumes are deleted from CNS") + err = fpv.WaitForPersistentVolumeDeleted(client, pv.Name, poll, + pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeHandle := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the CNS after it is deleted from "+ + "kubernetes", volumeHandle)) + } + }() + + var ignoreLabels map[string]string + list_of_pods, err := fpod.GetPodsInNamespace(client, csiSystemNamespace, ignoreLabels) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + allMasterIps := getK8sMasterIPs(ctx, client) + masterIP 
:= "" + filePath := "/root/vsphere-csi-driver.yaml" + cmd := "kubectl delete -f " + filePath + for _, k8sMasterIP := range allMasterIps { + framework.Logf("Invoking command '%v' on host %v", cmd, + k8sMasterIP) + result, err := sshExec(sshClientConfig, k8sMasterIP, + cmd) + fssh.LogResult(result) + if err == nil { + framework.Logf("File exists on %s", k8sMasterIP) + masterIP = k8sMasterIP + break + } + } + + for _, pod := range list_of_pods { + err = fpod.WaitForPodNotFoundInNamespace(client, pod.Name, csiNamespace, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("pod %q was not deleted: %v", pod.Name, err)) + } + + cmd = "kubectl apply -f " + filePath + framework.Logf("Invoking command '%v' on host %v", cmd, + masterIP) + result, err := sshExec(sshClientConfig, masterIP, + cmd) + if err != nil || result.Code != 0 { + fssh.LogResult(result) + gomega.Expect(err).To(gomega.HaveOccurred(), fmt.Sprintf("couldn't execute command: %s on host: %v , error: %s", + cmd, masterIP, err)) + } + + // Wait for the CSI Pods to be up and Running + list_of_pods, err = fpod.GetPodsInNamespace(client, csiSystemNamespace, ignoreLabels) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + num_csi_pods := len(list_of_pods) + err = fpod.WaitForPodsRunningReady(client, csiSystemNamespace, int32(num_csi_pods), 0, + pollTimeout, ignoreLabels) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + newclusterID := fetchClusterIdFromConfigmap(client, ctx, csiNamespace) + if clusterID != newclusterID { + framework.Failf("New clusterID should not be generated") + } + + ginkgo.By("Scale up replicas of statefulset and verify CNS entries for volumes") + scaleUpStsAndVerifyPodMetadata(ctx, client, namespace, statefulset, + replicas+2, true, true) + verifyVolumeMetadataOnDeployments(ctx, client, deployment, namespace, nil, nil, + nil, "") + err = checkClusterIdValueOnWorkloads(&e2eVSphere, client, ctx, namespace, clusterID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + scaleDownNDeleteStsDeploymentsInNamespace(ctx, client, namespace) + + }) + +}) diff --git a/tests/e2e/provision_with_multiple_zones.go b/tests/e2e/provision_with_multiple_zones.go index 6609e33fa7..9a19524dae 100644 --- a/tests/e2e/provision_with_multiple_zones.go +++ b/tests/e2e/provision_with_multiple_zones.go @@ -166,7 +166,7 @@ var _ = ginkgo.Describe("[csi-topology-vanilla] Topology-Aware-Provisioning-With in the allowed topology //Steps 1. Create SC with multiple Zone and region details specified in the SC - 2. Create statefulset with replica 5 using the above SC + 2. Create statefulset with replica 3 using the above SC 3. Wait for PV, PVC to bound 4. Statefulset should get distributed across zones. 5. 
Describe PV and verify node affinity details should contain both @@ -199,11 +199,17 @@ var _ = ginkgo.Describe("[csi-topology-vanilla] Topology-Aware-Provisioning-With gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - // Creating statefulset with 5 replicas - ginkgo.By("Creating statefulset with 5 replica") + ginkgo.By("Creating service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + + // Creating statefulset with 3 replicas + ginkgo.By("Creating statefulset with 3 replica") statefulset := GetStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") - var replica int32 = 5 + var replica int32 = 3 statefulset.Spec.Replicas = &replica replicas := *(statefulset.Spec.Replicas) CreateStatefulSet(namespace, statefulset, client) diff --git a/tests/e2e/raw_block_volume.go b/tests/e2e/raw_block_volume.go new file mode 100644 index 0000000000..de8e1d0fdf --- /dev/null +++ b/tests/e2e/raw_block_volume.go @@ -0,0 +1,1307 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "math/rand" + "os" + "os/exec" + "strconv" + "strings" + "time" + + snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + cnstypes "github.com/vmware/govmomi/cns/types" + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/types" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" + "k8s.io/kubernetes/test/e2e/framework" + fnodes "k8s.io/kubernetes/test/e2e/framework/node" + fpod "k8s.io/kubernetes/test/e2e/framework/pod" + fpv "k8s.io/kubernetes/test/e2e/framework/pv" + fss "k8s.io/kubernetes/test/e2e/framework/statefulset" + admissionapi "k8s.io/pod-security-admission/api" +) + +const ( + statefulset_volname string = "block-vol" + statefulset_devicePath string = "/dev/testblk" + pod_devicePathPrefix string = "/mnt/volume" + volsizeInMiBBeforeExpansion int64 = 2048 +) + +var _ = ginkgo.Describe("raw block volume support", func() { + + f := framework.NewDefaultFramework("e2e-raw-block-volume") + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + var ( + namespace string + client clientset.Interface + defaultDatacenter *object.Datacenter + datastoreURL string + scParameters map[string]string + storageClassName string + storagePolicyName string + svcPVCName string + rawBlockVolumeMode = corev1.PersistentVolumeBlock + pandoraSyncWaitTime int + deleteFCDRequired bool + fcdID string + pv *corev1.PersistentVolume + pvc *corev1.PersistentVolumeClaim + snapc *snapclient.Clientset + restConfig *restclient.Config + guestClusterRestConfig *restclient.Config + ) + + ginkgo.BeforeEach(func() { + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + namespace = getNamespaceToRunTests(f) + client = f.ClientSet + bootstrap() + nodeList, err := fnodes.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + if !(len(nodeList.Items) > 0) { + framework.Failf("Unable to find ready and schedulable Node") + } + + //Get snapshot client using the rest config + if !guestCluster { + restConfig = getRestConfigClient() + snapc, err = snapclient.NewForConfig(restConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + guestClusterRestConfig = getRestConfigClientForGuestCluster(guestClusterRestConfig) + snapc, err = snapclient.NewForConfig(guestClusterRestConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + sc, err := client.StorageV1().StorageClasses().Get(ctx, defaultNginxStorageClassName, metav1.GetOptions{}) + if err == nil && sc != nil { + gomega.Expect(client.StorageV1().StorageClasses().Delete(ctx, sc.Name, + *metav1.NewDeleteOptions(0))).NotTo(gomega.HaveOccurred()) + } + scParameters = make(map[string]string) + storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) + + if os.Getenv(envPandoraSyncWaitTime) != "" { + pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + pandoraSyncWaitTime = defaultPandoraSyncWaitTime + } + deleteFCDRequired = false + + var datacenters []string + datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) + finder := find.NewFinder(e2eVSphere.Client.Client, false) + cfg, err := getConfig() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + dcList := strings.Split(cfg.Global.Datacenters, ",") + for _, dc := range dcList { + dcName := strings.TrimSpace(dc) + if dcName != "" { + datacenters = append(datacenters, dcName) + } + } + for _, dc := range datacenters { + defaultDatacenter, err = finder.Datacenter(ctx, dc) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + finder.SetDatacenter(defaultDatacenter) + defaultDatastore, err = getDatastoreByURL(ctx, datastoreURL, defaultDatacenter) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if guestCluster { + svcClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, rqLimit) + } + }) + + ginkgo.AfterEach(func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By(fmt.Sprintf("Deleting all statefulsets and/or deployments in namespace: %v", namespace)) + scaleDownNDeleteStsDeploymentsInNamespace(ctx, client, namespace) + ginkgo.By(fmt.Sprintf("Deleting service nginx in namespace: %v", namespace)) + err := client.CoreV1().Services(namespace).Delete(ctx, servicename, *metav1.NewDeleteOptions(0)) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + if supervisorCluster { + dumpSvcNsEventsOnTestFailure(client, namespace) + } + if guestCluster { + svcClient, svNamespace := getSvcClientAndNamespace() + setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + } + if deleteFCDRequired { + ginkgo.By(fmt.Sprintf("Deleting FCD: %s", fcdID)) + err := e2eVSphere.deleteFCD(ctx, fcdID, defaultDatastore.Reference()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + }) + + /* + Test statefulset scaleup/scaledown operations with raw block volume + Steps + 1. Create a storage class. + 2. Create nginx service. + 3. 
Create nginx statefulsets with 3 replicas and using raw block volume. + 4. Wait until all Pods are ready and PVCs are bounded with PV. + 5. Scale down statefulsets to 2 replicas. + 6. Scale up statefulsets to 3 replicas. + 7. Scale down statefulsets to 0 replicas and delete all pods. + 8. Delete all PVCs from the tests namespace. + 9. Delete the storage class. + */ + ginkgo.It("[csi-block-vanilla] [csi-block-vanilla-parallelized] [csi-guest]"+ + "Statefulset testing with raw block volume and default podManagementPolicy", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + storageClassName = defaultNginxStorageClassName + if vanillaCluster { + ginkgo.By("CNS_TEST: Running for vanilla k8s setup") + } else if guestCluster { + ginkgo.By("CNS_TEST: Running for GC setup") + storageClassName = defaultNginxStorageClassName + scParameters[svStorageClassName] = storagePolicyName + } + + ginkgo.By("Creating StorageClass for Statefulset") + scSpec := getVSphereStorageClassSpec(storageClassName, scParameters, nil, "", "", false) + sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + + ginkgo.By("Creating statefulset with raw block volume") + scName := defaultNginxStorageClassName + statefulset := GetStatefulSetFromManifest(namespace) + statefulset.Spec.Template.Spec.Containers[len(statefulset.Spec.Template.Spec.Containers)-1].VolumeMounts = nil + statefulset.Spec.Template.Spec.Containers[len(statefulset.Spec.Template.Spec.Containers)-1]. + VolumeDevices = []corev1.VolumeDevice{ + { + Name: statefulset_volname, + DevicePath: statefulset_devicePath, + }, + } + statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. 
+ Spec.StorageClassName = &scName + statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1].ObjectMeta.Name = + statefulset_volname + statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1].Spec.VolumeMode = + &rawBlockVolumeMode + CreateStatefulSet(namespace, statefulset, client) + replicas := *(statefulset.Spec.Replicas) + defer func() { + ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) + fss.DeleteAllStatefulSets(client, namespace) + }() + + // Waiting for pods status to be Ready + fss.WaitForStatusReadyReplicas(client, statefulset, replicas) + // Check if raw device available inside all pods of statefulset + gomega.Expect(CheckDevice(client, statefulset, statefulset_devicePath)).NotTo(gomega.HaveOccurred()) + + ssPodsBeforeScaleDown := fss.GetPodList(client, statefulset) + gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), + fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) + gomega.Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(gomega.BeTrue(), + "Number of Pods in the statefulset should match with number of replicas") + + // Get the list of Volumes attached to Pods before scale down + var volumesBeforeScaleDown []string + for _, sspod := range ssPodsBeforeScaleDown.Items { + _, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, volumespec := range sspod.Spec.Volumes { + if volumespec.PersistentVolumeClaim != nil { + pv := getPvFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) + volumeID := pv.Spec.CSI.VolumeHandle + if guestCluster { + volumeID = getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle) + } + volumesBeforeScaleDown = append(volumesBeforeScaleDown, volumeID) + // Verify the attached volume match the one in CNS cache + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(queryResult.Volumes) > 0).To(gomega.BeTrue()) + } + } + } + + ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1)) + _, scaledownErr := fss.Scale(client, statefulset, replicas-1) + gomega.Expect(scaledownErr).NotTo(gomega.HaveOccurred()) + fss.WaitForStatusReadyReplicas(client, statefulset, replicas-1) + ssPodsAfterScaleDown := fss.GetPodList(client, statefulset) + gomega.Expect(ssPodsAfterScaleDown.Items).NotTo(gomega.BeEmpty(), + fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) + gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(replicas-1)).To(gomega.BeTrue(), + "Number of Pods in the statefulset should match with number of replicas") + + // After scale down, verify vSphere volumes are detached from deleted pods + ginkgo.By("Verify Volumes are detached from Nodes after Statefulsets is scaled down") + for _, sspod := range ssPodsBeforeScaleDown.Items { + _, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) + if err != nil { + gomega.Expect(apierrors.IsNotFound(err), gomega.BeTrue()) + for _, volumespec := range sspod.Spec.Volumes { + if volumespec.PersistentVolumeClaim != nil { + pv := getPvFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) + volumeID := pv.Spec.CSI.VolumeHandle + if guestCluster { + volumeID = getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle) + } + isDiskDetached, err := 
e2eVSphere.waitForVolumeDetachedFromNode( + client, volumeID, sspod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), + fmt.Sprintf("Volume %q is not detached from the node %q", + volumeID, sspod.Spec.NodeName)) + } + } + } + } + + // After scale down, verify the attached volumes match those in CNS Cache + for _, sspod := range ssPodsAfterScaleDown.Items { + _, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, volumespec := range sspod.Spec.Volumes { + if volumespec.PersistentVolumeClaim != nil { + pv := getPvFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) + volumeID := pv.Spec.CSI.VolumeHandle + if guestCluster { + volumeID = getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle) + } + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(queryResult.Volumes) > 0).To(gomega.BeTrue()) + } + } + } + + ginkgo.By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas)) + _, scaleupErr := fss.Scale(client, statefulset, replicas) + gomega.Expect(scaleupErr).NotTo(gomega.HaveOccurred()) + fss.WaitForStatusReplicas(client, statefulset, replicas) + fss.WaitForStatusReadyReplicas(client, statefulset, replicas) + + ssPodsAfterScaleUp := fss.GetPodList(client, statefulset) + gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), + fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) + gomega.Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(gomega.BeTrue(), + "Number of Pods in the statefulset should match with number of replicas") + + // After scale up, verify all vSphere volumes are attached to node VMs. 
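+		// Attachment is verified against the node VM: the VM UUID is read from the k8s node
+		// (vanilla) or resolved from the node name (guest cluster), and the volume is then
+		// checked for attachment to that VM.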
+ ginkgo.By("Verify all volumes are attached to Nodes after Statefulsets is scaled up") + for _, sspod := range ssPodsAfterScaleUp.Items { + err := fpod.WaitTimeoutForPodReadyInNamespace(client, sspod.Name, statefulset.Namespace, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pod, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, volumespec := range pod.Spec.Volumes { + if volumespec.PersistentVolumeClaim != nil { + pv := getPvFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) + volumeID := pv.Spec.CSI.VolumeHandle + if guestCluster { + volumeID = getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle) + } + ginkgo.By("Verify scale up operation should not introduced new volume") + gomega.Expect(contains(volumesBeforeScaleDown, volumeID)).To(gomega.BeTrue()) + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", + volumeID, sspod.Spec.NodeName)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var vmUUID string + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volumeID, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Disk is not attached to the node") + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Disk is not attached") + ginkgo.By("After scale up, verify the attached volumes match those in CNS Cache") + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(queryResult.Volumes) > 0).To(gomega.BeTrue()) + } + } + } + + replicas = 0 + ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas)) + _, scaledownErr = fss.Scale(client, statefulset, replicas) + gomega.Expect(scaledownErr).NotTo(gomega.HaveOccurred()) + fss.WaitForStatusReplicas(client, statefulset, replicas) + ssPodsAfterScaleDown = fss.GetPodList(client, statefulset) + gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(replicas)).To(gomega.BeTrue(), + "Number of Pods in the statefulset should match with number of replicas") + }) + + /* + Test dynamic volume provisioning with raw block volume + Steps + 1. Create a PVC. + 2. Create pod and wait for pod to become ready. + 3. Verify volume is attached. + 4. Write some test data to raw block device inside pod. + 5. Verify the data written on the volume correctly. + 6. Delete pod. + 7. Create a new pod using the previously created volume and wait for pod to + become ready. + 8. Verify previously written data using a read on volume. + 9. Write some new test data and verify it. + 10. Delete pod. + 11. Wait for volume to be detached. + */ + ginkgo.It("[csi-block-vanilla] [csi-guest] [csi-block-vanilla-parallelized] "+ + "Should create and delete pod with the same raw block volume", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ginkgo.By("Creating Storage Class and PVC") + // Decide which test setup is available to run. 
+ if vanillaCluster { + ginkgo.By("CNS_TEST: Running for vanilla k8s setup") + } else if guestCluster { + ginkgo.By("CNS_TEST: Running for GC setup") + scParameters[svStorageClassName] = storagePolicyName + } + sc, err := createStorageClass(client, scParameters, nil, "", "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating raw block PVC") + pvcspec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", sc, nil, "") + pvcspec.Spec.VolumeMode = &rawBlockVolumeMode + pvc, err = fpv.CreatePVC(client, namespace, pvcspec) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Failed to create pvc with err: %v", err)) + + ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) + pvs, err := fpv.WaitForPVClaimBoundPhase(client, []*corev1.PersistentVolumeClaim{pvc}, + framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(pvs).NotTo(gomega.BeEmpty()) + pv := pvs[0] + volumeID := pv.Spec.CSI.VolumeHandle + if guestCluster { + // svcPVCName refers to PVC Name in the supervisor cluster. + svcPVCName = volumeID + volumeID = getVolumeIDFromSupervisorCluster(svcPVCName) + gomega.Expect(volumeID).NotTo(gomega.BeEmpty()) + } + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpv.WaitForPersistentVolumeDeleted(client, pv.Name, poll, pollTimeoutShort) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating pod") + pod, err := createPod(client, namespace, nil, []*corev1.PersistentVolumeClaim{pvc}, false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", + volumeID, pod.Spec.NodeName)) + var vmUUID string + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + verifyCRDInSupervisorWithWait(ctx, f, pod.Spec.NodeName+"-"+svcPVCName, + crdCNSNodeVMAttachment, crdVersion, crdGroup, true) + } + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volumeID, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), fmt.Sprintf("Volume is not attached to the node, %s", vmUUID)) + + // Write and read some data on raw block volume inside the pod. + // Use same devicePath for raw block volume here as used inside podSpec by createPod(). + // Refer setVolumes() for more information on naming of devicePath. 
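+		// With pod_devicePathPrefix = "/mnt/volume" and a single volume, the path built below
+		// resolves to "/mnt/volume1", matching the device path set in the pod spec by createPod().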
+ volumeIndex := 1 + devicePath := fmt.Sprintf("%v%v", pod_devicePathPrefix, volumeIndex) + rand.New(rand.NewSource(time.Now().Unix())) + testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) + ginkgo.By(fmt.Sprintf("Creating a 1mb test data file %v", testdataFile)) + op, err := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=1").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + op, err = exec.Command("rm", "-f", testdataFile).Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By(fmt.Sprintf("Write and read data on raw volume attached to: %v at path %v", pod.Name, + pod.Spec.Containers[0].VolumeDevices[0].DevicePath)) + verifyIOOnRawBlockVolume(namespace, pod.Name, devicePath, testdataFile, 0, 1) + + ginkgo.By("Deleting the pod") + err = fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Verify volume is detached from the node") + isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, + volumeID, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), + fmt.Sprintf("Volume %q is not detached from the node %q", volumeID, pod.Spec.NodeName)) + if guestCluster { + ginkgo.By("Waiting for CnsNodeVMAttachment controller to reconcile resource") + verifyCRDInSupervisorWithWait(ctx, f, pod.Spec.NodeName+"-"+svcPVCName, + crdCNSNodeVMAttachment, crdVersion, crdGroup, false) + } + + ginkgo.By("Creating a new pod using the same volume") + pod, err = createPod(client, namespace, nil, []*corev1.PersistentVolumeClaim{pvc}, false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", + volumeID, pod.Spec.NodeName)) + + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + verifyCRDInSupervisorWithWait(ctx, f, pod.Spec.NodeName+"-"+svcPVCName, + crdCNSNodeVMAttachment, crdVersion, crdGroup, true) + } + isDiskAttached, err = e2eVSphere.isVolumeAttachedToVM(client, volumeID, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + // Verify previously written data. Later perform another write and verify it. 
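+		// The same PVC is reused here, so the new pod sees the same backing raw block device
+		// and the data written by the first pod is expected to still be readable.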
+ ginkgo.By(fmt.Sprintf("Verify previously written data on raw volume attached to: %v at path %v", pod.Name, + pod.Spec.Containers[0].VolumeDevices[0].DevicePath)) + verifyDataFromRawBlockVolume(namespace, pod.Name, devicePath, testdataFile, 0, 1) + ginkgo.By(fmt.Sprintf("Writing new 1mb test data in file %v", testdataFile)) + op, err = exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=1").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By(fmt.Sprintf("Write and read new data on raw volume attached to: %v at path %v", pod.Name, + pod.Spec.Containers[0].VolumeDevices[0].DevicePath)) + verifyIOOnRawBlockVolume(namespace, pod.Name, devicePath, testdataFile, 0, 1) + + ginkgo.By("Deleting the pod") + err = fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify volume is detached from the node") + isDiskDetached, err = e2eVSphere.waitForVolumeDetachedFromNode(client, + volumeID, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), + fmt.Sprintf("Volume %q is not detached from the node %q", volumeID, pod.Spec.NodeName)) + if guestCluster { + ginkgo.By("Waiting for 30 seconds to allow CnsNodeVMAttachment controller to reconcile resource") + time.Sleep(waitTimeForCNSNodeVMAttachmentReconciler) + verifyCRDInSupervisorWithWait(ctx, f, pod.Spec.NodeName+"-"+svcPVCName, + crdCNSNodeVMAttachment, crdVersion, crdGroup, false) + } + }) + + /* + Test static volume provisioning with raw block volume + Steps: + 1. Create FCD and wait for fcd to allow syncing with pandora. + 2. Create PV Spec with volumeID set to FCDID created in Step-1, and + PersistentVolumeReclaimPolicy is set to Delete. + 3. Create PVC with the storage request set to PV's storage capacity. + 4. Wait for PV and PVC to bound. + 5. Create a POD. + 6. Verify volume is attached to the node and volume is accessible in the pod. + 7. Verify container volume metadata is present in CNS cache. + 8. Delete POD. + 9. Verify volume is detached from the node. + 10. Delete PVC. + 11. Verify PV is deleted automatically. + */ + ginkgo.It("[csi-block-vanilla] [csi-block-vanilla-parallelized] Verify basic static provisioning workflow"+ + " with raw block volume", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Creating FCD Disk") + fcdID, err := e2eVSphere.createFCD(ctx, "BasicStaticFCD", diskSizeInMb, defaultDatastore.Reference()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteFCDRequired = true + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora", + pandoraSyncWaitTime, fcdID)) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + + // Creating label for PV. + // PVC will use this label as Selector to find PV. 
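+		// i.e. both the PV labels and the PVC selector are expected to carry {"fcd-id": <FCD ID>},
+		// so the claim binds to this statically provisioned PV.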
+ staticPVLabels := make(map[string]string) + staticPVLabels["fcd-id"] = fcdID + + ginkgo.By("Creating raw block PV") + pv = getPersistentVolumeSpec(fcdID, corev1.PersistentVolumeReclaimDelete, staticPVLabels, "") + pv.Spec.VolumeMode = &rawBlockVolumeMode + pv, err = client.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeCreated(pv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := fpv.DeletePersistentVolume(client, pv.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating raw block PVC") + pvc = getPersistentVolumeClaimSpec(namespace, staticPVLabels, pv.Name) + pvc.Spec.VolumeMode = &rawBlockVolumeMode + pvc, err = client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvc, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Wait for PV and PVC to Bind. + framework.ExpectNoError(fpv.WaitOnPVandPVC(client, framework.NewTimeoutContextWithDefaults(), namespace, pv, pvc)) + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpv.WaitForPersistentVolumeDeleted(client, pv.Name, poll, pollTimeoutShort) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(pv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + // Set deleteFCDRequired to false. + // After PV, PVC is in the bind state, Deleting PVC should delete + // container volume. So no need to delete FCD directly using vSphere + // API call. + deleteFCDRequired = false + + ginkgo.By("Verifying CNS entry is present in cache") + _, err = e2eVSphere.queryCNSVolumeWithResult(pv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating the Pod") + var pvclaims []*corev1.PersistentVolumeClaim + pvclaims = append(pvclaims, pvc) + pod, err := createPod(client, namespace, nil, pvclaims, false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + vmUUID := getNodeUUID(ctx, client, pod.Spec.NodeName) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pv.Spec.CSI.VolumeHandle, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached") + + // Write and read some data on raw block volume inside the pod. + // Use same devicePath for raw block volume here as used inside podSpec by createPod(). + // Refer setVolumes() for more information on naming of devicePath. 
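+ // NOTE: devicePath is rebuilt here rather than read from the pod spec, so it must stay in sync with pod.Spec.Containers[0].VolumeDevices[0].DevicePath, which createPod()/setVolumes() derive from the same pod_devicePathPrefix and index.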
+ volumeIndex := 1 + devicePath := fmt.Sprintf("%v%v", pod_devicePathPrefix, volumeIndex) + rand.New(rand.NewSource(time.Now().Unix())) + testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) + ginkgo.By(fmt.Sprintf("Creating a 1mb test data file %v", testdataFile)) + op, err := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=1").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + op, err = exec.Command("rm", "-f", testdataFile).Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By(fmt.Sprintf("Write and read data on raw volume attached to: %v at path %v", pod.Name, + pod.Spec.Containers[0].VolumeDevices[0].DevicePath)) + verifyIOOnRawBlockVolume(namespace, pod.Name, devicePath, testdataFile, 0, 1) + + ginkgo.By("Verify container volume metadata is present in CNS cache") + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolume with VolumeID: %s", pv.Spec.CSI.VolumeHandle)) + _, err = e2eVSphere.queryCNSVolumeWithResult(pv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + labels := []types.KeyValue{{Key: "fcd-id", Value: fcdID}} + ginkgo.By("Verify container volume metadata is matching the one in CNS cache") + err = verifyVolumeMetadataInCNS(&e2eVSphere, pv.Spec.CSI.VolumeHandle, + pvc.Name, pv.ObjectMeta.Name, pod.Name, labels...) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Deleting the Pod") + framework.ExpectNoError(fpod.DeletePodWithWait(client, pod), "Failed to delete pod", pod.Name) + + ginkgo.By(fmt.Sprintf("Verify volume %q is detached from the node: %s", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), "Volume is not detached from the node") + }) + + /* + Test online volume expansion on dynamic raw block volume + Steps: + 1. Create StorageClass with allowVolumeExpansion set to true. + 2. Create raw block PVC which uses the StorageClass created in step 1. + 3. Wait for PV to be provisioned. + 4. Wait for PVC's status to become Bound and note down the size + 5. Create a Pod using the above created PVC + 6. Modify PVC's size to trigger online volume expansion + 7. verify the PVC status will change to "FilesystemResizePending". Wait till the status is removed + 8. Verify the resized PVC by doing CNS query + 9. Make sure data is intact on the PV mounted on the pod + 10. 
Make sure the block device size has increased + + */ + ginkgo.It("[csi-block-vanilla] [csi-block-vanilla-parallelized] [csi-guest] "+ + "Verify online volume expansion on dynamic raw block volume", func() { + ginkgo.By("Invoking Test for online Volume Expansion on raw block volume") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ginkgo.By("Create StorageClass with allowVolumeExpansion set to true") + sharedVSANDatastoreURL := GetAndExpectStringEnvVar(envSharedDatastoreURL) + if vanillaCluster { + ginkgo.By("CNS_TEST: Running for vanilla k8s setup") + scParameters[scParamDatastoreURL] = sharedVSANDatastoreURL + } else if guestCluster { + ginkgo.By("CNS_TEST: Running for GC setup") + scParameters[svStorageClassName] = storagePolicyName + } + scParameters[scParamFsType] = ext4FSType + sc, err := createStorageClass(client, scParameters, nil, "", "", true, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating raw block PVC") + pvcspec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", sc, nil, "") + pvcspec.Spec.VolumeMode = &rawBlockVolumeMode + pvc, err = fpv.CreatePVC(client, namespace, pvcspec) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Failed to create pvc with err: %v", err)) + + ginkgo.By(fmt.Sprintf("Waiting for claim %s to be in bound phase", pvc.Name)) + pvs, err := fpv.WaitForPVClaimBoundPhase(client, []*corev1.PersistentVolumeClaim{pvc}, + framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(pvs).NotTo(gomega.BeEmpty()) + pv = pvs[0] + volumeID := pv.Spec.CSI.VolumeHandle + if guestCluster { + // svcPVCName refers to PVC Name in the supervisor cluster. + svcPVCName = volumeID + volumeID = getVolumeIDFromSupervisorCluster(svcPVCName) + gomega.Expect(volumeID).NotTo(gomega.BeEmpty()) + } + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpv.WaitForPersistentVolumeDeleted(client, pv.Name, poll, pollTimeoutShort) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create Pod using the above raw block PVC") + var pvclaims []*corev1.PersistentVolumeClaim + pvclaims = append(pvclaims, pvc) + pod, err := createPod(client, namespace, nil, pvclaims, false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", + volumeID, pod.Spec.NodeName)) + var vmUUID string + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + verifyCRDInSupervisorWithWait(ctx, f, pod.Spec.NodeName+"-"+svcPVCName, + crdCNSNodeVMAttachment, crdVersion, crdGroup, true) + } + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volumeID, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), fmt.Sprintf("Volume is not attached to the node, %s", vmUUID)) + + // Get the size of block device and verify if device is accessible by performing write and read. + // Write and read some data on raw block volume inside the pod.
+ // Use same devicePath for raw block volume here as used inside podSpec by createPod(). + // Refer setVolumes() for more information on naming of devicePath. + volumeIndex := 1 + devicePath := fmt.Sprintf("%v%v", pod_devicePathPrefix, volumeIndex) + ginkgo.By(fmt.Sprintf("Check size for block device at devicePath %v before expansion", devicePath)) + originalBlockDevSize, err := getBlockDevSizeInBytes(f, namespace, pod, devicePath) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + rand.New(rand.NewSource(time.Now().Unix())) + testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) + ginkgo.By(fmt.Sprintf("Creating a 1mb test data file %v", testdataFile)) + op, err := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=1").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + op, err = exec.Command("rm", "-f", testdataFile).Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By(fmt.Sprintf("Write and read data on raw volume attached to: %v at path %v", pod.Name, + pod.Spec.Containers[0].VolumeDevices[0].DevicePath)) + verifyIOOnRawBlockVolume(namespace, pod.Name, devicePath, testdataFile, 0, 1) + + defer func() { + // Delete Pod. + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err := fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, + volumeID, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), + fmt.Sprintf("Volume %q is not detached from the node %q", volumeID, pod.Spec.NodeName)) + if guestCluster { + ginkgo.By("Waiting for 30 seconds to allow CnsNodeVMAttachment controller to reconcile resource") + time.Sleep(waitTimeForCNSNodeVMAttachmentReconciler) + verifyCRDInSupervisorWithWait(ctx, f, pod.Spec.NodeName+"-"+svcPVCName, + crdCNSNodeVMAttachment, crdVersion, crdGroup, false) + } + }() + + ginkgo.By("Increase PVC size and verify online volume resize") + increaseSizeOfPvcAttachedToPod(f, client, namespace, pvc, pod) + + ginkgo.By("Wait for block device size to be updated inside pod after expansion") + isPvcExpandedInsidePod := false + for !isPvcExpandedInsidePod { + blockDevSize, err := getBlockDevSizeInBytes(f, namespace, pod, devicePath) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if blockDevSize > originalBlockDevSize { + ginkgo.By("Volume size updated inside pod successfully") + isPvcExpandedInsidePod = true + } else { + ginkgo.By(fmt.Sprintf("updating volume size for %q. Resulting volume size is %d", pvc.Name, blockDevSize)) + time.Sleep(30 * time.Second) + } + } + + // Verify original data on raw block volume after expansion + ginkgo.By(fmt.Sprintf("Verify previously written data on raw volume attached to: %v at path %v", pod.Name, + pod.Spec.Containers[0].VolumeDevices[0].DevicePath)) + verifyDataFromRawBlockVolume(namespace, pod.Name, devicePath, testdataFile, 0, 1) + // Write data on expanded space to verify the expansion is successful and accessible. 
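+ // The write below is issued at offset volsizeInMiBBeforeExpansion, i.e. right where the pre-expansion capacity ended, so reading it back exercises the newly added space rather than the already-verified old region.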
+ ginkgo.By(fmt.Sprintf("Writing new 1mb test data in file %v", testdataFile)) + op, err = exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=1").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By(fmt.Sprintf("Write testdata of 1MB size from offset=%v on raw volume at path %v inside pod %v", + volsizeInMiBBeforeExpansion, pod.Spec.Containers[0].VolumeDevices[0].DevicePath, pod.Name)) + verifyIOOnRawBlockVolume(namespace, pod.Name, devicePath, testdataFile, + volsizeInMiBBeforeExpansion, 1) + }) + + /* + Test to verify offline volume expansion is supported when allowVolumeExpansion + is true in the StorageClass and the PVC is expanded while it is not + attached to a Pod. + Steps: + 1. Create StorageClass with allowVolumeExpansion set to true. + 2. Create raw block PVC which uses the StorageClass created in step 1. + 3. Wait for PV to be provisioned. + 4. Wait for PVC's status to become Bound. + 5. Create pod using PVC on specific node. + 6. Wait for Disk to be attached to the node. + 7. Write some data to raw block PVC. + 8. Delete the pod to detach the volume. + 9. Modify PVC's size to trigger offline volume expansion. + 10. Create pod again using PVC on specific node. + 11. Wait for Disk to be attached to the node. + 12. Verify data written on PVC before expansion. + 13. Delete the pod and wait for the volume to be detached from the node. + 14. Delete PVC, PV and Storage Class. + */ + ginkgo.It("[csi-block-vanilla] [csi-block-vanilla-parallelized] [csi-guest] "+ + "Verify offline volume expansion with raw block volume", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ginkgo.By("Invoking Test for Offline Volume Expansion") + // Create Storage class and PVC + scParameters := make(map[string]string) + scParameters[scParamFsType] = ext4FSType + + // Create a StorageClass that sets allowVolumeExpansion to true + ginkgo.By("Creating Storage Class with allowVolumeExpansion = true") + if vanillaCluster { + ginkgo.By("CNS_TEST: Running for vanilla k8s setup") + } else if guestCluster { + ginkgo.By("CNS_TEST: Running for GC setup") + scParameters[svStorageClassName] = storagePolicyName + } + sc, err := createStorageClass(client, scParameters, nil, "", "", true, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating raw block PVC") + pvcspec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", sc, nil, "") + pvcspec.Spec.VolumeMode = &rawBlockVolumeMode + pvc, err = fpv.CreatePVC(client, namespace, pvcspec) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Failed to create pvc with err: %v", err)) + + // Waiting for PVC to be bound + var pvclaims []*corev1.PersistentVolumeClaim + pvclaims = append(pvclaims, pvc) + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pv := pvs[0] + volumeID := pv.Spec.CSI.VolumeHandle + svcPVCName := pv.Spec.CSI.VolumeHandle + if guestCluster { + volumeID = getVolumeIDFromSupervisorCluster(volumeID) + gomega.Expect(volumeID).NotTo(gomega.BeEmpty()) + } + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err =
fpv.WaitForPersistentVolumeDeleted(client, pv.Name, poll, pollTimeoutShort) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // Create a Pod to use this PVC, and verify volume has been attached + ginkgo.By("Creating pod to attach PV to the node") + pod, err := createPod(client, namespace, nil, []*corev1.PersistentVolumeClaim{pvc}, false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + var vmUUID string + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volumeID, pod.Spec.NodeName)) + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volumeID, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + // Get the size of block device and verify if device is accessible by performing write and read. + volumeIndex := 1 + devicePath := fmt.Sprintf("%v%v", pod_devicePathPrefix, volumeIndex) + ginkgo.By(fmt.Sprintf("Check size for block device at devicePath %v before expansion", devicePath)) + originalBlockDevSize, err := getBlockDevSizeInBytes(f, namespace, pod, devicePath) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + rand.New(rand.NewSource(time.Now().Unix())) + testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) + ginkgo.By(fmt.Sprintf("Creating a 1mb test data file %v", testdataFile)) + op, err := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=1").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + op, err = exec.Command("rm", "-f", testdataFile).Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By(fmt.Sprintf("Write and read data on raw volume attached to: %v at path %v", pod.Name, + pod.Spec.Containers[0].VolumeDevices[0].DevicePath)) + verifyIOOnRawBlockVolume(namespace, pod.Name, devicePath, testdataFile, 0, 1) + + // Delete POD + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err = fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify volume is detached from the node") + isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode( + client, volumeID, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), + fmt.Sprintf("Volume %q is not detached from the node %q", volumeID, pod.Spec.NodeName)) + if guestCluster { + ginkgo.By("Waiting for 30 seconds to allow CnsNodeVMAttachment controller to reconcile resource") + time.Sleep(waitTimeForCNSNodeVMAttachmentReconciler) + verifyCRDInSupervisorWithWait(ctx, f, pod.Spec.NodeName+"-"+svcPVCName, + crdCNSNodeVMAttachment, crdVersion, crdGroup, false) + } + + // Modify PVC spec to trigger volume expansion + // We expand the PVC while no pod is using it to ensure offline expansion + ginkgo.By("Expanding current pvc") + currentPvcSize := pvc.Spec.Resources.Requests[corev1.ResourceStorage] + newSize := currentPvcSize.DeepCopy() + newSize.Add(resource.MustParse("1Gi")) + framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize) + pvc, err = 
expandPVCSize(pvc, newSize, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(pvc).NotTo(gomega.BeNil()) + + pvcSize := pvc.Spec.Resources.Requests[corev1.ResourceStorage] + if pvcSize.Cmp(newSize) != 0 { + framework.Failf("error updating pvc size %q", pvc.Name) + } + if guestCluster { + ginkgo.By("Checking for PVC request size change on SVC PVC") + b, err := verifyPvcRequestedSizeUpdateInSupervisorWithWait(svcPVCName, newSize) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(b).To(gomega.BeTrue()) + } + + ginkgo.By("Waiting for controller volume resize to finish") + err = waitForPvResizeForGivenPvc(pvc, client, totalResizeWaitPeriod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if guestCluster { + ginkgo.By("Checking for resize on SVC PV") + verifyPVSizeinSupervisor(svcPVCName, newSize) + } + + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volumeID)) + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if len(queryResult.Volumes) == 0 { + err = fmt.Errorf("queryCNSVolumeWithResult returned no volume") + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Verifying disk size requested in volume expansion is honored") + newSizeInMb := int64(3072) + if queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).CapacityInMb != newSizeInMb { + err = fmt.Errorf("got wrong disk size after volume expansion") + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Create a new Pod to use this PVC, and verify volume has been attached + ginkgo.By("Creating a new pod to attach PV again to the node") + pod, err = createPod(client, namespace, nil, []*corev1.PersistentVolumeClaim{pvc}, false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Verify volume after expansion: %s is attached to the node: %s", + volumeID, pod.Spec.NodeName)) + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + isDiskAttached, err = e2eVSphere.isVolumeAttachedToVM(client, volumeID, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + pvcConditions := pvc.Status.Conditions + expectEqual(len(pvcConditions), 0, "pvc should not have conditions") + + ginkgo.By("Verify block device size inside pod after expansion") + blockDevSize, err := getBlockDevSizeInBytes(f, namespace, pod, devicePath) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if blockDevSize <= originalBlockDevSize { + framework.Failf("error updating volume size for %q. Resulting volume size is %d", pvc.Name, blockDevSize) + } + ginkgo.By("Resized volume attached successfully") + + // Verify original data on raw block volume after expansion + ginkgo.By(fmt.Sprintf("Verify previously written data on raw volume attached to: %v at path %v", pod.Name, + pod.Spec.Containers[0].VolumeDevices[0].DevicePath)) + verifyDataFromRawBlockVolume(namespace, pod.Name, devicePath, testdataFile, 0, 1) + // Write data on expanded space to verify the expansion is successful and accessible. 
+ ginkgo.By(fmt.Sprintf("Writing new 1mb test data in file %v", testdataFile)) + op, err = exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=1").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By(fmt.Sprintf("Write testdata of 1MB size from offset=%v on raw volume at path %v inside pod %v", + volsizeInMiBBeforeExpansion, pod.Spec.Containers[0].VolumeDevices[0].DevicePath, pod.Name)) + verifyIOOnRawBlockVolume(namespace, pod.Name, devicePath, testdataFile, + volsizeInMiBBeforeExpansion, 1) + + // Delete POD + ginkgo.By(fmt.Sprintf("Deleting the new pod %s in namespace %s after expansion", pod.Name, namespace)) + err = fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify volume is detached from the node after expansion") + isDiskDetached, err = e2eVSphere.waitForVolumeDetachedFromNode(client, volumeID, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), + fmt.Sprintf("Volume %q is not detached from the node %q", volumeID, pod.Spec.NodeName)) + if guestCluster { + ginkgo.By("Waiting for 30 seconds to allow CnsNodeVMAttachment controller to reconcile resource") + time.Sleep(waitTimeForCNSNodeVMAttachmentReconciler) + verifyCRDInSupervisorWithWait(ctx, f, pod.Spec.NodeName+"-"+svcPVCName, + crdCNSNodeVMAttachment, crdVersion, crdGroup, false) + } + }) + + /* + Test snapshot restore operation with raw block volume + Steps: + 1. Create a storage class (eg: vsan default) and create a pvc using this sc + 2. Write some data on source volume + 3. Create a VolumeSnapshot class with snapshotter as vsphere-csi-driver and set deletionPolicy to Delete + 4. Create a volume-snapshot with labels, using the above snapshot-class and pvc (from step-1) as source + 5. Ensure the snapshot is created, verify using get VolumeSnapshot + 6. Also verify that VolumeSnapshotContent is auto-created + 7. Verify the references to pvc and volume-snapshot on this object + 8. Verify that the VolumeSnapshot has ready-to-use set to True + 9. Verify that the Restore Size set on the snapshot is same as that of the source volume size + 10. Query the snapshot from CNS side using volume id - should pass and return the snapshot entry + 11. Restore the snapshot to another pvc + 12. Verify previous data written on source volume is present on restored volume + 13. Delete the above snapshot from k8s side using kubectl delete, run a get and ensure it is removed + 14. Also ensure that the VolumeSnapshotContent is deleted along with the + volume snapshot as the policy is delete + 15. Query the snapshot from CNS side - should return 0 entries + 16. 
Cleanup: Delete PVC, SC (validate they are removed) + */ + ginkgo.It("[block-vanilla-snapshot] [tkg-snapshot] Verify snapshot dynamic provisioning workflow with "+ + "raw block volume", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + if vanillaCluster { + scParameters[scParamDatastoreURL] = datastoreURL + } else if guestCluster { + scParameters[svStorageClassName] = storagePolicyName + } + + ginkgo.By("Create storage class") + sc, err := createStorageClass(client, scParameters, nil, "", "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating source raw block PVC") + pvcspec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", sc, nil, "") + pvcspec.Spec.VolumeMode = &rawBlockVolumeMode + pvc1, err := fpv.CreatePVC(client, namespace, pvcspec) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Failed to create pvc with err: %v", err)) + + ginkgo.By("Expect source volume claim to provision volume successfully") + pvs, err := fpv.WaitForPVClaimBoundPhase(client, []*corev1.PersistentVolumeClaim{pvc1}, + framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeID := pvs[0].Spec.CSI.VolumeHandle + gomega.Expect(volumeID).NotTo(gomega.BeEmpty()) + if guestCluster { + volumeID = getVolumeIDFromSupervisorCluster(volumeID) + } + + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvc1.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // Verify using CNS Query API if VolumeID retrieved from PV is present. + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volumeID)) + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) + gomega.Expect(queryResult.Volumes[0].VolumeId.Id).To(gomega.Equal(volumeID)) + + ginkgo.By("Creating pod to attach source PV to the node") + pod1, err := createPod(client, namespace, nil, []*corev1.PersistentVolumeClaim{pvc1}, + false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + // Delete POD + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod1.Name, namespace)) + err = fpod.DeletePodWithWait(client, pod1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + var vmUUID string + nodeName := pod1.Spec.NodeName + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod1.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod1.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volumeID, nodeName)) + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volumeID, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + + // Write and read some data on raw block volume inside the pod. + // Use same devicePath for raw block volume here as used inside podSpec by createPod(). + // Refer setVolumes() for more information on naming of devicePath. 
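+ // The data written to the source volume here becomes the payload that is later verified on the volume restored from the snapshot, confirming the snapshot captured the raw block contents.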
+ volumeIndex := 1 + devicePath := fmt.Sprintf("%v%v", pod_devicePathPrefix, volumeIndex) + rand.New(rand.NewSource(time.Now().Unix())) + testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) + ginkgo.By(fmt.Sprintf("Creating a 1mb test data file %v", testdataFile)) + op, err := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=1").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + op, err = exec.Command("rm", "-f", testdataFile).Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By(fmt.Sprintf("Write and read data on source raw volume attached to: %v at path %v", pod1.Name, + pod1.Spec.Containers[0].VolumeDevices[0].DevicePath)) + verifyIOOnRawBlockVolume(namespace, pod1.Name, devicePath, testdataFile, 0, 1) + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := createVolumeSnapshotClass(ctx, snapc, deletionPolicy) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if vanillaCluster { + err = snapc.SnapshotV1().VolumeSnapshotClasses().Delete(ctx, volumeSnapshotClass.Name, + metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Create a dynamic volume snapshot") + volumeSnapshot, snapshotContent, snapshotCreated, + snapshotContentCreated, snapshotId, err := createDynamicVolumeSnapshot(ctx, namespace, snapc, volumeSnapshotClass, + pvc1, volumeID, diskSize) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if snapshotContentCreated { + framework.Logf("Deleting volume snapshot content") + err = deleteVolumeSnapshotContent(ctx, snapshotContent, snapc, namespace, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if snapshotCreated { + framework.Logf("Deleting volume snapshot") + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeletedWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + // Restore volumeSnapshot to another PVC + ginkgo.By("Restore volume snapshot to another raw block PVC") + restorePvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, diskSize, sc, nil, + corev1.ReadWriteOnce, volumeSnapshot.Name, snapshotapigroup) + restorePvcSpec.Spec.VolumeMode = &rawBlockVolumeMode + restoredPvc, err := fpv.CreatePVC(client, namespace, restorePvcSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + restoredPvs, err := fpv.WaitForPVClaimBoundPhase(client, + []*corev1.PersistentVolumeClaim{restoredPvc}, + framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeID2 := restoredPvs[0].Spec.CSI.VolumeHandle + gomega.Expect(volumeID2).NotTo(gomega.BeEmpty()) + if guestCluster { + volumeID2 = getVolumeIDFromSupervisorCluster(volumeID2) + } + defer func() { + ginkgo.By("Deleting the restored PVC") + err := fpv.DeletePersistentVolumeClaim(client, restoredPvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Wait for the restored PVC to disappear in CNS") + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // Create a Pod to use this PVC, and verify volume has been attached + ginkgo.By("Creating pod to 
attach restored PVC to the node") + pod2, err := createPod(client, namespace, nil, []*corev1.PersistentVolumeClaim{restoredPvc}, false, + "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + nodeName = pod2.Spec.NodeName + if vanillaCluster { + vmUUID = getNodeUUID(ctx, client, pod2.Spec.NodeName) + } else if guestCluster { + vmUUID, err = getVMUUIDFromNodeName(pod2.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volumeID2, nodeName)) + isDiskAttached, err = e2eVSphere.isVolumeAttachedToVM(client, volumeID2, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") + defer func() { + // Delete POD + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod2.Name, namespace)) + err = fpod.DeletePodWithWait(client, pod2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", volumeID2, nodeName)) + isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, volumeID2, nodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), + fmt.Sprintf("Volume %q is not detached from the node %q", volumeID2, nodeName)) + if guestCluster { + ginkgo.By("Waiting for 30 seconds to allow CnsNodeVMAttachment controller to reconcile resource") + time.Sleep(waitTimeForCNSNodeVMAttachmentReconciler) + verifyCRDInSupervisorWithWait(ctx, f, pod2.Spec.NodeName+"-"+svcPVCName, + crdCNSNodeVMAttachment, crdVersion, crdGroup, false) + } + }() + + // Verify previously written data. Later perform another write and verify it. + ginkgo.By(fmt.Sprintf("Verify previously written data on restored volume attached to: %v at path %v", pod2.Name, + pod2.Spec.Containers[0].VolumeDevices[0].DevicePath)) + verifyDataFromRawBlockVolume(namespace, pod2.Name, devicePath, testdataFile, 0, 1) + ginkgo.By(fmt.Sprintf("Writing new 1mb test data in file %v", testdataFile)) + op, err = exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=1").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By(fmt.Sprintf("Write and read data on restored volume attached to: %v at path %v", pod2.Name, + pod2.Spec.Containers[0].VolumeDevices[0].DevicePath)) + verifyIOOnRawBlockVolume(namespace, pod2.Name, devicePath, testdataFile, 0, 1) + + ginkgo.By("Delete dyanmic volume snapshot") + snapshotCreated, snapshotContentCreated, err = deleteVolumeSnapshot(ctx, snapc, namespace, + volumeSnapshot, pandoraSyncWaitTime, volumeID, snapshotId) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) +}) diff --git a/tests/e2e/staging_env_basic.go b/tests/e2e/staging_env_basic.go index 75cc15f911..38eef1a3ff 100644 --- a/tests/e2e/staging_env_basic.go +++ b/tests/e2e/staging_env_basic.go @@ -37,9 +37,11 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" fdep "k8s.io/kubernetes/test/e2e/framework/deployment" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" "k8s.io/kubernetes/test/e2e/framework/manifest" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" fpv "k8s.io/kubernetes/test/e2e/framework/pv" fss "k8s.io/kubernetes/test/e2e/framework/statefulset" @@ -190,7 +192,7 @@ var _ = 
ginkgo.Describe("[csi-supervisor-staging] Tests for WCP env with minimal statefulset.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort = port statefulset.Spec.Template.Spec.Containers[0].Ports[0].Name = "web" + val + strconv.Itoa(min) statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storagePolicyName + Spec.StorageClassName = &storagePolicyName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) @@ -423,7 +425,7 @@ var _ = ginkgo.Describe("[csi-supervisor-staging] Tests for WCP env with minimal statefulset.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort = port statefulset.Spec.Template.Spec.Containers[0].Ports[0].Name = "web" + val + strconv.Itoa(min) statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storagePolicyName + Spec.StorageClassName = &storagePolicyName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) @@ -662,7 +664,7 @@ var _ = ginkgo.Describe("[csi-supervisor-staging] Tests for WCP env with minimal statefulset.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort = port statefulset.Spec.Template.Spec.Containers[0].Ports[0].Name = "web" + val + strconv.Itoa(min) statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storagePolicyName + Spec.StorageClassName = &storagePolicyName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) @@ -733,7 +735,7 @@ var _ = ginkgo.Describe("[csi-supervisor-staging] Tests for WCP env with minimal statefulset = GetResizedStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storagePolicyName + Spec.StorageClassName = &storagePolicyName CreateStatefulSet(namespace, statefulset, client) replicas = *(statefulset.Spec.Replicas) @@ -984,7 +986,7 @@ var _ = ginkgo.Describe("[csi-supervisor-staging] Tests for WCP env with minimal ginkgo.By("Verify the volume is accessible and filegroup type is as expected") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "ls -lh /mnt/volume1/fstype "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, strconv.Itoa(int(fsGroup)))).NotTo(gomega.BeFalse()) gomega.Expect(strings.Contains(output, strconv.Itoa(int(runAsUser)))).NotTo(gomega.BeFalse()) @@ -1275,7 +1277,7 @@ func createPODandVerifyVolumeMountWithoutF(ctx context.Context, client clientset "Volume is not attached to the node volHandle: %s, vmUUID: %s", volHandle, vmUUID) ginkgo.By("Verify the volume is accessible and filesystem type is as expected") - _, err = framework.LookForStringInPodExec(namespace, pod.Name, + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, "", time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1288,7 +1290,7 @@ func getFSSizeMbWithoutF(namespace string, pod *v1.Pod) (int64, error) { var err error cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "df -Tkm | grep /mnt/volume1"} - output = framework.RunKubectlOrDie(namespace, cmd...) 
+ output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, ext4FSType)).NotTo(gomega.BeFalse()) arrMountOut := strings.Fields(string(output)) diff --git a/tests/e2e/statefulset_with_topology.go b/tests/e2e/statefulset_with_topology.go index d233bf92a5..df4f6bfb07 100644 --- a/tests/e2e/statefulset_with_topology.go +++ b/tests/e2e/statefulset_with_topology.go @@ -157,6 +157,12 @@ var _ = ginkgo.Describe("[csi-topology-vanilla] Topology-Aware-Provisioning-With gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() + ginkgo.By("Creating service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + // Creating statefulset with 3 replicas ginkgo.By("Creating statefulset with 3 replica") statefulset := GetStatefulSetFromManifest(namespace) diff --git a/tests/e2e/statefulset_xfs.go b/tests/e2e/statefulset_xfs.go index 457b7a3fb7..8899191e39 100644 --- a/tests/e2e/statefulset_xfs.go +++ b/tests/e2e/statefulset_xfs.go @@ -27,6 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fss "k8s.io/kubernetes/test/e2e/framework/statefulset" admissionapi "k8s.io/pod-security-admission/api" @@ -94,7 +95,7 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] st ginkgo.By("Creating statefulset") statefulset := GetStatefulSetFromManifest(namespace) statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storageClassName + Spec.StorageClassName = &storageClassName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) // Waiting for pods status to be Ready @@ -115,7 +116,7 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] st // Check filesystem used to mount volume inside pod is as expeted ginkgo.By("Verify if filesystem used to mount volume is xfs as expected") cmd := []string{"exec", sspod.Name, "--", "/bin/sh", "-c", "mount | grep /usr/share/nginx/html"} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
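+ // NOTE: RunKubectlOrDie now comes from the framework/kubectl subpackage, imported above as e2ekubectl; this appears to track the upstream move of the kubectl helpers out of the core e2e framework package.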
ginkgo.By(fmt.Sprintf("Mount information: %s", output)) gomega.Expect(strings.Contains(output, xfsFSType)).NotTo(gomega.BeFalse(), "filesystem used should be xfs") diff --git a/tests/e2e/statefulsets.go b/tests/e2e/statefulsets.go index 58ef0eb432..13b5ea996e 100644 --- a/tests/e2e/statefulsets.go +++ b/tests/e2e/statefulsets.go @@ -64,13 +64,11 @@ var _ = ginkgo.Describe("statefulset", func() { f := framework.NewDefaultFramework("e2e-vsphere-statefulset") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var ( - namespace string - client clientset.Interface - storagePolicyName string - scParameters map[string]string - storageClassName string - sshClientConfig *ssh.ClientConfig - nimbusGeneratedK8sVmPwd string + namespace string + client clientset.Interface + storagePolicyName string + scParameters map[string]string + storageClassName string ) ginkgo.BeforeEach(func() { @@ -84,15 +82,6 @@ var _ = ginkgo.Describe("statefulset", func() { gomega.Expect(client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0))).NotTo(gomega.HaveOccurred()) } - nimbusGeneratedK8sVmPwd = GetAndExpectStringEnvVar(nimbusK8sVmPwd) - - sshClientConfig = &ssh.ClientConfig{ - User: "root", - Auth: []ssh.AuthMethod{ - ssh.Password(nimbusGeneratedK8sVmPwd), - }, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - } scParameters = make(map[string]string) storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) @@ -114,7 +103,7 @@ var _ = ginkgo.Describe("statefulset", func() { }) ginkgo.It("[csi-block-vanilla] [csi-supervisor] [csi-block-vanilla-parallelized]"+ - "Statefulset testing with default podManagementPolicy", func() { + "Statefulset testing with default a podManagementPolicy", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ginkgo.By("Creating StorageClass for Statefulset") @@ -148,7 +137,7 @@ var _ = ginkgo.Describe("statefulset", func() { statefulset := GetStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storageClassName + Spec.StorageClassName = &storageClassName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) // Waiting for pods status to be Ready @@ -346,7 +335,7 @@ var _ = ginkgo.Describe("statefulset", func() { *(statefulset.Spec.Replicas) = 8 statefulset.Spec.PodManagementPolicy = apps.ParallelPodManagement statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storageClassName + Spec.StorageClassName = &storageClassName ginkgo.By("Creating statefulset") CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) @@ -544,7 +533,7 @@ var _ = ginkgo.Describe("statefulset", func() { statefulset := GetStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. 
- Annotations["volume.beta.kubernetes.io/storage-class"] = storageClassName + Spec.StorageClassName = &storageClassName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) // Waiting for pods status to be Ready @@ -603,7 +592,7 @@ var _ = ginkgo.Describe("statefulset", func() { statefulset = GetResizedStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storageClassName + Spec.StorageClassName = &storageClassName CreateStatefulSet(namespace, statefulset, client) replicas = *(statefulset.Spec.Replicas) @@ -659,7 +648,7 @@ var _ = ginkgo.Describe("statefulset", func() { }) /* - Verify List volume Response on vsphere-ccsi-controller logs + Verify List volume Response on vsphere-csi-controller logs Note: ist volume Threshold is set to 1 , and query limit set to 3 1. Create SC 2. Create statefull set with 3 replica @@ -676,10 +665,12 @@ var _ = ginkgo.Describe("statefulset", func() { 12. Inncrease the CSI driver replica to 3 */ - ginkgo.It("[csi-block-vanilla] ListVolumeResponse Validation", func() { + ginkgo.It("[csi-block-vanilla] [csi-supervisor] ListVolumeResponse Validation", func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + var svcMasterPswd string var volumesBeforeScaleUp []string + var sshClientConfig *ssh.ClientConfig containerName := "vsphere-csi-controller" ginkgo.By("Creating StorageClass for Statefulset") // decide which test setup is available to run @@ -688,12 +679,13 @@ var _ = ginkgo.Describe("statefulset", func() { scParameters = nil storageClassName = "nginx-sc-default" } else { - ginkgo.By("CNS_TEST: Running for WCP setup") + storageClassName = defaultNginxStorageClassName + ginkgo.By("Running for WCP setup") + profileID := e2eVSphere.GetSpbmPolicyID(storagePolicyName) scParameters[scParamStoragePolicyID] = profileID - storageClassName = defaultNginxStorageClassName // create resource quota - createResourceQuota(client, namespace, rqLimit, defaultNginxStorageClassName) + createResourceQuota(client, namespace, rqLimit, storageClassName) } ginkgo.By("scale down CSI driver POD to 1 , so that it will" + @@ -725,7 +717,7 @@ var _ = ginkgo.Describe("statefulset", func() { statefulset := GetStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. 
- Annotations["volume.beta.kubernetes.io/storage-class"] = storageClassName + Spec.StorageClassName = &storageClassName CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) // Waiting for pods status to be Ready @@ -757,7 +749,29 @@ var _ = ginkgo.Describe("statefulset", func() { time.Sleep(pollTimeoutShort) ginkgo.By("Validate ListVolume Response for all the volumes") - logMessage := "List volume response: entries:" + var logMessage string + if vanillaCluster { + logMessage = "List volume response: entries:" + nimbusGeneratedK8sVmPwd := GetAndExpectStringEnvVar(nimbusK8sVmPwd) + sshClientConfig = &ssh.ClientConfig{ + User: "root", + Auth: []ssh.AuthMethod{ + ssh.Password(nimbusGeneratedK8sVmPwd), + }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + } + if supervisorCluster { + logMessage = "ListVolumes:" + svcMasterPswd = GetAndExpectStringEnvVar(svcMasterPassword) + sshClientConfig = &ssh.ClientConfig{ + User: "root", + Auth: []ssh.AuthMethod{ + ssh.Password(svcMasterPswd), + }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + } _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, containerName, logMessage, volumesBeforeScaleUp) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -780,17 +794,19 @@ var _ = ginkgo.Describe("statefulset", func() { _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, containerName, logMessage, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Delete volume from CNS and verify the error message") - logMessage = "difference between number of K8s volumes and CNS volumes is greater than threshold" - _, err = e2eVSphere.deleteCNSvolume(volumesBeforeScaleUp[0], false) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, err = e2eVSphere.deleteCNSvolume(volumesBeforeScaleUp[1], false) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - //List volume responses will show up in the interval of every 1 minute. - //To see the error, It is required to wait for 1 min after deleteting few Volumes - time.Sleep(pollTimeoutShort) - _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, containerName, logMessage, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if vanillaCluster { + ginkgo.By("Delete volume from CNS and verify the error message") + logMessage = "difference between number of K8s volumes and CNS volumes is greater than threshold" + _, err = e2eVSphere.deleteCNSvolume(volumesBeforeScaleUp[0], false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _, err = e2eVSphere.deleteCNSvolume(volumesBeforeScaleUp[1], false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + //List volume responses will show up in the interval of every 1 minute. 
+ //To see the error, It is required to wait for 1 min after deleteting few Volumes + time.Sleep(pollTimeoutShort) + _, _, err = getCSIPodWhereListVolumeResponseIsPresent(ctx, client, sshClientConfig, containerName, logMessage, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } replicas = 0 ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas)) diff --git a/tests/e2e/storage_policy_utils.go b/tests/e2e/storage_policy_utils.go index 27904a8ae0..76cb675022 100644 --- a/tests/e2e/storage_policy_utils.go +++ b/tests/e2e/storage_policy_utils.go @@ -24,10 +24,11 @@ import ( "strings" "time" + "github.com/davecgh/go-spew/spew" "github.com/onsi/gomega" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/pbm" - "github.com/vmware/govmomi/pbm/types" + pbmtypes "github.com/vmware/govmomi/pbm/types" "github.com/vmware/govmomi/vapi/tags" vim25types "github.com/vmware/govmomi/vim25/types" "k8s.io/kubernetes/test/e2e/framework" @@ -35,7 +36,7 @@ import ( // createVmfsStoragePolicy create a vmfs policy with given allocation type and category/tag map func createVmfsStoragePolicy(ctx context.Context, pbmClient *pbm.Client, allocationType string, - categoryTagMap map[string]string) (*types.PbmProfileId, string) { + categoryTagMap map[string]string) (*pbmtypes.PbmProfileId, string) { s1 := rand.NewSource(time.Now().UnixNano()) r1 := rand.New(s1) profileName := fmt.Sprintf("vmfs-policy-%v-%v", time.Now().UnixNano(), strconv.Itoa(r1.Intn(1000))) @@ -83,8 +84,8 @@ func createVmfsStoragePolicy(ctx context.Context, pbmClient *pbm.Client, allocat } // deleteStoragePolicy deletes the given storage policy -func deleteStoragePolicy(ctx context.Context, pbmClient *pbm.Client, profileID *types.PbmProfileId) { - _, err := pbmClient.DeleteProfile(ctx, []types.PbmProfileId{*profileID}) +func deleteStoragePolicy(ctx context.Context, pbmClient *pbm.Client, profileID *pbmtypes.PbmProfileId) { + _, err := pbmClient.DeleteProfile(ctx, []pbmtypes.PbmProfileId{*profileID}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -151,7 +152,7 @@ func getDsMoRefFromURL(ctx context.Context, dsURL string) vim25types.ManagedObje // createTagBasedPolicy creates a tag based storage policy with given tag and category map func createTagBasedPolicy(ctx context.Context, pbmClient *pbm.Client, - categoryTagMap map[string]string) (*types.PbmProfileId, string) { + categoryTagMap map[string]string) (*pbmtypes.PbmProfileId, string) { s1 := rand.NewSource(time.Now().UnixNano()) r1 := rand.New(s1) profileName := fmt.Sprintf("shared-ds-policy-%v-%v", time.Now().UnixNano(), strconv.Itoa(r1.Intn(1000))) @@ -186,3 +187,237 @@ func createTagBasedPolicy(ctx context.Context, pbmClient *pbm.Client, return profileID, profileName } + +// updateVmfsPolicyAlloctype updates the given policy's allocation type to given type +func updateVmfsPolicyAlloctype( + ctx context.Context, pbmClient *pbm.Client, allocationType string, policyName string, + policyId *pbmtypes.PbmProfileId) error { + + updateSpec := pbmtypes.PbmCapabilityProfileUpdateSpec{ + Name: policyName, + Constraints: &pbmtypes.PbmCapabilitySubProfileConstraints{ + SubProfiles: []pbmtypes.PbmCapabilitySubProfile{ + { + Capability: []pbmtypes.PbmCapabilityInstance{ + { + Id: pbmtypes.PbmCapabilityMetadataUniqueId{ + Id: "VolumeAllocationType", + Namespace: "com.vmware.storage.volumeallocation", + }, + Constraint: []pbmtypes.PbmCapabilityConstraintInstance{ + { + PropertyInstance: []pbmtypes.PbmCapabilityPropertyInstance{ + { + Id: "VolumeAllocationType", + 
Value: allocationType, + }, + }, + }, + }, + }, + }, + Name: "volumeallocation.capabilityobjectschema.namespaceInfo.info.label rules", + }, + }, + }, + } + err := pbmClient.UpdateProfile(ctx, *policyId, updateSpec) + if err != nil { + return err + } + policyContent, err := pbmClient.RetrieveContent(ctx, []pbmtypes.PbmProfileId{*policyId}) + if err != nil { + return err + } + framework.Logf("policy content after update", spew.Sdump(policyContent)) + return nil +} + +// createVsanDStoragePolicy create a vsand storage policy with given volume allocation type and category/tag map +func createVsanDStoragePolicy(ctx context.Context, pbmClient *pbm.Client, allocationType string, + categoryTagMap map[string]string) (*pbmtypes.PbmProfileId, string) { + s1 := rand.NewSource(time.Now().UnixNano()) + r1 := rand.New(s1) + profileName := fmt.Sprintf("vsand-policy-%v-%v", time.Now().UnixNano(), strconv.Itoa(r1.Intn(1000))) + pbmCreateSpec := pbm.CapabilityProfileCreateSpec{ + Name: profileName, + Description: "VSAND test policy", + Category: "REQUIREMENT", + CapabilityList: []pbm.Capability{ + { + ID: "vSANDirectVolumeAllocation", + Namespace: "vSANDirect", + PropertyList: []pbm.Property{ + { + ID: "vSANDirectVolumeAllocation", + Value: allocationType, + DataType: "string", + }, + }, + }, + { + ID: "vSANDirectType", + Namespace: "vSANDirect", + PropertyList: []pbm.Property{ + { + ID: "vSANDirectType", + Value: "vSANDirect", + DataType: "string", + }, + }, + }, + }, + } + for k, v := range categoryTagMap { + + pbmCreateSpec.CapabilityList = append(pbmCreateSpec.CapabilityList, pbm.Capability{ + ID: k, + Namespace: "http://www.vmware.com/storage/tag", + PropertyList: []pbm.Property{ + { + ID: "com.vmware.storage.tag." + k + ".property", + Value: v, + DataType: "set", + }, + }, + }) + } + createSpecVSAND, err := pbm.CreateCapabilityProfileSpec(pbmCreateSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + profileID, err := pbmClient.CreateProfile(ctx, *createSpecVSAND) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + framework.Logf("VSAND profile with id: %v and name: '%v' created", profileID.UniqueId, profileName) + + return profileID, profileName +} + +// createStoragePolicyWithSharedVmfsNVsand create a storage policy with vmfs and vsand rules +// with given volume allocation type and category/tag map +func createStoragePolicyWithSharedVmfsNVsand(ctx context.Context, pbmClient *pbm.Client, + allocationType string, categoryTagMap map[string]string) (*pbmtypes.PbmProfileId, string) { + s1 := rand.NewSource(time.Now().UnixNano()) + r1 := rand.New(s1) + var category, tag string + profileName := fmt.Sprintf("storage-policy-%v-%v", time.Now().UnixNano(), strconv.Itoa(r1.Intn(1000))) + + for k, v := range categoryTagMap { + category = k + tag = v + } + framework.Logf("category: %v, tag: %v", category, tag) + + pbmCapabilityProfileSpec := pbmtypes.PbmCapabilityProfileCreateSpec{ + Name: profileName, + Description: "VSAND-VMFS test policy", + Category: "REQUIREMENT", + ResourceType: pbmtypes.PbmProfileResourceType{ + ResourceType: string(pbmtypes.PbmProfileResourceTypeEnumSTORAGE), + }, + Constraints: &pbmtypes.PbmCapabilitySubProfileConstraints{ + SubProfiles: []pbmtypes.PbmCapabilitySubProfile{ + { + Capability: []pbmtypes.PbmCapabilityInstance{ + { + Id: pbmtypes.PbmCapabilityMetadataUniqueId{ + Id: "vSANDirectVolumeAllocation", + Namespace: "vSANDirect", + }, + Constraint: []pbmtypes.PbmCapabilityConstraintInstance{ + { + PropertyInstance: []pbmtypes.PbmCapabilityPropertyInstance{ + { + Id: 
"vSANDirectVolumeAllocation", + Value: allocationType, + }, + }, + }, + }, + }, + { + Id: pbmtypes.PbmCapabilityMetadataUniqueId{ + Id: "vSANDirectType", + Namespace: "vSANDirect", + }, + Constraint: []pbmtypes.PbmCapabilityConstraintInstance{ + { + PropertyInstance: []pbmtypes.PbmCapabilityPropertyInstance{ + { + Id: "vSANDirectType", + Value: "vSANDirect", + }, + }, + }, + }, + }, + { + Id: pbmtypes.PbmCapabilityMetadataUniqueId{ + Id: category, + Namespace: "http://www.vmware.com/storage/tag", + }, + Constraint: []pbmtypes.PbmCapabilityConstraintInstance{ + { + PropertyInstance: []pbmtypes.PbmCapabilityPropertyInstance{ + { + Id: "com.vmware.storage.tag." + category + ".property", + Value: pbmtypes.PbmCapabilityDiscreteSet{ + Values: []vim25types.AnyType{tag}, + }, + }, + }, + }, + }, + }, + }, + Name: "vsandirect rules", + }, + { + Capability: []pbmtypes.PbmCapabilityInstance{ + { + Id: pbmtypes.PbmCapabilityMetadataUniqueId{ + Id: "VolumeAllocationType", + Namespace: "com.vmware.storage.volumeallocation", + }, + Constraint: []pbmtypes.PbmCapabilityConstraintInstance{ + { + PropertyInstance: []pbmtypes.PbmCapabilityPropertyInstance{ + { + Id: "VolumeAllocationType", + Value: allocationType, + }, + }, + }, + }, + }, + { + Id: pbmtypes.PbmCapabilityMetadataUniqueId{ + Id: category, + Namespace: "http://www.vmware.com/storage/tag", + }, + Constraint: []pbmtypes.PbmCapabilityConstraintInstance{ + { + PropertyInstance: []pbmtypes.PbmCapabilityPropertyInstance{ + { + Id: "com.vmware.storage.tag." + category + ".property", + Value: pbmtypes.PbmCapabilityDiscreteSet{ + Values: []vim25types.AnyType{tag}, + }, + }, + }, + }, + }, + }, + }, + Name: "vmfs rules", + }, + }, + }, + } + profileID, err := pbmClient.CreateProfile(ctx, pbmCapabilityProfileSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + framework.Logf("VSAND and vmfs profile with id: %v and name: '%v' created", profileID.UniqueId, profileName) + + return profileID, profileName +} diff --git a/tests/e2e/storagepolicy.go b/tests/e2e/storagepolicy.go index 65c976e82e..1347e86484 100644 --- a/tests/e2e/storagepolicy.go +++ b/tests/e2e/storagepolicy.go @@ -78,10 +78,12 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] "+ ginkgo.AfterEach(func() { if supervisorCluster { deleteResourceQuota(client, namespace) + dumpSvcNsEventsOnTestFailure(client, namespace) } if guestCluster { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) } }) diff --git a/tests/e2e/svmotion_detached_volume.go b/tests/e2e/svmotion_detached_volume.go deleted file mode 100644 index fb90ce8822..0000000000 --- a/tests/e2e/svmotion_detached_volume.go +++ /dev/null @@ -1,444 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package e2e - -import ( - "context" - "fmt" - "math/rand" - "os" - "strings" - "time" - - ginkgo "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - "github.com/vmware/govmomi/find" - "github.com/vmware/govmomi/object" - pbmtypes "github.com/vmware/govmomi/pbm/types" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/test/e2e/framework" - fnodes "k8s.io/kubernetes/test/e2e/framework/node" - fpod "k8s.io/kubernetes/test/e2e/framework/pod" - fpv "k8s.io/kubernetes/test/e2e/framework/pv" - admissionapi "k8s.io/pod-security-admission/api" -) - -/* - Test to verify sVmotion works fine for volumes in detached state - - Steps - 1. Create StorageClass. - 2. Create PVC. - 3. Expect PVC to pass and verified it is created correctly. - 4. Relocate detached volume - 5. Invoke CNS Query API and validate datastore URL value changed correctly -*/ - -var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] Relocate detached volume ", func() { - f := framework.NewDefaultFramework("svmotion-detached-disk") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - var ( - client clientset.Interface - namespace string - scParameters map[string]string - datastoreURL string - sourceDatastore *object.Datastore - destDatastore *object.Datastore - datacenter *object.Datacenter - destDsURL string - pvclaims []*v1.PersistentVolumeClaim - fcdID string - ) - ginkgo.BeforeEach(func() { - bootstrap() - client = f.ClientSet - namespace = f.Namespace.Name - scParameters = make(map[string]string) - datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) - destDsURL = GetAndExpectStringEnvVar(destinationDatastoreURL) - nodeList, err := fnodes.GetReadySchedulableNodes(f.ClientSet) - framework.ExpectNoError(err, "Unable to find ready and schedulable Node") - if !(len(nodeList.Items) > 0) { - framework.Failf("Unable to find ready and schedulable Node") - } - }) - - // Test for relocating volume being detached state - ginkgo.It("Verify relocating detached volume works fine", func() { - ginkgo.By("Invoking Test for relocating detached volume") - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - finder := find.NewFinder(e2eVSphere.Client.Client, false) - cfg, err := getConfig() - var datacenters []string - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - dcList := strings.Split(cfg.Global.Datacenters, - ",") - for _, dc := range dcList { - dcName := strings.TrimSpace(dc) - if dcName != "" { - datacenters = append(datacenters, dcName) - } - } - - for _, dc := range datacenters { - datacenter, err = finder.Datacenter(ctx, dc) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - finder.SetDatacenter(datacenter) - sourceDatastore, err = getDatastoreByURL(ctx, datastoreURL, datacenter) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - destDatastore, err = getDatastoreByURL(ctx, destDsURL, datacenter) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - - scParameters[scParamDatastoreURL] = datastoreURL - storageclass, pvclaim, err := createPVCAndStorageClass(client, - namespace, nil, scParameters, "", nil, "", false, "") - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - defer func() { - err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - - 
ginkgo.By("Expect claim to provision volume successfully") - err = fpv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, - pvclaim.Namespace, pvclaim.Name, framework.Poll, time.Minute) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") - - pvclaims = append(pvclaims, pvclaim) - - persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - fcdID = persistentvolumes[0].Spec.CSI.VolumeHandle - - ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", - persistentvolumes[0].Spec.CSI.VolumeHandle)) - queryResult, err := e2eVSphere.queryCNSVolumeWithResult(persistentvolumes[0].Spec.CSI.VolumeHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - if len(queryResult.Volumes) == 0 { - err = fmt.Errorf("error: QueryCNSVolumeWithResult returned no volume") - } - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Verifying disk is created on the specified datastore") - if queryResult.Volumes[0].DatastoreUrl != datastoreURL { - err = fmt.Errorf("disk is created on the wrong datastore") - } - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Relocate volume - err = e2eVSphere.relocateFCD(ctx, fcdID, sourceDatastore.Reference(), destDatastore.Reference()) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to finish FCD relocation:%s to sync with pandora", - defaultPandoraSyncWaitTime, fcdID)) - time.Sleep(time.Duration(defaultPandoraSyncWaitTime) * time.Second) - - // verify disk is relocated to the specified destination datastore - ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s after relocating the disk", - persistentvolumes[0].Spec.CSI.VolumeHandle)) - queryResult, err = e2eVSphere.queryCNSVolumeWithResult(persistentvolumes[0].Spec.CSI.VolumeHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - if len(queryResult.Volumes) == 0 { - err = fmt.Errorf("error: QueryCNSVolumeWithResult returned no volume") - } - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Verifying disk is relocated to the specified datastore") - if queryResult.Volumes[0].DatastoreUrl != destDsURL { - err = fmt.Errorf("disk is relocated on the wrong datastore") - } - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - /* Online relocation of volume using cnsRelocate Volume API - STEPS: - 1. Create a tag based policy with 2 shared vmfs datastores(sharedVmfs-0 and sharedVmfs-1). - 2. Create SC with storage policy created in step 1. - 3. Create a PVC with sc created in step 2 and wait for it to come to bound state. - 4. Create a pod with the pvc created in step 3 and wait for it to come to Running state. - 5. Relocate volume from one shared datastore to another datastore using - CnsRelocateVolume API and verify the datastore of fcd after migration and volume compliance. - 6. Delete pod,pvc and sc. 
- */ - ginkgo.It("Online relocation of volume using cnsRelocate Volume API", func() { - ginkgo.By("Invoking Test for offline relocation") - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - sharedvmfsURL := os.Getenv(envSharedVMFSDatastoreURL) - if sharedvmfsURL == "" { - ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastoreURL)) - } - - sharedvmfs2URL := os.Getenv(envSharedVMFSDatastore2URL) - if sharedvmfs2URL == "" { - ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastore2URL)) - } - datastoreUrls := []string{sharedvmfsURL, sharedvmfs2URL} - - govmomiClient := newClient(ctx, &e2eVSphere) - pc := newPbmClient(ctx, govmomiClient) - scParameters := make(map[string]string) - pvcs := []*v1.PersistentVolumeClaim{} - - ginkgo.By("Creating tag and category to tag datastore") - - rand.New(rand.NewSource(time.Now().UnixNano())) - suffix := fmt.Sprintf("-%v-%v", time.Now().UnixNano(), rand.Intn(10000)) - categoryName := "category" + suffix - tagName := "tag" + suffix - - catID, tagID := createCategoryNTag(ctx, categoryName, tagName) - defer func() { - deleteCategoryNTag(ctx, catID, tagID) - }() - - ginkgo.By("Attaching tag to shared vmfs datastores") - - attachTagToDS(ctx, tagID, sharedvmfsURL) - defer func() { - detachTagFromDS(ctx, tagID, sharedvmfsURL) - }() - - attachTagToDS(ctx, tagID, sharedvmfs2URL) - defer func() { - detachTagFromDS(ctx, tagID, sharedvmfs2URL) - }() - - policyID, policyName := createTagBasedPolicy( - ctx, pc, map[string]string{categoryName: tagName}) - defer func() { - deleteStoragePolicy(ctx, pc, policyID) - }() - scParameters[scParamStoragePolicyName] = policyName - storageclass, pvclaim, err := createPVCAndStorageClass(client, - namespace, nil, scParameters, "", nil, "", false, "") - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - pvcs = append(pvcs, pvclaim) - - defer func() { - ginkgo.By("Delete the SCs created") - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.By("Deleted the SPBM polices created") - _, err = pc.DeleteProfile(ctx, []pbmtypes.PbmProfileId{*policyID}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - - ginkgo.By("Verify the PVCs created in step 3 are bound") - pvs, err := fpv.WaitForPVClaimBoundPhase(client, pvcs, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volumeID := pvs[0].Spec.CSI.VolumeHandle - - defer func() { - err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(pvs[0].Spec.CSI.VolumeHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - - ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id") - storagePolicyExists, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName) - e2eVSphere.verifyVolumeCompliance(volumeID, true) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(storagePolicyExists).To(gomega.BeTrue(), "storage policy verification failed") - - ginkgo.By("Creating a pod") - pod, err := createPod(client, namespace, nil, pvcs, false, "") - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - filePath := "/mnt/volume1/file1.txt" - filePath2 := "/mnt/volume1/file2.txt" - - //Write data on file.txt on Pod - data := "This file is written by Pod" - ginkgo.By("write to a file in pod") - writeDataOnFileFromPod(namespace, pod.Name, filePath, 
data) - - defer func() { - ginkgo.By("Delete the pod created") - err := fpod.DeletePodWithWait(client, pod) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - - ginkgo.By("Verify if VolumeID is created on the given datastores") - dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID) - framework.Logf("Volume is present on %s", dsUrlWhereVolumeIsPresent) - e2eVSphere.verifyDatastoreMatch(volumeID, datastoreUrls) - - // Get the destination ds url where the volume will get relocated - destDsUrl := "" - for _, dsurl := range datastoreUrls { - if dsurl != dsUrlWhereVolumeIsPresent { - destDsUrl = dsurl - } - } - - ginkgo.By("Relocate volume from one shared datastore to another datastore using" + - "CnsRelocateVolume API") - dsRefDest := getDsMoRefFromURL(ctx, destDsUrl) - err = e2eVSphere.cnsRelocateVolume(e2eVSphere, ctx, volumeID, dsRefDest) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - e2eVSphere.verifyDatastoreMatch(volumeID, []string{destDsUrl}) - - ginkgo.By("Verify that the relocated CNS volumes are compliant and have correct policy id") - storagePolicyExists, err = e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName) - e2eVSphere.verifyVolumeCompliance(volumeID, true) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(storagePolicyExists).To(gomega.BeTrue(), "storage policy verification failed") - - //Read file1.txt created from Pod - ginkgo.By("Read file.txt from Pod created by Pod") - output := readFileFromPod(namespace, pod.Name, filePath) - ginkgo.By(fmt.Sprintf("File contents from file.txt are: %s", output)) - data = data + "\n" - gomega.Expect(output == data).To(gomega.BeTrue(), "data verification failed after relocation") - - data = "Writing some data to pod post relocation" - ginkgo.By("writing to a file in pod post relocation") - writeDataOnFileFromPod(namespace, pod.Name, filePath2, data) - - ginkgo.By("Read file.txt created by Pod") - output = readFileFromPod(namespace, pod.Name, filePath2) - ginkgo.By(fmt.Sprintf("File contents from file.txt are: %s", output)) - data = data + "\n" - gomega.Expect(output == data).To(gomega.BeTrue(), "data verification failed after relocation") - - }) - - /* Offline relocation of volume using cnsRelocate Volume API - STEPS: - 1. Create a tag based policy with 2 shared vmfs datastores(sharedVmfs-0 and sharedVmfs-1). - 2. Create SC with storage policy created in step 1. - 3. Create a PVC with sc created in step 2 and wait for it to come to bound state. - 4. Relocate volume from one shared datastore to another datastore - using CnsRelocateVolume API and verify the datastore of fcd after migration and volume compliance. - 5. Delete pvc and sc. 
- */ - ginkgo.It("Offline relocation of volume using cnsRelocate Volume API", func() { - ginkgo.By("Invoking Test for offline relocation") - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - sharedvmfsURL := os.Getenv(envSharedVMFSDatastoreURL) - if sharedvmfsURL == "" { - ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastoreURL)) - } - - sharedvmfs2URL := os.Getenv(envSharedVMFSDatastore2URL) - if sharedvmfs2URL == "" { - ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastore2URL)) - } - datastoreUrls := []string{sharedvmfsURL, sharedvmfs2URL} - - govmomiClient := newClient(ctx, &e2eVSphere) - pc := newPbmClient(ctx, govmomiClient) - scParameters := make(map[string]string) - pvcs := []*v1.PersistentVolumeClaim{} - - rand.New(rand.NewSource(time.Now().UnixNano())) - suffix := fmt.Sprintf("-%v-%v", time.Now().UnixNano(), rand.Intn(10000)) - categoryName := "category" + suffix - tagName := "tag" + suffix - - ginkgo.By("Creating tag and category to tag datastore") - - catID, tagID := createCategoryNTag(ctx, categoryName, tagName) - defer func() { - deleteCategoryNTag(ctx, catID, tagID) - }() - - ginkgo.By("Attaching tag to shared vmfs datastores") - - attachTagToDS(ctx, tagID, sharedvmfsURL) - defer func() { - detachTagFromDS(ctx, tagID, sharedvmfsURL) - }() - - attachTagToDS(ctx, tagID, sharedvmfs2URL) - defer func() { - detachTagFromDS(ctx, tagID, sharedvmfs2URL) - }() - - policyID, policyName := createTagBasedPolicy( - ctx, pc, map[string]string{categoryName: tagName}) - defer func() { - deleteStoragePolicy(ctx, pc, policyID) - }() - scParameters[scParamStoragePolicyName] = policyName - storageclass, pvclaim, err := createPVCAndStorageClass(client, - namespace, nil, scParameters, "", nil, "", false, "") - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - pvcs = append(pvcs, pvclaim) - - defer func() { - ginkgo.By("Delete the SCs created") - err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - - ginkgo.By("Verify the PVCs created in step 3 are bound") - pvs, err := fpv.WaitForPVClaimBoundPhase(client, pvcs, framework.ClaimProvisionTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volumeID := pvs[0].Spec.CSI.VolumeHandle - - defer func() { - err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = e2eVSphere.waitForCNSVolumeToBeDeleted(pvs[0].Spec.CSI.VolumeHandle) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - - ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id") - storagePolicyExists, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName) - e2eVSphere.verifyVolumeCompliance(volumeID, true) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(storagePolicyExists).To(gomega.BeTrue(), "storage policy verification failed") - - ginkgo.By("Verify if VolumeID is created on the given datastores") - dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID) - framework.Logf("Volume is present on %s", dsUrlWhereVolumeIsPresent) - e2eVSphere.verifyDatastoreMatch(volumeID, datastoreUrls) - - // Get the destination ds url where the volume will get relocated - destDsUrl := "" - for _, dsurl := range datastoreUrls { - if dsurl != dsUrlWhereVolumeIsPresent { - destDsUrl = dsurl - } - } - - dsRefDest := getDsMoRefFromURL(ctx, destDsUrl) - err = e2eVSphere.cnsRelocateVolume(e2eVSphere, 
ctx, volumeID, dsRefDest) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - e2eVSphere.verifyDatastoreMatch(volumeID, []string{destDsUrl}) - - ginkgo.By("Verify that the relocated CNS volumes are compliant and have correct policy id") - storagePolicyExists, err = e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName) - e2eVSphere.verifyVolumeCompliance(volumeID, true) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(storagePolicyExists).To(gomega.BeTrue(), "storage policy verification failed") - - }) -}) diff --git a/tests/e2e/svmotion_volumes.go b/tests/e2e/svmotion_volumes.go new file mode 100644 index 0000000000..7526408fd5 --- /dev/null +++ b/tests/e2e/svmotion_volumes.go @@ -0,0 +1,1377 @@ +/* +Copyright 2019-2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "math/rand" + "os" + "os/exec" + "strings" + "sync" + "time" + + snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + cnstypes "github.com/vmware/govmomi/cns/types" + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/object" + pbmtypes "github.com/vmware/govmomi/pbm/types" + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + fnodes "k8s.io/kubernetes/test/e2e/framework/node" + fpod "k8s.io/kubernetes/test/e2e/framework/pod" + fpv "k8s.io/kubernetes/test/e2e/framework/pv" + admissionapi "k8s.io/pod-security-admission/api" +) + +/* + Test to verify sVmotion works fine for volumes in detached state + + Steps + 1. Create StorageClass. + 2. Create PVC. + 3. Expect PVC to pass and verified it is created correctly. + 4. Relocate detached volume + 5. 
Invoke CNS Query API and validate datastore URL value changed correctly +*/ + +var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] Relocate detached volume ", func() { + f := framework.NewDefaultFramework("svmotion-disk") + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + var ( + client clientset.Interface + namespace string + scParameters map[string]string + datastoreURL string + sourceDatastore *object.Datastore + destDatastore *object.Datastore + datacenter *object.Datacenter + destDsURL string + pvclaims []*v1.PersistentVolumeClaim + fcdID string + labelKey string + labelValue string + pvc10g string + ) + ginkgo.BeforeEach(func() { + bootstrap() + client = f.ClientSet + namespace = f.Namespace.Name + scParameters = make(map[string]string) + datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) + destDsURL = GetAndExpectStringEnvVar(destinationDatastoreURL) + nodeList, err := fnodes.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + if !(len(nodeList.Items) > 0) { + framework.Failf("Unable to find ready and schedulable Node") + } + labelKey = "app" + labelValue = "e2e-labels" + pvc10g = "10Gi" + }) + + // Test for relocating volume being detached state + ginkgo.It("Verify relocating detached volume works fine", func() { + ginkgo.By("Invoking Test for relocating detached volume") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + finder := find.NewFinder(e2eVSphere.Client.Client, false) + cfg, err := getConfig() + var datacenters []string + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + dcList := strings.Split(cfg.Global.Datacenters, + ",") + for _, dc := range dcList { + dcName := strings.TrimSpace(dc) + if dcName != "" { + datacenters = append(datacenters, dcName) + } + } + + for _, dc := range datacenters { + datacenter, err = finder.Datacenter(ctx, dc) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + finder.SetDatacenter(datacenter) + sourceDatastore, err = getDatastoreByURL(ctx, datastoreURL, datacenter) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + destDatastore, err = getDatastoreByURL(ctx, destDsURL, datacenter) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Create storageclass and PVC from that storageclass") + scParameters[scParamDatastoreURL] = datastoreURL + storageclass, pvclaim, err := createPVCAndStorageClass(client, + namespace, nil, scParameters, "", nil, "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete Storageclass and PVC") + err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Expect claim to provision volume successfully") + err = fpv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, + pvclaim.Namespace, pvclaim.Name, framework.Poll, time.Minute) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume") + + pvclaims = append(pvclaims, pvclaim) + + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + fcdID = persistentvolumes[0].Spec.CSI.VolumeHandle + + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", + 
persistentvolumes[0].Spec.CSI.VolumeHandle)) + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(persistentvolumes[0].Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if len(queryResult.Volumes) == 0 { + err = fmt.Errorf("error: QueryCNSVolumeWithResult returned no volume") + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Verifying disk is created on the specified datastore") + if queryResult.Volumes[0].DatastoreUrl != datastoreURL { + err = fmt.Errorf("disk is created on the wrong datastore") + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Relocate volume + ginkgo.By("Relocating FCD to different datastore") + err = e2eVSphere.relocateFCD(ctx, fcdID, sourceDatastore.Reference(), destDatastore.Reference()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to finish FCD relocation:%s to sync with pandora", + defaultPandoraSyncWaitTime, fcdID)) + time.Sleep(time.Duration(defaultPandoraSyncWaitTime) * time.Second) + + // verify disk is relocated to the specified destination datastore + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s after relocating the disk", + persistentvolumes[0].Spec.CSI.VolumeHandle)) + queryResult, err = e2eVSphere.queryCNSVolumeWithResult(persistentvolumes[0].Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if len(queryResult.Volumes) == 0 { + err = fmt.Errorf("error: QueryCNSVolumeWithResult returned no volume") + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Verifying disk is relocated to the specified datastore") + if queryResult.Volumes[0].DatastoreUrl != destDsURL { + err = fmt.Errorf("disk is relocated on the wrong datastore") + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + /* Online relocation of volume using cnsRelocate Volume API + STEPS: + 1. Create a tag based policy with 2 shared vmfs datastores(sharedVmfs-0 and sharedVmfs-1). + 2. Create SC with storage policy created in step 1. + 3. Create a PVC with sc created in step 2 and wait for it to come to bound state. + 4. Create a pod with the pvc created in step 3 and wait for it to come to Running state. + 5. Relocate volume from one shared datastore to another datastore using + CnsRelocateVolume API and verify the datastore of fcd after migration and volume compliance. + 6. Delete pod,pvc and sc. 
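       For reference, the heart of step 5, condensed from the test body below (a sketch
       only; volumeID, destDsUrl and policyName are assumed to already be set up as in
       steps 1-4):

           dsRefDest := getDsMoRefFromURL(ctx, destDsUrl)
           // relocate the backing FCD to the destination datastore via CNS
           _, err := e2eVSphere.cnsRelocateVolume(e2eVSphere, ctx, volumeID, dsRefDest)
           gomega.Expect(err).NotTo(gomega.HaveOccurred())
           // confirm the volume now lives only on the destination datastore
           e2eVSphere.verifyDatastoreMatch(volumeID, []string{destDsUrl})
           // and that it is still compliant with the tag based policy
           storagePolicyMatches, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName)
           gomega.Expect(err).NotTo(gomega.HaveOccurred())
           gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed")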
+ */ + ginkgo.It("Online relocation of volume using cnsRelocate Volume API", func() { + ginkgo.By("Invoking Test for offline relocation") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sharedvmfsURL := os.Getenv(envSharedVMFSDatastoreURL) + if sharedvmfsURL == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastoreURL)) + } + + sharedvmfs2URL := os.Getenv(envSharedVMFSDatastore2URL) + if sharedvmfs2URL == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastore2URL)) + } + datastoreUrls := []string{sharedvmfsURL, sharedvmfs2URL} + + govmomiClient := newClient(ctx, &e2eVSphere) + pc := newPbmClient(ctx, govmomiClient) + scParameters := make(map[string]string) + pvcs := []*v1.PersistentVolumeClaim{} + + ginkgo.By("Creating tag and category to tag datastore") + + rand.New(rand.NewSource(time.Now().UnixNano())) + suffix := fmt.Sprintf("-%v-%v", time.Now().UnixNano(), rand.Intn(10000)) + categoryName := "category" + suffix + tagName := "tag" + suffix + + catID, tagID := createCategoryNTag(ctx, categoryName, tagName) + defer func() { + deleteCategoryNTag(ctx, catID, tagID) + }() + + ginkgo.By("Attaching tag to shared vmfs datastores") + + attachTagToDS(ctx, tagID, sharedvmfsURL) + defer func() { + detachTagFromDS(ctx, tagID, sharedvmfsURL) + }() + + attachTagToDS(ctx, tagID, sharedvmfs2URL) + defer func() { + detachTagFromDS(ctx, tagID, sharedvmfs2URL) + }() + + ginkgo.By("Create Tag Based policy with shared datstores") + policyID, policyName := createTagBasedPolicy( + ctx, pc, map[string]string{categoryName: tagName}) + defer func() { + deleteStoragePolicy(ctx, pc, policyID) + }() + + ginkgo.By("Create Storageclass and a PVC from storageclass created") + scParameters[scParamStoragePolicyName] = policyName + storageclass, pvclaim, err := createPVCAndStorageClass(client, + namespace, nil, scParameters, "", nil, "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvcs = append(pvcs, pvclaim) + + defer func() { + ginkgo.By("Delete the SCs created") + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Deleted the SPBM polices created") + _, err = pc.DeleteProfile(ctx, []pbmtypes.PbmProfileId{*policyID}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Verify the PVCs created in step 3 are bound") + pvs, err := fpv.WaitForPVClaimBoundPhase(client, pvcs, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeID := pvs[0].Spec.CSI.VolumeHandle + + defer func() { + ginkgo.By("Delete the PVCs created in test") + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(pvs[0].Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id") + storagePolicyMatches, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName) + e2eVSphere.verifyVolumeCompliance(volumeID, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed") + + ginkgo.By("Creating a pod") + pod, err := createPod(client, namespace, nil, pvcs, false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete the pod created") + err := 
fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + filePath := "/mnt/volume1/file1.txt" + filePath2 := "/mnt/volume1/file2.txt" + + //Write data on file.txt on Pod + data := "This file is written by Pod" + ginkgo.By("write to a file in pod") + writeDataOnFileFromPod(namespace, pod.Name, filePath, data) + + ginkgo.By("Verify if VolumeID is created on the given datastores") + dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID) + framework.Logf("Volume: %s is present on %s", volumeID, dsUrlWhereVolumeIsPresent) + e2eVSphere.verifyDatastoreMatch(volumeID, datastoreUrls) + + // Get the destination ds url where the volume will get relocated + destDsUrl := "" + for _, dsurl := range datastoreUrls { + if dsurl != dsUrlWhereVolumeIsPresent { + destDsUrl = dsurl + break + } + } + + ginkgo.By("Relocate volume from one shared datastore to another datastore using" + + "CnsRelocateVolume API") + dsRefDest := getDsMoRefFromURL(ctx, destDsUrl) + _, err = e2eVSphere.cnsRelocateVolume(e2eVSphere, ctx, volumeID, dsRefDest) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + e2eVSphere.verifyDatastoreMatch(volumeID, []string{destDsUrl}) + + ginkgo.By("Verify that the relocated CNS volumes are compliant and have correct policy id") + storagePolicyMatches, err = e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName) + e2eVSphere.verifyVolumeCompliance(volumeID, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed") + + //Read file1.txt created from Pod + ginkgo.By("Read file.txt from Pod created by Pod") + output := readFileFromPod(namespace, pod.Name, filePath) + ginkgo.By(fmt.Sprintf("File contents from file.txt are: %s", output)) + data = data + "\n" + gomega.Expect(output == data).To(gomega.BeTrue(), "data verification failed after relocation") + + data = "Writing some data to pod post relocation" + ginkgo.By("writing to a file in pod post relocation") + writeDataOnFileFromPod(namespace, pod.Name, filePath2, data) + + ginkgo.By("Read file.txt created by Pod") + output = readFileFromPod(namespace, pod.Name, filePath2) + ginkgo.By(fmt.Sprintf("File contents from file.txt are: %s", output)) + data = data + "\n" + gomega.Expect(output == data).To(gomega.BeTrue(), "data verification failed after relocation") + + }) + + /* Offline relocation of volume using cnsRelocate Volume API + STEPS: + 1. Create a tag based policy with 2 shared vmfs datastores(sharedVmfs-0 and sharedVmfs-1). + 2. Create SC with storage policy created in step 1. + 3. Create a PVC with sc created in step 2 and wait for it to come to bound state. + 4. Relocate volume from one shared datastore to another datastore + using CnsRelocateVolume API and verify the datastore of fcd after migration and volume compliance. + 5. Delete pvc and sc. 
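       The tag-based policy setup behind steps 1-2, condensed from the test body below
       (a sketch only; pc is the pbm client returned by newPbmClient and the datastore
       URLs come from the environment variables checked above):

           // create a category/tag pair and attach the tag to both shared datastores
           catID, tagID := createCategoryNTag(ctx, categoryName, tagName)
           attachTagToDS(ctx, tagID, sharedvmfsURL)
           attachTagToDS(ctx, tagID, sharedvmfs2URL)
           // build an SPBM policy that matches the tagged datastores and feed it to the SC
           policyID, policyName := createTagBasedPolicy(ctx, pc, map[string]string{categoryName: tagName})
           scParameters[scParamStoragePolicyName] = policyName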
+ */ + ginkgo.It("Offline relocation of volume using cnsRelocate Volume API", func() { + ginkgo.By("Invoking Test for offline relocation") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sharedvmfsURL := os.Getenv(envSharedVMFSDatastoreURL) + if sharedvmfsURL == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastoreURL)) + } + + sharedvmfs2URL := os.Getenv(envSharedVMFSDatastore2URL) + if sharedvmfs2URL == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastore2URL)) + } + datastoreUrls := []string{sharedvmfsURL, sharedvmfs2URL} + + govmomiClient := newClient(ctx, &e2eVSphere) + pc := newPbmClient(ctx, govmomiClient) + scParameters := make(map[string]string) + pvcs := []*v1.PersistentVolumeClaim{} + + rand.New(rand.NewSource(time.Now().UnixNano())) + suffix := fmt.Sprintf("-%v-%v", time.Now().UnixNano(), rand.Intn(10000)) + categoryName := "category" + suffix + tagName := "tag" + suffix + + ginkgo.By("Creating tag and category to tag datastore") + + catID, tagID := createCategoryNTag(ctx, categoryName, tagName) + defer func() { + deleteCategoryNTag(ctx, catID, tagID) + }() + + ginkgo.By("Attaching tag to shared vmfs datastores") + + attachTagToDS(ctx, tagID, sharedvmfsURL) + defer func() { + detachTagFromDS(ctx, tagID, sharedvmfsURL) + }() + + attachTagToDS(ctx, tagID, sharedvmfs2URL) + defer func() { + detachTagFromDS(ctx, tagID, sharedvmfs2URL) + }() + + ginkgo.By("Create Tag Based policy with shared datstores") + policyID, policyName := createTagBasedPolicy( + ctx, pc, map[string]string{categoryName: tagName}) + defer func() { + deleteStoragePolicy(ctx, pc, policyID) + }() + + ginkgo.By("Create Storageclass and a PVC from storageclass created") + scParameters[scParamStoragePolicyName] = policyName + storageclass, pvclaim, err := createPVCAndStorageClass(client, + namespace, nil, scParameters, "", nil, "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvcs = append(pvcs, pvclaim) + + defer func() { + ginkgo.By("Delete the SCs created") + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Verify the PVCs created in step 3 are bound") + pvs, err := fpv.WaitForPVClaimBoundPhase(client, pvcs, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeID := pvs[0].Spec.CSI.VolumeHandle + + defer func() { + ginkgo.By("Delete the PVCs created in test") + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(pvs[0].Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id") + storagePolicyMatches, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName) + e2eVSphere.verifyVolumeCompliance(volumeID, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed") + + ginkgo.By("Verify if VolumeID is created on the given datastores") + dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID) + framework.Logf("Volume: %s is present on %s", volumeID, dsUrlWhereVolumeIsPresent) + e2eVSphere.verifyDatastoreMatch(volumeID, datastoreUrls) + + // Get the destination ds url where the volume will get relocated + destDsUrl := "" + for _, 
dsurl := range datastoreUrls { + if dsurl != dsUrlWhereVolumeIsPresent { + destDsUrl = dsurl + break + } + } + + ginkgo.By("Relocate volume from one shared datastore to another datastore using" + + "CnsRelocateVolume API") + dsRefDest := getDsMoRefFromURL(ctx, destDsUrl) + _, err = e2eVSphere.cnsRelocateVolume(e2eVSphere, ctx, volumeID, dsRefDest) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + e2eVSphere.verifyDatastoreMatch(volumeID, []string{destDsUrl}) + + ginkgo.By("Verify that the relocated CNS volumes are compliant and have correct policy id") + storagePolicyMatches, err = e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName) + e2eVSphere.verifyVolumeCompliance(volumeID, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed") + + }) + + /* + Start attached volume's relocation and then expand it + Steps: + 1. Create a SPBM policy with tag based rules. + 2. Create SC using policy created in step 1. + 3. Create PVC using SC created in step 2 and and wait for it to be bound. + 4. Create a pod with the pod created in step 3. + 5. Start writing some IO to pod which run in parallel to steps 6-7. + 6. Relocate CNS volume corresponding to pvc from step 3 to a different datastore. + 7. While relocation is running resize the volume. + 8. Verify the IO written so far. + 9. Verify relocation was successful. + 10. Verify online volume expansion is successful. + 11. Delete all the objects created during the test. + */ + ginkgo.It("[csi-block-vanilla][csi-block-vanilla-parallelized]"+ + " Start attached volume's relocation and then expand it", func() { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sharedvmfsURL, sharedVsanDatastoreURL := "", "" + var datastoreUrls []string + var policyName string + pc := newPbmClient(ctx, e2eVSphere.Client) + + sharedvmfsURL = os.Getenv(envSharedVMFSDatastoreURL) + if sharedvmfsURL == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastoreURL)) + } + + sharedVsanDatastoreURL = os.Getenv(envSharedDatastoreURL) + if sharedVsanDatastoreURL == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedDatastoreURL)) + } + datastoreUrls = append(datastoreUrls, sharedvmfsURL, sharedVsanDatastoreURL) + + scParameters := make(map[string]string) + pvcs := []*v1.PersistentVolumeClaim{} + pvclaims2d := [][]*v1.PersistentVolumeClaim{} + + rand.New(rand.NewSource(time.Now().UnixNano())) + suffix := fmt.Sprintf("-%v-%v", time.Now().UnixNano(), rand.Intn(10000)) + categoryName := "category" + suffix + tagName := "tag" + suffix + + catID, tagID := createCategoryNTag(ctx, categoryName, tagName) + defer func() { + deleteCategoryNTag(ctx, catID, tagID) + }() + + attachTagToDS(ctx, tagID, sharedvmfsURL) + defer func() { + detachTagFromDS(ctx, tagID, sharedvmfsURL) + }() + + attachTagToDS(ctx, tagID, sharedVsanDatastoreURL) + defer func() { + detachTagFromDS(ctx, tagID, sharedVsanDatastoreURL) + }() + + ginkgo.By("create SPBM policy with tag based rules") + ginkgo.By("create a storage class with a SPBM policy created from step 1") + ginkgo.By("create a PVC each using the storage policy created from step 2") + var storageclass *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var err error + var policyID *pbmtypes.PbmProfileId + + policyID, policyName = createTagBasedPolicy( + ctx, pc, map[string]string{categoryName: tagName}) + defer func() { + deleteStoragePolicy(ctx, pc, policyID) + }() + + 
framework.Logf("CNS_TEST: Running for vanilla k8s setup") + scParameters[scParamStoragePolicyName] = policyName + storageclass, pvclaim, err = createPVCAndStorageClass(client, + namespace, nil, scParameters, pvc10g, nil, "", true, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pvcs = append(pvcs, pvclaim) + pvclaims2d = append(pvclaims2d, []*v1.PersistentVolumeClaim{pvclaim}) + + defer func() { + ginkgo.By("Delete the SCs created in step 2") + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }() + + ginkgo.By("Verify the PVCs created in step 3 are bound") + pvs, err := fpv.WaitForPVClaimBoundPhase(client, pvcs, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id") + volumeID := pvs[0].Spec.CSI.VolumeHandle + storagePolicyMatches, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed") + e2eVSphere.verifyVolumeCompliance(volumeID, true) + + defer func() { + ginkgo.By("Delete the PVCs created in step 3") + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }() + + ginkgo.By("Create pods with using the PVCs created in step 3 and wait for them to be ready") + ginkgo.By("verify we can read and write on the PVCs") + pods := createMultiplePods(ctx, client, pvclaims2d, true) + defer func() { + ginkgo.By("Delete the pod created") + deletePodsAndWaitForVolsToDetach(ctx, client, pods, true) + }() + + ginkgo.By("Verify if VolumeID is created on the given datastores") + dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID) + framework.Logf("Volume: %s is present on %s", volumeID, dsUrlWhereVolumeIsPresent) + e2eVSphere.verifyDatastoreMatch(volumeID, datastoreUrls) + + // Get the destination ds url where the volume will get relocated + destDsUrl := "" + for _, dsurl := range datastoreUrls { + if dsurl != dsUrlWhereVolumeIsPresent { + destDsUrl = dsurl + break + } + } + + framework.Logf("dest url: %s", destDsUrl) + dsRefDest := getDsMoRefFromURL(ctx, destDsUrl) + + rand.New(rand.NewSource(time.Now().Unix())) + testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) + ginkgo.By(fmt.Sprintf("Creating a 100mb test data file %v", testdataFile)) + op, err := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=100").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + op, err = exec.Command("rm", "-f", testdataFile).Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + currentPvcSize := pvcs[0].Spec.Resources.Requests[v1.ResourceStorage] + newSize := currentPvcSize.DeepCopy() + newSize.Add(resource.MustParse(diskSize)) + + originalSizeInMb, err := getFSSizeMb(f, pods[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Start relocation of volume to a different datastore") + task, err := e2eVSphere.cnsRelocateVolume(e2eVSphere, ctx, volumeID, dsRefDest, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Waiting for a few seconds for relocation 
to be started properly on VC") + time.Sleep(time.Duration(10) * time.Second) + data := "This file is written by Pod" + + ginkgo.By("Resize volume and writing IO to pod while relocating volume") + var wg sync.WaitGroup + wg.Add(2) + go writeDataToMultipleFilesOnPodInParallel(namespace, pods[0].Name, data, &wg) + go resize(client, pvcs[0], pvcs[0].Spec.Resources.Requests[v1.ResourceStorage], newSize, &wg) + wg.Wait() + + ginkgo.By("Wait for relocation task to complete") + + cnsFault := waitForCNSTaskToComplete(ctx, task) + if cnsFault != nil { + err = fmt.Errorf("failed to relocate volume=%+v", cnsFault) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Verify relocation of volume is successful") + e2eVSphere.verifyDatastoreMatch(volumeID, []string{destDsUrl}) + + ginkgo.By("Wait and verify the file system resize on pvcs") + framework.Logf("Waiting for file system resize to finish for pvc %v", pvcs[0].Name) + pvcs[0], err = waitForFSResize(pvcs[0], client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pvcConditions := pvcs[0].Status.Conditions + expectEqual(len(pvcConditions), 0, "pvc %v should not have status conditions", pvcs[0].Name) + + var fsSize int64 + framework.Logf("Verify filesystem size for mount point /mnt/volume1 for pod %v", pods[0].Name) + fsSize, err = getFSSizeMb(f, pods[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("File system size after expansion : %v", fsSize) + gomega.Expect(fsSize > originalSizeInMb).To(gomega.BeTrue(), + fmt.Sprintf( + "filesystem size %v is not > than before expansion %v for pvc %q", + fsSize, originalSizeInMb, pvcs[0].Name)) + + framework.Logf("File system resize finished successfully for pvc %v", pvcs[0].Name) + + ginkgo.By("Verify the data on the PVCs match what was written in step 7") + for i := 0; i < 10; i++ { + filePath := fmt.Sprintf("/mnt/volume1/file%v.txt", i) + output := readFileFromPod(namespace, pods[0].Name, filePath) + ginkgo.By(fmt.Sprintf("File contents from file%v.txt are: %s", i, output)) + dataToVerify := data + "\n" + gomega.Expect(output == dataToVerify).To(gomega.BeTrue(), "data verification failed after relocation") + } + + storagePolicyMatches, err = e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed") + e2eVSphere.verifyVolumeCompliance(volumeID, true) + + ginkgo.By("Delete the pod created") + deletePodsAndWaitForVolsToDetach(ctx, client, pods, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + podsNew := createMultiplePods(ctx, client, pvclaims2d, true) + deletePodsAndWaitForVolsToDetach(ctx, client, podsNew, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }) + + /* + Start attached volume's expansion and then relocate it + Steps: + 1. Create a SPBM policy with tag based rules. + 2. Create SC using policy created in step 1. + 3. Create PVC using SC created in step 2 and and wait for it to be bound. + 4. Create a pod with the pod created in step 3. + 5. Start writing some IO to pod which run in parallel to steps 6-7. + 6. Resize the volume. + 7. While expansion is running relocate the volume allocation to different datastore. + 8. Verify the IO written so far. + 9. Verify relocation was successful. + 10. Verify online volume expansion is successful. + 11. Delete all the objects created during the test. 
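       The concurrency pattern behind steps 5-7, condensed from the test body below (a
       sketch only; pvcs, pods, volumeID, newSize and dsRefDest are assumed to be prepared
       as in the earlier steps):

           // kick off online expansion first
           _, err = expandPVCSize(pvcs[0], newSize, client)
           gomega.Expect(err).NotTo(gomega.HaveOccurred())
           // then run IO and the CNS relocation concurrently while the resize is in flight
           var wg sync.WaitGroup
           wg.Add(2)
           go writeDataToMultipleFilesOnPodInParallel(namespace, pods[0].Name, data, &wg)
           go cnsRelocateVolumeInParallel(e2eVSphere, ctx, volumeID, dsRefDest, true, &wg)
           wg.Wait()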
+ */ + ginkgo.It("[csi-block-vanilla][csi-block-vanilla-parallelized]"+ + " Start attached volume's expansion and then relocate it", func() { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sharedvmfsURL, sharedVsanDatastoreURL := "", "" + var datastoreUrls []string + var policyName string + pc := newPbmClient(ctx, e2eVSphere.Client) + + sharedvmfsURL = os.Getenv(envSharedVMFSDatastoreURL) + if sharedvmfsURL == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastoreURL)) + } + + sharedVsanDatastoreURL = os.Getenv(envSharedDatastoreURL) + if sharedVsanDatastoreURL == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedDatastoreURL)) + } + datastoreUrls = append(datastoreUrls, sharedvmfsURL, sharedVsanDatastoreURL) + + scParameters := make(map[string]string) + pvcs := []*v1.PersistentVolumeClaim{} + pvclaims2d := [][]*v1.PersistentVolumeClaim{} + + rand.New(rand.NewSource(time.Now().UnixNano())) + suffix := fmt.Sprintf("-%v-%v", time.Now().UnixNano(), rand.Intn(10000)) + categoryName := "category" + suffix + tagName := "tag" + suffix + + catID, tagID := createCategoryNTag(ctx, categoryName, tagName) + defer func() { + deleteCategoryNTag(ctx, catID, tagID) + }() + + attachTagToDS(ctx, tagID, sharedvmfsURL) + defer func() { + detachTagFromDS(ctx, tagID, sharedvmfsURL) + }() + + attachTagToDS(ctx, tagID, sharedVsanDatastoreURL) + defer func() { + detachTagFromDS(ctx, tagID, sharedVsanDatastoreURL) + }() + + ginkgo.By("create SPBM policy with tag based rules") + ginkgo.By("create a storage class with a SPBM policy created from step 1") + ginkgo.By("create a PVC each using the storage policy created from step 2") + var storageclass *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var err error + var policyID *pbmtypes.PbmProfileId + + policyID, policyName = createTagBasedPolicy( + ctx, pc, map[string]string{categoryName: tagName}) + defer func() { + deleteStoragePolicy(ctx, pc, policyID) + }() + + framework.Logf("CNS_TEST: Running for vanilla k8s setup") + scParameters[scParamStoragePolicyName] = policyName + storageclass, pvclaim, err = createPVCAndStorageClass(client, + namespace, nil, scParameters, pvc10g, nil, "", true, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pvcs = append(pvcs, pvclaim) + pvclaims2d = append(pvclaims2d, []*v1.PersistentVolumeClaim{pvclaim}) + + defer func() { + ginkgo.By("Delete the SCs created in step 2") + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }() + + ginkgo.By("Verify the PVCs created in step 3 are bound") + pvs, err := fpv.WaitForPVClaimBoundPhase(client, pvcs, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id") + volumeID := pvs[0].Spec.CSI.VolumeHandle + storagePolicyMatches, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed") + e2eVSphere.verifyVolumeCompliance(volumeID, true) + + defer func() { + ginkgo.By("Delete the PVCs created in step 3") + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }() + 
+ ginkgo.By("Create pods with using the PVCs created in step 3 and wait for them to be ready") + ginkgo.By("verify we can read and write on the PVCs") + pods := createMultiplePods(ctx, client, pvclaims2d, true) + defer func() { + ginkgo.By("Delete the pod created") + deletePodsAndWaitForVolsToDetach(ctx, client, pods, true) + }() + + ginkgo.By("Verify if VolumeID is created on the given datastores") + dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID) + framework.Logf("Volume: %s is present on %s", volumeID, dsUrlWhereVolumeIsPresent) + e2eVSphere.verifyDatastoreMatch(volumeID, datastoreUrls) + + // Get the destination ds url where the volume will get relocated + destDsUrl := "" + for _, dsurl := range datastoreUrls { + if dsurl != dsUrlWhereVolumeIsPresent { + destDsUrl = dsurl + break + } + } + + framework.Logf("dest url: %s", destDsUrl) + dsRefDest := getDsMoRefFromURL(ctx, destDsUrl) + + rand.New(rand.NewSource(time.Now().Unix())) + testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) + ginkgo.By(fmt.Sprintf("Creating a 100mb test data file %v", testdataFile)) + op, err := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=100").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + op, err = exec.Command("rm", "-f", testdataFile).Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + currentPvcSize := pvcs[0].Spec.Resources.Requests[v1.ResourceStorage] + newSize := currentPvcSize.DeepCopy() + newSize.Add(resource.MustParse(diskSize)) + + originalSizeInMb, err := getFSSizeMb(f, pods[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Start online expansion of volume") + framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize) + _, err = expandPVCSize(pvcs[0], newSize, client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + data := "This file is written by Pod" + + ginkgo.By("Relocate volume and writing IO to pod while resizing volume") + var wg sync.WaitGroup + wg.Add(2) + go writeDataToMultipleFilesOnPodInParallel(namespace, pods[0].Name, data, &wg) + go cnsRelocateVolumeInParallel(e2eVSphere, ctx, volumeID, dsRefDest, true, &wg) + wg.Wait() + + ginkgo.By("Verify relocation of volume is successful") + e2eVSphere.verifyDatastoreMatch(volumeID, []string{destDsUrl}) + + ginkgo.By("Wait and verify the file system resize on pvcs") + framework.Logf("Waiting for file system resize to finish for pvc %v", pvcs[0].Name) + pvcs[0], err = waitForFSResize(pvcs[0], client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pvcConditions := pvcs[0].Status.Conditions + expectEqual(len(pvcConditions), 0, "pvc %v should not have status conditions", pvcs[0].Name) + + var fsSize int64 + framework.Logf("Verify filesystem size for mount point /mnt/volume1 for pod %v", pods[0].Name) + fsSize, err = getFSSizeMb(f, pods[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("File system size after expansion : %v", fsSize) + gomega.Expect(fsSize > originalSizeInMb).To(gomega.BeTrue(), + fmt.Sprintf( + "filesystem size %v is not > than before expansion %v for pvc %q", + fsSize, originalSizeInMb, pvcs[0].Name)) + + framework.Logf("File system resize finished successfully for pvc %v", pvcs[0].Name) + + ginkgo.By("Verify the data on the PVCs match what was written in step 7") + for i := 0; i < 10; i++ { + filePath := fmt.Sprintf("/mnt/volume1/file%v.txt", i) + output := readFileFromPod(namespace, 
pods[0].Name, filePath) + ginkgo.By(fmt.Sprintf("File contents from file%v.txt are: %s", i, output)) + dataToVerify := data + "\n" + gomega.Expect(output == dataToVerify).To(gomega.BeTrue(), "data verification failed after relocation") + } + + storagePolicyMatches, err = e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed") + e2eVSphere.verifyVolumeCompliance(volumeID, true) + + ginkgo.By("Delete the pod created") + deletePodsAndWaitForVolsToDetach(ctx, client, pods, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + podsNew := createMultiplePods(ctx, client, pvclaims2d, true) + deletePodsAndWaitForVolsToDetach(ctx, client, podsNew, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }) + + /* + Start volume relocation and then update its metadata + Steps: + 1. Create a SPBM policy with tag based rules. + 2. Create SC using policy created in step 1. + 3. Create PVC using SC created in step 2 and and wait for it to be bound. + 4. Create a pod with the pod created in step 3. + 5. Start writing some IO to pod which run in parallel to steps 6-7. + 6. Relocate CNS volume corresponding to pvc from step 3 to a different datastore. + 7. While relocation is running add labels to PV and PVC. + 8. Verify the IO written so far. + 9. Verify relocation was successful. + 10. Verify the labels in cnsvolume metadata post relocation. + 11. Delete all the objects created during the test. + */ + ginkgo.It("[csi-block-vanilla][csi-block-vanilla-parallelized]"+ + " Start volume relocation and then update its metadata", func() { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sharedvmfsURL, sharedNfsURL := "", "" + var datastoreUrls []string + var policyName string + pc := newPbmClient(ctx, e2eVSphere.Client) + + sharedvmfsURL = os.Getenv(envSharedVMFSDatastoreURL) + if sharedvmfsURL == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedVMFSDatastoreURL)) + } + + sharedNfsURL = os.Getenv(envSharedNFSDatastoreURL) + if sharedNfsURL == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", sharedNfsURL)) + } + datastoreUrls = append(datastoreUrls, sharedvmfsURL, sharedNfsURL) + + scParameters := make(map[string]string) + policyNames := []string{} + pvcs := []*v1.PersistentVolumeClaim{} + pvclaims2d := [][]*v1.PersistentVolumeClaim{} + + rand.New(rand.NewSource(time.Now().UnixNano())) + suffix := fmt.Sprintf("-%v-%v", time.Now().UnixNano(), rand.Intn(10000)) + categoryName := "category" + suffix + tagName := "tag" + suffix + + catID, tagID := createCategoryNTag(ctx, categoryName, tagName) + defer func() { + deleteCategoryNTag(ctx, catID, tagID) + }() + + attachTagToDS(ctx, tagID, sharedvmfsURL) + defer func() { + detachTagFromDS(ctx, tagID, sharedvmfsURL) + }() + + attachTagToDS(ctx, tagID, sharedNfsURL) + defer func() { + detachTagFromDS(ctx, tagID, sharedNfsURL) + }() + + ginkgo.By("create SPBM policy with tag based rules") + ginkgo.By("create a storage class with a SPBM policy created from step 1") + ginkgo.By("create a PVC each using the storage policy created from step 2") + var storageclass *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var err error + var policyID *pbmtypes.PbmProfileId + + policyID, policyName = createTagBasedPolicy( + ctx, pc, map[string]string{categoryName: tagName}) + defer func() { + deleteStoragePolicy(ctx, pc, policyID) + }() + policyNames = append(policyNames, 
policyName) + + framework.Logf("CNS_TEST: Running for vanilla k8s setup") + scParameters[scParamStoragePolicyName] = policyName + storageclass, pvclaim, err = createPVCAndStorageClass(client, + namespace, nil, scParameters, pvc10g, nil, "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pvcs = append(pvcs, pvclaim) + pvclaims2d = append(pvclaims2d, []*v1.PersistentVolumeClaim{pvclaim}) + + defer func() { + ginkgo.By("Delete the SCs created in step 2") + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }() + + ginkgo.By("Verify the PVCs created in step 3 are bound") + pvs, err := fpv.WaitForPVClaimBoundPhase(client, pvcs, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id") + + volumeID := pvs[0].Spec.CSI.VolumeHandle + storagePolicyMatches, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyNames[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed") + e2eVSphere.verifyVolumeCompliance(volumeID, true) + + defer func() { + ginkgo.By("Delete the PVCs created in step 3") + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }() + + ginkgo.By("Create pods with using the PVCs created in step 3 and wait for them to be ready") + ginkgo.By("verify we can read and write on the PVCs") + pods := createMultiplePods(ctx, client, pvclaims2d, true) + defer func() { + ginkgo.By("Delete the pod created") + deletePodsAndWaitForVolsToDetach(ctx, client, pods, true) + }() + + ginkgo.By("Verify if VolumeID is created on the given datastores") + dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID) + framework.Logf("Volume: %s is present on %s", volumeID, dsUrlWhereVolumeIsPresent) + e2eVSphere.verifyDatastoreMatch(volumeID, datastoreUrls) + + // Get the destination ds url where the volume will get relocated + destDsUrl := "" + for _, dsurl := range datastoreUrls { + if dsurl != dsUrlWhereVolumeIsPresent { + destDsUrl = dsurl + break + } + } + + framework.Logf("dest url: %s", destDsUrl) + dsRefDest := getDsMoRefFromURL(ctx, destDsUrl) + + rand.New(rand.NewSource(time.Now().Unix())) + testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) + ginkgo.By(fmt.Sprintf("Creating a 100mb test data file %v", testdataFile)) + op, err := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=100").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + op, err = exec.Command("rm", "-f", testdataFile).Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + labels := make(map[string]string) + labels[labelKey] = labelValue + + ginkgo.By("Start relocation of volume to a different datastore") + task, err := e2eVSphere.cnsRelocateVolume(e2eVSphere, ctx, volumeID, dsRefDest, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Waiting for a few seconds for relocation to be started properly on VC") + time.Sleep(time.Duration(10) * time.Second) + + ginkgo.By("Adding labels to volume and writing IO to pod whle relocating volume") + var 
wg sync.WaitGroup + wg.Add(3) + go writeKnownData2PodInParallel(f, pods[0], testdataFile, &wg) + go updatePvcLabelsInParallel(ctx, client, namespace, labels, pvcs, &wg) + go updatePvLabelsInParallel(ctx, client, namespace, labels, pvs, &wg) + wg.Wait() + + ginkgo.By("Wait for relocation task to complete") + cnsFault := waitForCNSTaskToComplete(ctx, task) + if cnsFault != nil { + err = fmt.Errorf("failed to relocate volume=%+v", cnsFault) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By("Verify relocation of volume is successful") + e2eVSphere.verifyDatastoreMatch(volumeID, []string{destDsUrl}) + + ginkgo.By(fmt.Sprintf("Waiting for labels %+v to be updated for pvc %s in namespace %s", + labels, pvclaim.Name, namespace)) + pv := getPvFromClaim(client, namespace, pvclaim.Name) + err = e2eVSphere.waitForLabelsToBeUpdated(pv.Spec.CSI.VolumeHandle, labels, + string(cnstypes.CnsKubernetesEntityTypePVC), pvclaim.Name, pvclaim.Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Waiting for labels %+v to be updated for pv %s", + labels, pv.Name)) + err = e2eVSphere.waitForLabelsToBeUpdated(pv.Spec.CSI.VolumeHandle, labels, + string(cnstypes.CnsKubernetesEntityTypePV), pv.Name, pv.Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify the data on the PVCs match what was written in step 7") + verifyKnownDataInPod(f, pods[0], testdataFile) + + storagePolicyMatches, err = e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyNames[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed") + e2eVSphere.verifyVolumeCompliance(volumeID, true) + + ginkgo.By("Delete the pod created") + deletePodsAndWaitForVolsToDetach(ctx, client, pods, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + podsNew := createMultiplePods(ctx, client, pvclaims2d, true) + deletePodsAndWaitForVolsToDetach(ctx, client, podsNew, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }) + + /* + Start volume relocation and take a snapshot + Steps: + 1. Create a SPBM policy with tag based rules. + 2. Create SC using policy created in step 1. + 3. Create PVC using SC created in step 2 and and wait for it to be bound. + 4. Create a pod with the pod created in step 3. + 5. Start writing some IO to pod which run in parallel to steps 6-7. + 6. Relocate CNS volume corresponding to pvc from step 3 to a different datastore. + 7. While relocation is running create a CSI snapshot. + 8. Verify the IO written so far. + 9. Verify relocation was successful + 10. Verify snapshot creation was successful. + 11. Delete all the objects created during the test. 
+ */ + ginkgo.It("[csi-block-vanilla][csi-block-vanilla-parallelized]"+ + " Start volume relocation and take a snapshot", func() { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sharedNfsUrl, sharedVsanDsurl := "", "" + var datastoreUrls []string + var policyName string + pc := newPbmClient(ctx, e2eVSphere.Client) + + sharedVsanDsurl = os.Getenv(envSharedDatastoreURL) + if sharedVsanDsurl == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedDatastoreURL)) + } + + sharedNfsUrl = os.Getenv(envSharedNFSDatastoreURL) + if sharedNfsUrl == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedNFSDatastoreURL)) + } + datastoreUrls = append(datastoreUrls, sharedNfsUrl, sharedVsanDsurl) + + scParameters := make(map[string]string) + policyNames := []string{} + pvcs := []*v1.PersistentVolumeClaim{} + pvclaims2d := [][]*v1.PersistentVolumeClaim{} + + rand.New(rand.NewSource(time.Now().UnixNano())) + suffix := fmt.Sprintf("-%v-%v", time.Now().UnixNano(), rand.Intn(10000)) + categoryName := "category" + suffix + tagName := "tag" + suffix + + catID, tagID := createCategoryNTag(ctx, categoryName, tagName) + defer func() { + deleteCategoryNTag(ctx, catID, tagID) + }() + + attachTagToDS(ctx, tagID, sharedVsanDsurl) + defer func() { + detachTagFromDS(ctx, tagID, sharedVsanDsurl) + }() + + attachTagToDS(ctx, tagID, sharedNfsUrl) + defer func() { + detachTagFromDS(ctx, tagID, sharedNfsUrl) + }() + + ginkgo.By("create SPBM policy with tag based rules") + ginkgo.By("create a storage class with a SPBM policy created from step 1") + ginkgo.By("create a PVC each using the storage policy created from step 2") + var storageclass *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var err error + var policyID *pbmtypes.PbmProfileId + + policyID, policyName = createTagBasedPolicy( + ctx, pc, map[string]string{categoryName: tagName}) + defer func() { + deleteStoragePolicy(ctx, pc, policyID) + }() + + policyNames = append(policyNames, policyName) + + framework.Logf("CNS_TEST: Running for vanilla k8s setup") + scParameters[scParamStoragePolicyName] = policyName + storageclass, pvclaim, err = createPVCAndStorageClass(client, + namespace, nil, scParameters, pvc10g, nil, "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvcs = append(pvcs, pvclaim) + pvclaims2d = append(pvclaims2d, []*v1.PersistentVolumeClaim{pvclaim}) + + defer func() { + if vanillaCluster { + ginkgo.By("Delete the SCs created in step 2") + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Verify the PVCs created in step 3 are bound") + pvs, err := fpv.WaitForPVClaimBoundPhase(client, pvcs, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id") + volumeID := pvs[0].Spec.CSI.VolumeHandle + if guestCluster { + volumeID = getVolumeIDFromSupervisorCluster(pvs[0].Spec.CSI.VolumeHandle) + gomega.Expect(volumeID).NotTo(gomega.BeEmpty()) + } + storagePolicyMatches, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyNames[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed") + e2eVSphere.verifyVolumeCompliance(volumeID, true) + + defer func() { + ginkgo.By("Delete the PVCs created in step 3") + err := 
fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }() + + ginkgo.By("Create pods with using the PVCs created in step 3 and wait for them to be ready") + ginkgo.By("verify we can read and write on the PVCs") + pods := createMultiplePods(ctx, client, pvclaims2d, true) + defer func() { + ginkgo.By("Delete the pod created") + deletePodsAndWaitForVolsToDetach(ctx, client, pods, true) + }() + + ginkgo.By("Verify if VolumeID is created on the given datastores") + dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID) + framework.Logf("Volume: %s is present on %s", volumeID, dsUrlWhereVolumeIsPresent) + e2eVSphere.verifyDatastoreMatch(volumeID, datastoreUrls) + + // Get the destination ds url where the volume will get relocated + destDsUrl := "" + for _, dsurl := range datastoreUrls { + if dsurl != dsUrlWhereVolumeIsPresent { + destDsUrl = dsurl + break + } + } + + framework.Logf("dest url: %s", destDsUrl) + dsRefDest := getDsMoRefFromURL(ctx, destDsUrl) + + rand.New(rand.NewSource(time.Now().Unix())) + testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) + ginkgo.By(fmt.Sprintf("Creating a 100mb test data file %v", testdataFile)) + op, err := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=100").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + op, err = exec.Command("rm", "-f", testdataFile).Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + snaps := []*snapV1.VolumeSnapshot{} + snapIDs := []string{} + + //Get snapshot client using the rest config + restConfig := getRestConfigClient() + snapc, err := snapclient.NewForConfig(restConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create volume snapshot class") + volumeSnapshotClass, err := snapc.SnapshotV1().VolumeSnapshotClasses().Create(ctx, + getVolumeSnapshotClassSpec(snapV1.DeletionPolicy("Delete"), nil), metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Volume snapshot class with name %q created", volumeSnapshotClass.Name) + + defer func() { + err := snapc.SnapshotV1().VolumeSnapshotClasses().Delete( + ctx, volumeSnapshotClass.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Start relocation of volume to a different datastore") + task, err := e2eVSphere.cnsRelocateVolume(e2eVSphere, ctx, volumeID, dsRefDest, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("Waiting for a few seconds for relocation to be started properly on VC") + time.Sleep(time.Duration(10) * time.Second) + + ginkgo.By("Create a snapshot of volume and writing IO to pod while relocating volume") + ch := make(chan *snapV1.VolumeSnapshot) + lock := &sync.Mutex{} + var wg sync.WaitGroup + wg.Add(2) + go writeKnownData2PodInParallel(f, pods[0], testdataFile, &wg) + go createSnapshotInParallel(ctx, namespace, snapc, pvclaim.Name, volumeSnapshotClass.Name, + ch, lock, &wg) + go func() { + for v := range ch { + snaps = append(snaps, v) + } + }() + wg.Wait() + + ginkgo.By("Wait for relocation task to complete") + cnsFault := waitForCNSTaskToComplete(ctx, task) + if cnsFault != nil { + err = fmt.Errorf("failed to relocate volume=%+v", cnsFault) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + 
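The parallel section above is a producer/consumer hand-off: the snapshot goroutine sends its result on ch (and presumably closes the channel when done), a collector goroutine appends each received snapshot into snaps, and the WaitGroup gates the test until the producers finish. A minimal, self-contained sketch of that pattern (hypothetical names, not the test helpers) which also waits for the collector before reading results:

package main

import (
	"fmt"
	"sync"
)

func main() {
	results := []string{}
	ch := make(chan string)
	done := make(chan struct{})
	var wg sync.WaitGroup

	wg.Add(2)
	go func() { // stands in for the IO writer goroutine; produces no result
		defer wg.Done()
		// ... write data to the pod ...
	}()
	go func() { // stands in for the snapshot goroutine; sends its result on ch
		defer wg.Done()
		defer close(ch) // closing ch lets the collector's range loop terminate
		ch <- "snap-1"
	}()

	go func() { // collector, mirrors the `for v := range ch` goroutine in the test
		for v := range ch {
			results = append(results, v)
		}
		close(done)
	}()

	wg.Wait()
	<-done // make sure the collector finished appending before results are read
	fmt.Println(results)
}

Waiting on a separate signal from the collector (or adding it to the WaitGroup) avoids reading the result slice while the collector may still be appending to it.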
ginkgo.By("Verify relocation of volume is successful") + e2eVSphere.verifyDatastoreMatch(volumeID, []string{destDsUrl}) + + volumeSnapshot := snaps[0] + ginkgo.By("Verify volume snapshot is created") + volumeSnapshot, err = waitForVolumeSnapshotReadyToUse(*snapc, ctx, namespace, volumeSnapshot.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("snapshot restore size is : %s", volumeSnapshot.Status.RestoreSize.String()) + gomega.Expect(volumeSnapshot.Status.RestoreSize.Cmp(pvclaim.Spec.Resources.Requests[v1.ResourceStorage])).To( + gomega.BeZero()) + + ginkgo.By("Verify volume snapshot content is created") + snapshotContent, err := snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(*snapshotContent.Status.ReadyToUse).To(gomega.BeTrue()) + + framework.Logf("Get volume snapshot ID from snapshot handle") + snapshothandle := *snapshotContent.Status.SnapshotHandle + snapshotId := strings.Split(snapshothandle, "+")[1] + snapIDs = append(snapIDs, snapshotId) + + defer func() { + if len(snaps) > 0 { + framework.Logf("Delete volume snapshot %v", volumeSnapshot.Name) + err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete( + ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + framework.Logf("Verify snapshot entry %v is deleted from CNS for volume %v", snapIDs[0], volumeID) + err = waitForCNSSnapshotToBeDeleted(volumeID, snapIDs[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + } + }() + + ginkgo.By("Query CNS and check the volume snapshot entry") + err = verifySnapshotIsCreatedInCNS(volumeID, snapshotId, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify the data on the PVCs match what was written in step 7") + verifyKnownDataInPod(f, pods[0], testdataFile) + + storagePolicyMatches, err = e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyNames[0]) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed") + e2eVSphere.verifyVolumeCompliance(volumeID, true) + + ginkgo.By("Delete the pod created") + deletePodsAndWaitForVolsToDetach(ctx, client, pods, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + podsNew := createMultiplePods(ctx, client, pvclaims2d, true) + deletePodsAndWaitForVolsToDetach(ctx, client, podsNew, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }) +}) diff --git a/tests/e2e/testing-manifests/statefulset/nginx/statefulset.yaml b/tests/e2e/testing-manifests/statefulset/nginx/statefulset.yaml index 79bef22a2c..d6041d047f 100644 --- a/tests/e2e/testing-manifests/statefulset/nginx/statefulset.yaml +++ b/tests/e2e/testing-manifests/statefulset/nginx/statefulset.yaml @@ -25,10 +25,9 @@ spec: volumeClaimTemplates: - metadata: name: www - annotations: - volume.beta.kubernetes.io/storage-class: nginx-sc spec: accessModes: [ "ReadWriteOnce" ] + storageClassName: nginx-sc resources: requests: storage: 1Gi \ No newline at end of file diff --git a/tests/e2e/testing-manifests/tkg/tkg.yaml b/tests/e2e/testing-manifests/tkg/tkg.yaml index 374fc16562..8a1f9f1328 100644 --- a/tests/e2e/testing-manifests/tkg/tkg.yaml +++ b/tests/e2e/testing-manifests/tkg/tkg.yaml @@ -1,4 +1,4 @@ -apiVersion: run.tanzu.vmware.com/v1alpha1 +apiVersion: run.tanzu.vmware.com/v1alpha3 kind: TanzuKubernetesCluster metadata: name: test-cluster-e2e-script @@ -6,22 
+6,25 @@ metadata: spec: topology: controlPlane: - count: 3 - class: best-effort-xsmall # vmclass to be used for master(s) + tkr: + reference: + name: replaceImage # this will be replaced with the actual image name by pipeline + replicas: 3 + vmClass: best-effort-xsmall # vmclass to be used for master(s) storageClass: gc-storage-profile - workers: - count: 2 - class: best-effort-xsmall # vmclass to be used for workers(s) + nodePools: + - replicas: 2 + name: np-2 + vmClass: best-effort-xsmall # vmclass to be used for workers(s) storageClass: gc-storage-profile - distribution: - version: replaceImage # this will be replaced with the actual image name by pipeline settings: network: cni: - name: calico + name: antrea services: - cidrBlocks: ["198.51.100.0/12"] + cidrBlocks: + - 198.51.100.0/12 pods: - cidrBlocks: ["192.0.2.0/16"] - serviceDomain: "managedcluster.local" - + cidrBlocks: + - 192.0.2.0/16 + serviceDomain: cluster.local diff --git a/tests/e2e/tkgs_ha.go b/tests/e2e/tkgs_ha.go index 2f3fd18721..471e15c467 100644 --- a/tests/e2e/tkgs_ha.go +++ b/tests/e2e/tkgs_ha.go @@ -19,6 +19,7 @@ package e2e import ( "context" "fmt" + "os" "strconv" "strings" "time" @@ -59,6 +60,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { isSPSServiceStopped bool sshWcpConfig *ssh.ClientConfig svcMasterIp string + clientNewGc clientset.Interface ) ginkgo.BeforeEach(func() { client = f.ClientSet @@ -101,7 +103,16 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, rqLimit) } + }) + ginkgo.AfterEach(func() { + if supervisorCluster { + dumpSvcNsEventsOnTestFailure(client, namespace) + } + if guestCluster { + svcClient, svNamespace := getSvcClientAndNamespace() + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + } }) /* @@ -240,7 +251,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Creating statefulset") statefulset.Spec.PodManagementPolicy = appsv1.ParallelPodManagement statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storageclass.Name + Spec.StorageClassName = &storageclass.Name *statefulset.Spec.Replicas = 3 CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) @@ -786,7 +797,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { framework.Logf("allowedTopo: %v", allowedTopologies) statefulset.Spec.PodManagementPolicy = appsv1.ParallelPodManagement statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storageclass.Name + Spec.StorageClassName = &storageclass.Name statefulset.Spec.Template.Spec.Affinity = new(v1.Affinity) statefulset.Spec.Template.Spec.Affinity.NodeAffinity = new(v1.NodeAffinity) statefulset.Spec.Template.Spec.Affinity.NodeAffinity. @@ -886,7 +897,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { statefulset := GetStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. 
- Annotations["volume.beta.kubernetes.io/storage-class"] = storageclass.Name + Spec.StorageClassName = &storageclass.Name *statefulset.Spec.Replicas = 1 replicas := *(statefulset.Spec.Replicas) @@ -1085,7 +1096,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { ginkgo.By("Creating statefulset") statefulset.Spec.PodManagementPolicy = appsv1.ParallelPodManagement statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storageclass.Name + Spec.StorageClassName = &storageclass.Name *statefulset.Spec.Replicas = 3 CreateStatefulSet(namespace, statefulset, client) @@ -1194,7 +1205,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { statefulset.Spec.Template.Labels["app"] = statefulset.Name statefulset.Spec.Selector.MatchLabels["app"] = statefulset.Name statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storageclass.Name + Spec.StorageClassName = &storageclass.Name *statefulset.Spec.Replicas = statefulSetReplicaCount CreateStatefulSet(namespace, statefulset, client) stsList = append(stsList, statefulset) @@ -1306,7 +1317,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { statefulset.Spec.Template.Labels["app"] = statefulset.Name statefulset.Spec.Selector.MatchLabels["app"] = statefulset.Name statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storageclass.Name + Spec.StorageClassName = &storageclass.Name *statefulset.Spec.Replicas = statefulSetReplicaCount CreateStatefulSet(namespace, statefulset, client) stsList = append(stsList, statefulset) @@ -1509,7 +1520,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, volumeOpsScale) + pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, volumeOpsScale, nil) _, err = fpv.WaitForPVClaimBoundPhase(client, pvclaimsList, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1711,7 +1722,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, volumeOpsScale) + pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, volumeOpsScale, nil) _, err = fpv.WaitForPVClaimBoundPhase(client, pvclaimsList, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2159,7 +2170,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { statefulset.Spec.Template.Labels["app"] = statefulset.Name statefulset.Spec.Selector.MatchLabels["app"] = statefulset.Name statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. 
- Annotations["volume.beta.kubernetes.io/storage-class"] = storageclass.Name + Spec.StorageClassName = &storageclass.Name *statefulset.Spec.Replicas = replicas _, err := client.AppsV1().StatefulSets(namespace).Create(ctx, statefulset, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2259,7 +2270,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, volumeOpsScale) + pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, volumeOpsScale, nil) pvs, err := fpv.WaitForPVClaimBoundPhase(client, pvclaimsList, framework.ClaimProvisionTimeout) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2395,4 +2406,641 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SanityTests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }) + + /* + Statefulset - storage class with Zonal storage and + Wffc and with default pod management policy with PodAffinity + 1. Create a zonal storage policy, on the datastore that is shared only to specific cluster + 2. Use the Zonal storage class and Wait for first consumer binding mode + and create statefulset + with parallel pod management policy with replica 3 and PodAntiAffinity + 3. wait for all the gc-PVC to bound - Make sure corresponding SVC-PVC will + have "csi.vsphere.volume-accessible-topology" annotation + csi.vsphere.requested.cluster-topology= + [{"topology.kubernetes.io/zone":"zone1"},{"topology.kubernetes.io/zone":"zone2"}, + {"topology.kubernetes.io/zone":"zone2"}] + 4. storageClassName: should point to gcStorageclass + 5. Wait for the PODs to reach running state - make sure Pod scheduled on appropriate nodes + preset in the availability zone + 6. Describe SVC-PV , and GC-PV and verify node affinity, make sure appropriate node affinity gets added + 7. Delete the above statefulset + 8. Create New statefulset with PODAffinity rules set + 9. Wait for all the PVC's and POD's to come up + 7. Scale up the statefulset replica to 5 , and validate the node affinity on + the newly create PV's and annotations on PVC's + 8. Validate the CNS metadata + 9. 
Scale down the sts to 0 + 10. Delete Statefulset, PVC, POD, SC + */ + ginkgo.It("Validate statefulset creation with POD affinity and POD Anti affinity", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ginkgo.By("CNS_TEST: Running for GC setup") + + cleanupsts := false + nodeList, _ := fnodes.GetReadySchedulableNodes(client) + + ginkgo.By("Create statefulset with parallel pod management policy with replica 3") + createResourceQuota(client, namespace, rqLimit, zonalWffcPolicy) + scParameters[svStorageClassName] = zonalWffcPolicy + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + // Creating StatefulSet service + ginkgo.By("Creating service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + + statefulset := GetStatefulSetFromManifest(namespace) + ginkgo.By("Creating statefulset with POD Anti affinity") + allowedTopologies := getTopologySelector(allowedTopologyHAMap, categories, + tkgshaTopologyLevels) + framework.Logf("allowedTopo: %v", allowedTopologies) + statefulset.Spec.PodManagementPolicy = appsv1.ParallelPodManagement + statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. + Spec.StorageClassName = &storageclass.Name + statefulset.Spec.Template.Spec.Affinity = new(v1.Affinity) + statefulset.Spec.Template.Spec.Affinity.NodeAffinity = new(v1.NodeAffinity) + statefulset.Spec.Template.Spec.Affinity.NodeAffinity. + RequiredDuringSchedulingIgnoredDuringExecution = new(v1.NodeSelector) + statefulset.Spec.Template.Spec.Affinity.NodeAffinity. + RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = getNodeSelectorTerms(allowedTopologies) + + statefulset.Spec.Template.Spec.Affinity.PodAntiAffinity = new(v1.PodAntiAffinity) + statefulset.Spec.Template.Spec.Affinity.PodAntiAffinity. + RequiredDuringSchedulingIgnoredDuringExecution = getPodAffinityTerm(allowedTopologyHAMap) + + *statefulset.Spec.Replicas = 3 + framework.Logf("Statefulset spec: %v", statefulset) + ginkgo.By("Create Statefulset with PodAntiAffinity") + cleanupsts = true + CreateStatefulSet(namespace, statefulset, client) + replicas := *(statefulset.Spec.Replicas) + + defer func() { + if cleanupsts { + framework.Logf("cleaning up statefulset with podAntiAffinity") + cleaupStatefulset(client, ctx, namespace, statefulset) + } + }() + + ginkgo.By("Verify annotations on SVC PV and required node affinity details on SVC PV and GC PV") + ginkgo.By("Verify pod gets scheduled on appropriate nodes present in the availability zone") + verifyStsVolumeMetadata(client, ctx, namespace, statefulset, replicas, + allowedTopologyHAMap, categories, zonalPolicy, nodeList, f) + + ginkgo.By("Delete Statefulset with PodAntiAffinity") + cleaupStatefulset(client, ctx, namespace, statefulset) + cleanupsts = false + + ginkgo.By("Creating statefulset with POD-affinity") + statefulset = GetStatefulSetFromManifest(namespace) + allowedTopologies = getTopologySelector(allowedTopologyHAMap, categories, + tkgshaTopologyLevels) + framework.Logf("allowedTopo: %v", allowedTopologies) + statefulset.Spec.PodManagementPolicy = appsv1.ParallelPodManagement + statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. 
+ Spec.StorageClassName = &storageclass.Name + statefulset.Spec.Template.Spec.Affinity = new(v1.Affinity) + statefulset.Spec.Template.Spec.Affinity.NodeAffinity = new(v1.NodeAffinity) + statefulset.Spec.Template.Spec.Affinity.NodeAffinity. + RequiredDuringSchedulingIgnoredDuringExecution = new(v1.NodeSelector) + statefulset.Spec.Template.Spec.Affinity.NodeAffinity. + RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = getNodeSelectorTerms(allowedTopologies) + + statefulset.Spec.Template.Spec.Affinity.PodAffinity = new(v1.PodAffinity) + statefulset.Spec.Template.Spec.Affinity.PodAffinity. + RequiredDuringSchedulingIgnoredDuringExecution = getPodAffinityTerm(allowedTopologyHAMap) + + *statefulset.Spec.Replicas = 3 + framework.Logf("Statefulset spec: %v", statefulset) + ginkgo.By("Create Statefulset with PodAffinity") + CreateStatefulSet(namespace, statefulset, client) + replicas = *(statefulset.Spec.Replicas) + + defer func() { + framework.Logf("cleaning up statefulset with podAffinity") + cleaupStatefulset(client, ctx, namespace, statefulset) + }() + + framework.Logf("Verify statefulset volume metadata, node affinities and pod's availability on appropriate zone") + verifyStsVolumeMetadata(client, ctx, namespace, statefulset, replicas, + allowedTopologyHAMap, categories, zonalPolicy, nodeList, f) + + replicas = 5 + framework.Logf(fmt.Sprintf("Scaling up statefulset: %v to number of Replica: %v", + statefulset.Name, replicas)) + _, scaleupErr := fss.Scale(client, statefulset, replicas) + gomega.Expect(scaleupErr).NotTo(gomega.HaveOccurred()) + + fss.WaitForStatusReplicas(client, statefulset, replicas) + fss.WaitForStatusReadyReplicas(client, statefulset, replicas) + ssPodsAfterScaleUp := fss.GetPodList(client, statefulset) + gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), + fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) + gomega.Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(gomega.BeTrue(), + "Number of Pods in the statefulset %s, %v, should match with number of replicas %v", + statefulset.Name, ssPodsAfterScaleUp.Size(), replicas, + ) + + verifyStsVolumeMetadata(client, ctx, namespace, statefulset, replicas, + allowedTopologyHAMap, categories, zonalPolicy, nodeList, f) + + }) + + /* + Verify volume provisioning after VC reboot using zonal storage + 1. Create few statefulsets , PVC's, deployment POD's using zonal SC's and note the details + 2. Re-boot VC and wait till all the services up and running + 3. Validate the Pre-data, sts's, PVC's and PODs's should be in up and running state + 4. Use the existing SC's and create stateful set with 3 replica. Make sure PVC's + reach bound state, POd's reach running state + 5. validate node affinity details on the gc-PV's and svc-pv's + 7. Create PVC using the zonal sc + 8. Wait for PVC to reach bound state and PV should have appropriate node affinity + 9. Create POD using the PVC created in step 9 , POD should come up on appropriate zone + 10. trigger online and offline volume expansion and validate + 11. 
delete all sts's , PVC's, SC and POD's + */ + ginkgo.It("Verify volume provisioning after VC reboot using zonal storage", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ginkgo.By("CNS_TEST: Running for GC setup") + nodeList, err := fnodes.GetReadySchedulableNodes(client) + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + if !(len(nodeList.Items) > 0) { + framework.Failf("Unable to find ready and schedulable Node") + } + + ginkgo.By("Create 3 statefulsets with parallel pod management policy with replica 3") + createResourceQuota(client, namespace, rqLimit, zonalWffcPolicy) + scParameters[svStorageClassName] = zonalWffcPolicy + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalWffcPolicy, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + createResourceQuota(client, namespace, rqLimit, zonalPolicy) + scParameters[svStorageClassName] = zonalPolicy + storageclassImmediate, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + var stsList []*appsv1.StatefulSet + var deploymentList []*appsv1.Deployment + var replicas int32 + var pvclaims, pvcs, svcPVCs []*v1.PersistentVolumeClaim + var volumeHandles, svcPVCNames []string + var pods []*v1.Pod + volumeOpsScale := 3 + + // Creating StatefulSet service + ginkgo.By("Creating service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + + ginkgo.By("Creating 3 statefulsets with parallel pod management policy and 3 replicas") + + for i := 0; i < volumeOpsScale; i++ { + statefulset := GetStatefulSetFromManifest(namespace) + ginkgo.By("Creating statefulset") + statefulset.Spec.PodManagementPolicy = appsv1.ParallelPodManagement + statefulset.Name = "sts-" + strconv.Itoa(i) + "-" + statefulset.Name + statefulset.Spec.Template.Labels["app"] = statefulset.Name + statefulset.Spec.Selector.MatchLabels["app"] = statefulset.Name + statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. 
+ Spec.StorageClassName = &storageclass.Name + *statefulset.Spec.Replicas = 3 + CreateStatefulSet(namespace, statefulset, client) + stsList = append(stsList, statefulset) + } + replicas = 3 + + ginkgo.By("Creating 3 PVCs") + for i := 0; i < volumeOpsScale; i++ { + framework.Logf("Creating pvc%v", i) + + pvclaim, err := createPVC(client, namespace, nil, "", storageclassImmediate, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvclaims = append(pvclaims, pvclaim) + } + + ginkgo.By("Expect all pvcs to provision volume successfully") + _, err = fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + labelsMap := make(map[string]string) + labelsMap["app"] = "test" + + ginkgo.By("Creating 3 deployment with each PVC created earlier") + + for i := 0; i < volumeOpsScale; i++ { + deployment, err := createDeployment( + ctx, client, 1, labelsMap, nil, namespace, []*v1.PersistentVolumeClaim{pvclaims[i]}, + "", false, busyBoxImageOnGcr) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deploymentList = append(deploymentList, deployment) + + } + + ginkgo.By("Rebooting VC") + vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort + err = invokeVCenterReboot(vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = waitForHostToBeUp(e2eVSphere.Config.Global.VCenterHostname) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Done with reboot") + essentialServices := []string{spsServiceName, vsanhealthServiceName, vpxdServiceName, wcpServiceName} + checkVcenterServicesRunning(ctx, vcAddress, essentialServices, healthStatusPollTimeout) + + // After reboot. + bootstrap() + + defer func() { + scaleDownNDeleteStsDeploymentsInNamespace(ctx, client, namespace) + pvcs, err := client.CoreV1().PersistentVolumeClaims(namespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, claim := range pvcs.Items { + pv := getPvFromClaim(client, namespace, claim.Name) + err := fpv.DeletePersistentVolumeClaim(client, claim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Verify it's PV and corresponding volumes are deleted from CNS") + err = fpv.WaitForPersistentVolumeDeleted(client, pv.Name, poll, + pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeHandle := pv.Spec.CSI.VolumeHandle + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the CNS after it is deleted from "+ + "kubernetes", volumeHandle)) + } + }() + + framework.Logf("After the VC reboot, Wait for all the PVC's to reach bound state") + _, err = fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + framework.Logf("After the VC reboot, Verify all the pre-created deployment pod's, its status and metadata") + for _, deployment := range deploymentList { + pods, err := fdep.GetPodsForDeployment(client, deployment) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pod := pods.Items[0] + err = fpod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + verifyVolumeMetadataOnDeployments(ctx, client, deployment, namespace, allowedTopologyHAMap, + categories, nodeList, zonalPolicy) + + } + + framework.Logf("After the VC reboot, Verify all the pre-created stateful set metadata") + for _, sts := range stsList { + 
verifyStsVolumeMetadata(client, ctx, namespace, sts, replicas, + allowedTopologyHAMap, categories, zonalPolicy, nodeList, f) + } + + replicas = 5 + framework.Logf(fmt.Sprintf("Increase statefulset %v to number of Replica: %v", + stsList[0].Name, replicas)) + time.Sleep(60 * time.Second) + _, scaleupErr := fss.Scale(client, stsList[0], replicas) + gomega.Expect(scaleupErr).NotTo(gomega.HaveOccurred()) + fss.WaitForStatusReplicas(client, stsList[0], replicas) + fss.WaitForStatusReadyReplicas(client, stsList[0], replicas) + ssPodsAfterScaleUp := fss.GetPodList(client, stsList[0]) + gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), + fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", stsList[0].Name)) + gomega.Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(gomega.BeTrue(), + "Number of Pods in the statefulset %s, %v, should match with number of replicas %v", + stsList[0].Name, ssPodsAfterScaleUp.Size(), replicas, + ) + + ginkgo.By("Creating Pvc with Immediate topology storageclass") + ginkgo.By("Creating 3 PVCs for volume expansion") + for i := 0; i < volumeOpsScale; i++ { + framework.Logf("Creating pvc%v", i) + + pvc, err := createPVC(client, namespace, nil, "", storageclassImmediate, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvcs = append(pvclaims, pvc) + } + + ginkgo.By("Wait for GC PVC to come to bound state") + pvs, err := fpv.WaitForPVClaimBoundPhase(client, pvcs, + framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + for _, pv := range pvs { + volHandle := getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle) + volumeHandles = append(volumeHandles, volHandle) + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + svcPVCName := pv.Spec.CSI.VolumeHandle + svcPVCNames = append(svcPVCNames, svcPVCName) + svcPVC := getPVCFromSupervisorCluster(svcPVCName) + svcPVCs = append(svcPVCs, svcPVC) + } + + ginkgo.By("Create a pod and wait for it to come to Running state") + for _, pvc := range pvcs { + pod, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pods = append(pods, pod) + } + + defer func() { + ginkgo.By("Delete pods") + for _, pod := range pods { + err = fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }() + + ginkgo.By("Verify annotations on SVC PV and required node affinity details on SVC PV and GC PV") + ginkgo.By("Verify pod gets scheduled on appropriate nodes preset in the availability zone") + for i, pv := range pvs { + verifyAnnotationsAndNodeAffinity(allowedTopologyHAMap, categories, pods[i], + nodeList, svcPVCs[i], pv, svcPVCNames[i]) + } + + ginkgo.By("Triggering online volume expansion on PVCs") + for i := range pods { + verifyOnlineVolumeExpansionOnGc(client, namespace, svcPVCNames[i], + volumeHandles[i], pvcs[i], pods[i], f) + } + + ginkgo.By("Triggering offline volume expansion on PVCs") + for i := range pods { + verifyOfflineVolumeExpansionOnGc(client, pvcs[i], svcPVCNames[i], namespace, + volumeHandles[i], pods[i], pvs[i], f) + } + }) + + /* + Static volume provisioning using zonal storage + 1. Create a zonal storage policy, on the datastore that is shared only to specific cluster + 2. Use the Zonal storage class and Immediate binding mode + 3. Create svcpvc and wait for it to bound + 4. switch to gc1 and statically create PV and PVC pointing to svc-pvc + 5. Verify topology details on PV + 6. Delete GC1 PVC + 7. switch to GC2 + 8. 
Create static pvc on gc2PVC point to svc-pvc + 9. Verify the node affinity of gc1-pv and svc-pv + 9. Create POD, verify the status. + 10. Wait for the PODs to reach running state - make sure Pod scheduled on + appropriate nodes preset in the availability zone + 10. Delete pod, gc1-pv and gc1-pvc and svc pvc. + */ + ginkgo.It("tkgs-ha Verify static provisioning across Guest Clusters", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + newGcKubconfigPath := os.Getenv("NEW_GUEST_CLUSTER_KUBE_CONFIG") + if newGcKubconfigPath == "" { + ginkgo.Skip("Env NEW_GUEST_CLUSTER_KUBE_CONFIG is missing") + } + + svClient, svNamespace := getSvcClientAndNamespace() + pvcAnnotations := make(map[string]string) + annotationVal := "[" + var topoList []string + + for key, val := range allowedTopologyHAMap { + for _, topoVal := range val { + str := `{"` + key + `":"` + topoVal + `"}` + topoList = append(topoList, str) + } + } + framework.Logf("topoList: %v", topoList) + annotationVal += strings.Join(topoList, ",") + "]" + pvcAnnotations[tkgHARequestedAnnotationKey] = annotationVal + framework.Logf("annotationVal :%s, pvcAnnotations: %v", annotationVal, pvcAnnotations) + + ginkgo.By("Creating Pvc with Immediate topology storageclass") + createResourceQuota(client, namespace, rqLimit, zonalPolicy) + scParameters[svStorageClassName] = zonalPolicy + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + pvcSpec := getPersistentVolumeClaimSpecWithStorageClass(svNamespace, "", storageclass, nil, "") + pvcSpec.Annotations = pvcAnnotations + svPvclaim, err := svClient.CoreV1().PersistentVolumeClaims(svNamespace).Create(context.TODO(), + pvcSpec, metav1.CreateOptions{}) + svcPVCName := svPvclaim.Name + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isSVCPvcCreated := true + + ginkgo.By("Wait for SV PVC to come to bound state") + svcPv, err := fpv.WaitForPVClaimBoundPhase(svClient, []*v1.PersistentVolumeClaim{svPvclaim}, + framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + volumeID := svPvclaim.Name + staticPVLabels := make(map[string]string) + staticPVLabels["fcd-id"] = volumeID + + framework.Logf("PVC name in SV " + svcPVCName) + pvcUID := string(svPvclaim.GetUID()) + framework.Logf("PVC UUID in GC " + pvcUID) + gcClusterID := strings.Replace(svcPVCName, pvcUID, "", -1) + + framework.Logf("gcClusterId " + gcClusterID) + pv := getPvFromClaim(svClient, svPvclaim.Namespace, svPvclaim.Name) + pvUID := string(pv.UID) + framework.Logf("PV uuid " + pvUID) + + defer func() { + if isSVCPvcCreated { + err := fpv.DeletePersistentVolumeClaim(svClient, svcPVCName, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = fpv.DeletePersistentVolume(svClient, pv.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + isSVCPvcCreated = false + }() + + // Get allowed topologies for zonal storage + allowedTopologies := getTopologySelector(allowedTopologyHAMap, categories, + tkgshaTopologyLevels) + + ginkgo.By("Creating the PV") + staticPv := getPersistentVolumeSpecWithStorageClassFCDNodeSelector(volumeID, + v1.PersistentVolumeReclaimRetain, storageclass.Name, staticPVLabels, + diskSize, allowedTopologies) + staticPv, err = client.CoreV1().PersistentVolumes().Create(ctx, staticPv, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating the PVC") + staticPvc := 
getPersistentVolumeClaimSpec(namespace, staticPVLabels, staticPv.Name) + staticPvc.Spec.StorageClassName = &storageclass.Name + staticPvc, err = client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, staticPvc, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isGC1PvcCreated := true + + // Wait for PV and PVC to Bind. + framework.ExpectNoError(fpv.WaitOnPVandPVC(client, framework.NewTimeoutContextWithDefaults(), + namespace, staticPv, staticPvc)) + + defer func() { + if isGC1PvcCreated { + err := fpv.DeletePersistentVolumeClaim(client, staticPvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = fpv.DeletePersistentVolume(client, staticPv.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVs, volumes are deleted from CNS") + err = fpv.WaitForPersistentVolumeDeleted(client, staticPv.Name, framework.Poll, + framework.PodDeleteTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Volume: %s should not be present in the CNS after it is deleted from "+ + "kubernetes", volumeID)) + ginkgo.By("Verify volume is deleted in Supervisor Cluster") + volumeExists := verifyVolumeExistInSupervisorCluster(svcPv[0].Spec.CSI.VolumeHandle) + gomega.Expect(volumeExists).To(gomega.BeFalse()) + } + isGC1PvcCreated = false + + }() + + ginkgo.By("Verify SV storageclass points to GC storageclass") + gomega.Expect(*svPvclaim.Spec.StorageClassName == storageclass.Name).To( + gomega.BeTrue(), "SV storageclass does not match with gc storageclass") + framework.Logf("GC PVC's storageclass matches SVC PVC's storageclass") + + ginkgo.By("Verify GC PV has required PV node affinity details") + _, err = verifyVolumeTopologyForLevel5(staticPv, allowedTopologyHAMap) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("GC PV: %s has required Pv node affinity details", staticPv.Name) + + ginkgo.By("Verify SV PV has required PV node affinity details") + _, err = verifyVolumeTopologyForLevel5(svcPv[0], allowedTopologyHAMap) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("SVC PV: %s has required PV node affinity details", svcPv[0].Name) + time.Sleep(time.Duration(60) * time.Second) + + ginkgo.By("Delete PVC in GC1") + err = fpv.DeletePersistentVolumeClaim(client, staticPvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isGC1PvcCreated = false + + err = fpv.DeletePersistentVolume(client, staticPv.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verifying if volume still exists in the Supervisor Cluster") + // svcPVCName refers to PVC Name in the supervisor cluster. 
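The node-affinity verification above (verifyVolumeTopologyForLevel5) boils down to comparing the PV's required nodeSelectorTerms with the allowed zone map. A rough, self-contained sketch of that kind of comparison (illustrative only, not the helper's actual implementation) could look like:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// pvZoneMatches reports whether any required nodeSelectorTerm on the PV
// references a topology key/value pair present in the allowed map.
func pvZoneMatches(pv *v1.PersistentVolume, allowed map[string][]string) bool {
	if pv.Spec.NodeAffinity == nil || pv.Spec.NodeAffinity.Required == nil {
		return false
	}
	for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
		for _, req := range term.MatchExpressions {
			for _, allowedVal := range allowed[req.Key] {
				for _, val := range req.Values {
					if val == allowedVal {
						return true
					}
				}
			}
		}
	}
	return false
}

func main() {
	// A PV stub with a single zone requirement, the shape a zonal policy produces.
	pv := &v1.PersistentVolume{
		Spec: v1.PersistentVolumeSpec{
			NodeAffinity: &v1.VolumeNodeAffinity{
				Required: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{{
						MatchExpressions: []v1.NodeSelectorRequirement{{
							Key:    "topology.kubernetes.io/zone",
							Values: []string{"zone1"},
						}},
					}},
				},
			},
		},
	}
	allowed := map[string][]string{"topology.kubernetes.io/zone": {"zone1"}}
	fmt.Println(pvZoneMatches(pv, allowed)) // true
}

The real helper likely applies stricter rules (for example, checking every term and multiple topology levels); the sketch only shows the shape of the data being compared.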
+ volumeID = getVolumeIDFromSupervisorCluster(svPvclaim.Name) + gomega.Expect(volumeID).NotTo(gomega.BeEmpty()) + pvAnnotations := svcPv[0].Annotations + pvSpec := svcPv[0].Spec.CSI + pvStorageClass := svcPv[0].Spec.StorageClassName + + newGcKubconfigPath = os.Getenv("NEW_GUEST_CLUSTER_KUBE_CONFIG") + if newGcKubconfigPath == "" { + ginkgo.Skip("Env NEW_GUEST_CLUSTER_KUBE_CONFIG is missing") + } + clientNewGc, err = createKubernetesClientFromConfig(newGcKubconfigPath) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Error creating k8s client with %v: %v", newGcKubconfigPath, err)) + ginkgo.By("Creating namespace on second GC") + ns, err := framework.CreateTestingNS(f.BaseName, clientNewGc, map[string]string{ + "e2e-framework": f.BaseName, + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Error creating namespace on second GC") + + namespaceNewGC := ns.Name + framework.Logf("Created namespace on second GC %v", namespaceNewGC) + defer func() { + err := clientNewGc.CoreV1().Namespaces().Delete(ctx, namespaceNewGC, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Getting ready nodes on GC 2") + nodeList, err := fnodes.GetReadySchedulableNodes(clientNewGc) + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + gomega.Expect(len(nodeList.Items)).NotTo(gomega.BeZero(), "Unable to find ready and schedulable Node") + + ginkgo.By("Creating PVC in New GC with the vol handle from SVC") + scParameters = make(map[string]string) + scParameters[scParamFsType] = ext4FSType + scParameters[svStorageClassName] = storageclass.Name + storageclassNewGC, err := clientNewGc.StorageV1().StorageClasses().Get(ctx, zonalPolicy, metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + pvcNew, err := createPVC(clientNewGc, namespaceNewGC, nil, "", storageclassNewGC, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + var pvcs []*v1.PersistentVolumeClaim + pvcs = append(pvcs, pvcNew) + ginkgo.By("Waiting for all claims to be in bound state") + _, err = fpv.WaitForPVClaimBoundPhase(clientNewGc, pvcs, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pvNewGC := getPvFromClaim(clientNewGc, pvcNew.Namespace, pvcNew.Name) + volumeIDNewGC := pvNewGC.Spec.CSI.VolumeHandle + svcNewPVCName := volumeIDNewGC + volumeIDNewGC = getVolumeIDFromSupervisorCluster(svcNewPVCName) + gomega.Expect(volumeIDNewGC).NotTo(gomega.BeEmpty()) + + framework.Logf("PVC name in SV " + svcNewPVCName) + pvcNewUID := string(pvcNew.GetUID()) + framework.Logf("pvcNewUID in GC " + pvcNewUID) + gcNewClusterID := strings.Replace(svcNewPVCName, pvcNewUID, "", -1) + framework.Logf("pvNew uuid " + gcNewClusterID) + + ginkgo.By("Creating PV in new guest cluster with volume handle from SVC") + pvNew := getPersistentVolumeSpec(svPvclaim.Name, v1.PersistentVolumeReclaimDelete, nil, ext4FSType) + pvNew.Annotations = pvAnnotations + pvNew.Spec.StorageClassName = pvStorageClass + pvNew.Spec.CSI = pvSpec + pvNew.Spec.CSI.VolumeHandle = svPvclaim.Name + pvNew, err = clientNewGc.CoreV1().PersistentVolumes().Create(ctx, pvNew, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pvNewUID := string(pvNew.UID) + framework.Logf("pvNew uuid " + pvNewUID) + + defer func() { + ginkgo.By("Delete PVC in GC2") + err = fpv.DeletePersistentVolumeClaim(clientNewGc, pvcNew.Name, namespaceNewGC) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = 
fpv.DeletePersistentVolume(clientNewGc, pvNew.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create a pod and verify pod gets scheduled on appropriate " + + "nodes preset in the availability zone") + pod, err := createPod(clientNewGc, namespaceNewGC, nil, []*v1.PersistentVolumeClaim{pvcNew}, false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + _, err = verifyPodLocationLevel5(pod, nodeList, allowedTopologyHAMap) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + ginkgo.By("Delete pod") + err = fpod.DeletePodWithWait(clientNewGc, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify volume is detached from the node") + isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(clientNewGc, + staticPv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), + fmt.Sprintf("Volume %q is not detached from the node %q", + staticPv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + }() + + }) + }) diff --git a/tests/e2e/tkgs_ha_site_down.go b/tests/e2e/tkgs_ha_site_down.go index 8073dbeed5..83e2ea0c18 100644 --- a/tests/e2e/tkgs_ha_site_down.go +++ b/tests/e2e/tkgs_ha_site_down.go @@ -95,6 +95,16 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SiteDownTests", func() { }) + ginkgo.AfterEach(func() { + if supervisorCluster { + dumpSvcNsEventsOnTestFailure(client, namespace) + } + if guestCluster { + svcClient, svNamespace := getSvcClientAndNamespace() + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + } + }) + /* Bring down ESX in AZ1 1. Use Zonal storage class of AZ1 with immediate binding @@ -157,7 +167,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SiteDownTests", func() { statefulset.Spec.Template.Labels["app"] = statefulset.Name statefulset.Spec.Selector.MatchLabels["app"] = statefulset.Name statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storageclass.Name + Spec.StorageClassName = &storageclass.Name *statefulset.Spec.Replicas = replicas CreateStatefulSet(namespace, statefulset, client) stsList = append(stsList, statefulset) @@ -301,7 +311,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SiteDownTests", func() { statefulset.Spec.Template.Labels["app"] = statefulset.Name statefulset.Spec.Selector.MatchLabels["app"] = statefulset.Name statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storageclass.Name + Spec.StorageClassName = &storageclass.Name *statefulset.Spec.Replicas = replicas CreateStatefulSet(namespace, statefulset, client) stsList = append(stsList, statefulset) @@ -472,7 +482,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SiteDownTests", func() { statefulset.Spec.Template.Labels["app"] = statefulset.Name statefulset.Spec.Selector.MatchLabels["app"] = statefulset.Name statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. 
- Annotations["volume.beta.kubernetes.io/storage-class"] = storageclass.Name + Spec.StorageClassName = &storageclass.Name *statefulset.Spec.Replicas = replicas CreateStatefulSet(namespace, statefulset, client) stsList = append(stsList, statefulset) @@ -601,7 +611,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SiteDownTests", func() { statefulset.Spec.Template.Labels["app"] = statefulset.Name statefulset.Spec.Selector.MatchLabels["app"] = statefulset.Name statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storageclass.Name + Spec.StorageClassName = &storageclass.Name *statefulset.Spec.Replicas = replicas CreateStatefulSet(namespace, statefulset, client) stsList = append(stsList, statefulset) @@ -722,7 +732,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SiteDownTests", func() { statefulset.Spec.Template.Labels["app"] = statefulset.Name statefulset.Spec.Selector.MatchLabels["app"] = statefulset.Name statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storageclass.Name + Spec.StorageClassName = &storageclass.Name *statefulset.Spec.Replicas = replicas CreateStatefulSet(namespace, statefulset, client) stsList = append(stsList, statefulset) @@ -851,7 +861,7 @@ var _ = ginkgo.Describe("[csi-tkgs-ha] Tkgs-HA-SiteDownTests", func() { statefulset.Spec.Template.Labels["app"] = statefulset.Name statefulset.Spec.Selector.MatchLabels["app"] = statefulset.Name statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storageclass.Name + Spec.StorageClassName = &storageclass.Name *statefulset.Spec.Replicas = replicas CreateStatefulSet(namespace, statefulset, client) stsList = append(stsList, statefulset) diff --git a/tests/e2e/tkgs_ha_utils.go b/tests/e2e/tkgs_ha_utils.go index 69e79899a9..f91b02bf55 100644 --- a/tests/e2e/tkgs_ha_utils.go +++ b/tests/e2e/tkgs_ha_utils.go @@ -38,6 +38,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -182,7 +183,7 @@ func verifyVolumeProvisioningWithServiceDown(serviceName string, namespace strin statefulset := GetStatefulSetFromManifest(namespace) ginkgo.By("Creating statefulset") statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. 
- Annotations["volume.beta.kubernetes.io/storage-class"] = storageclass.Name + Spec.StorageClassName = &storageclass.Name *statefulset.Spec.Replicas = 3 _, err = client.AppsV1().StatefulSets(namespace).Create(ctx, statefulset, metav1.CreateOptions{}) framework.ExpectNoError(err) @@ -254,13 +255,13 @@ func verifyOnlineVolumeExpansionOnGc(client clientset.Interface, namespace strin gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - _ = framework.RunKubectlOrDie(namespace, "cp", testdataFile, + _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", testdataFile, fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name)) onlineVolumeResizeCheck(f, client, namespace, svcPVCName, volHandle, pvclaim, pod) ginkgo.By("Checking data consistency after PVC resize") - _ = framework.RunKubectlOrDie(namespace, "cp", + _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name), testdataFile+"_pod") defer func() { op, err = exec.Command("rm", "-f", testdataFile+"_pod").Output() @@ -282,32 +283,10 @@ func verifyOnlineVolumeExpansionOnGc(client clientset.Interface, namespace strin // verifyOfflineVolumeExpansionOnGc is a util method which helps in verifying offline volume expansion on gc func verifyOfflineVolumeExpansionOnGc(client clientset.Interface, pvclaim *v1.PersistentVolumeClaim, svcPVCName string, namespace string, volHandle string, pod *v1.Pod, pv *v1.PersistentVolume, f *framework.Framework) { - cmd := []string{"exec", "", "--namespace=" + namespace, "--", "/bin/sh", "-c", "df -Tkm | grep /mnt/volume1"} - ginkgo.By("Verify the volume is accessible and filesystem type is as expected") - cmd[1] = pod.Name - lastOutput := framework.RunKubectlOrDie(namespace, cmd...) - gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) - ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") originalFsSize, err := getFSSizeMb(f, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - rand.New(rand.NewSource(time.Now().Unix())) - testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) - ginkgo.By(fmt.Sprintf("Creating a 512mb test data file %v", testdataFile)) - op, err := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), - "bs=64k", "count=8000").Output() - fmt.Println(op) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - op, err = exec.Command("rm", "-f", testdataFile).Output() - fmt.Println(op) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - - _ = framework.RunKubectlOrDie(namespace, "cp", testdataFile, - fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name)) - // Delete POD. ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s before expansion", pod.Name, namespace)) err = fpod.DeletePodWithWait(client, pod) @@ -391,11 +370,6 @@ func verifyOfflineVolumeExpansionOnGc(client clientset.Interface, pvclaim *v1.Pe gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") - ginkgo.By("Verify after expansion the filesystem type is as expected") - cmd[1] = pod.Name - lastOutput = framework.RunKubectlOrDie(namespace, cmd...) 
- gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) - ginkgo.By("Waiting for file system resize to finish") pvclaim, err = waitForFSResize(pvclaim, client) framework.ExpectNoError(err, "while waiting for fs resize to finish") @@ -413,20 +387,6 @@ func verifyOfflineVolumeExpansionOnGc(client clientset.Interface, pvclaim *v1.Pe framework.Failf("error updating filesystem size for %q. Resulting filesystem size is %d", pvclaim.Name, fsSize) } - ginkgo.By("Checking data consistency after PVC resize") - _ = framework.RunKubectlOrDie(namespace, "cp", - fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name), testdataFile+"_pod") - defer func() { - op, err = exec.Command("rm", "-f", testdataFile+"_pod").Output() - fmt.Println("rm: ", op) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - ginkgo.By("Running diff...") - op, err = exec.Command("diff", testdataFile, testdataFile+"_pod").Output() - fmt.Println("diff: ", op) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(len(op)).To(gomega.BeZero()) - ginkgo.By("File system resize finished successfully in GC") ginkgo.By("Checking for PVC resize completion on SVC PVC") _, err = waitForFSResizeInSvc(svcPVCName) @@ -563,22 +523,28 @@ func verifyVolumeMetadataOnDeployments(ctx context.Context, pvcName, metav1.GetOptions{}) gomega.Expect(pvclaim).NotTo(gomega.BeNil()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - volHandle := getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle) - gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) - svcPVCName := pv.Spec.CSI.VolumeHandle - - svcPVC := getPVCFromSupervisorCluster(svcPVCName) - gomega.Expect(*svcPVC.Spec.StorageClassName == storagePolicyName).To( - gomega.BeTrue(), "SV Pvc storageclass does not match with SV storageclass") - framework.Logf("GC PVC's storageclass matches SVC PVC's storageclass") - - verifyAnnotationsAndNodeAffinity(allowedTopologyHAMap, categories, pod, - nodeList, svcPVC, pv, svcPVCName) - // Verify the attached volume match the one in CNS cache - err = waitAndVerifyCnsVolumeMetadata4GCVol(volHandle, svcPVCName, pvclaim, - pv, pod) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if guestCluster { + volHandle := getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle) + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + svcPVCName := pv.Spec.CSI.VolumeHandle + + svcPVC := getPVCFromSupervisorCluster(svcPVCName) + gomega.Expect(*svcPVC.Spec.StorageClassName == storagePolicyName).To( + gomega.BeTrue(), "SV Pvc storageclass does not match with SV storageclass") + framework.Logf("GC PVC's storageclass matches SVC PVC's storageclass") + + verifyAnnotationsAndNodeAffinity(allowedTopologyHAMap, categories, pod, + nodeList, svcPVC, pv, svcPVCName) + + // Verify the attached volume match the one in CNS cache + err = waitAndVerifyCnsVolumeMetadata4GCVol(volHandle, svcPVCName, pvclaim, + pv, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else if vanillaCluster { + err = waitAndVerifyCnsVolumeMetadata(pv.Spec.CSI.VolumeHandle, pvclaim, pv, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } } } } @@ -839,3 +805,93 @@ func exitHostMM(ctx context.Context, host *object.HostSystem, timeout int32) { framework.Logf("Host: %v exited from maintenance mode", host) } + +// PodAffinity values are set in this method +func getPodAffinityTerm(allowedTopologyHAMap map[string][]string) []v1.PodAffinityTerm { + var podAffinityTerm v1.PodAffinityTerm + var podAffinityTerms []v1.PodAffinityTerm + var labelSelector 
*metav1.LabelSelector + var labelSelectorRequirements []metav1.LabelSelectorRequirement + var labelSelectorRequirement metav1.LabelSelectorRequirement + + labelSelectorRequirement.Key = "app" + labelSelectorRequirement.Operator = "In" + labelSelectorRequirement.Values = []string{"nginx"} + labelSelectorRequirements = append(labelSelectorRequirements, labelSelectorRequirement) + labelSelector = new(metav1.LabelSelector) + labelSelector.MatchExpressions = labelSelectorRequirements + podAffinityTerm.LabelSelector = labelSelector + for key := range allowedTopologyHAMap { + podAffinityTerm.TopologyKey = key + } + podAffinityTerms = append(podAffinityTerms, podAffinityTerm) + return podAffinityTerms +} + +// verifyStsVolumeMetadata verifies sts pod replicas and tkg annotations and +// node affinities on svc pvc, and verifies CNS volume metadata +func verifyStsVolumeMetadata(client clientset.Interface, ctx context.Context, namespace string, + statefulset *appsv1.StatefulSet, replicas int32, allowedTopologyHAMap map[string][]string, + categories []string, storagePolicyName string, nodeList *v1.NodeList, f *framework.Framework) { + // Waiting for pods status to be Ready + fss.WaitForStatusReadyReplicas(client, statefulset, replicas) + gomega.Expect(fss.CheckMount(client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) + ssPodsBeforeScaleDown := fss.GetPodList(client, statefulset) + gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), + fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) + gomega.Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(gomega.BeTrue(), + "Number of Pods in the statefulset should match with number of replicas") + + ginkgo.By("Verify GV PV and SV PV have the required PV node affinity details") + ginkgo.By("Verify SV PVC has TKG HA annotations set") + // Get the list of Volumes attached to Pods before scale down + for _, sspod := range ssPodsBeforeScaleDown.Items { + pod, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, volumespec := range sspod.Spec.Volumes { + if volumespec.PersistentVolumeClaim != nil { + pvcName := volumespec.PersistentVolumeClaim.ClaimName + pv := getPvFromClaim(client, statefulset.Namespace, pvcName) + pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, + pvcName, metav1.GetOptions{}) + gomega.Expect(pvclaim).NotTo(gomega.BeNil()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle) + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + svcPVCName := pv.Spec.CSI.VolumeHandle + + svcPVC := getPVCFromSupervisorCluster(svcPVCName) + gomega.Expect(*svcPVC.Spec.StorageClassName == storagePolicyName).To( + gomega.BeTrue(), "SV Pvc storageclass does not match with SV storageclass") + framework.Logf("GC PVC's storageclass matches SVC PVC's storageclass") + + verifyAnnotationsAndNodeAffinity(allowedTopologyHAMap, categories, pod, + nodeList, svcPVC, pv, svcPVCName) + + // Verify the attached volume matches the one in the CNS cache + err = waitAndVerifyCnsVolumeMetadata4GCVol(volHandle, svcPVCName, pvclaim, + pv, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + framework.Logf("Verify volume: %s is attached to the node: %s", + pv.Spec.CSI.VolumeHandle, sspod.Spec.NodeName) + var vmUUID string + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vmUUID, err =
getVMUUIDFromNodeName(pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + verifyCRDInSupervisorWithWait(ctx, f, pod.Spec.NodeName+"-"+svcPVCName, + crdCNSNodeVMAttachment, crdVersion, crdGroup, true) + + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Disk is not attached to the node") + framework.Logf("verify the attached volumes match those in CNS Cache") + err = waitAndVerifyCnsVolumeMetadata4GCVol(volHandle, svcPVCName, pvclaim, + pv, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + } + +} diff --git a/tests/e2e/topology_aware_node_poweroff.go b/tests/e2e/topology_aware_node_poweroff.go index 28bd40fee5..55e58b4272 100644 --- a/tests/e2e/topology_aware_node_poweroff.go +++ b/tests/e2e/topology_aware_node_poweroff.go @@ -223,6 +223,12 @@ var _ = ginkgo.Describe("[csi-topology-vanilla] Topology-Aware-Provisioning-With gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() + ginkgo.By("Creating service") + service := CreateService(namespace, client) + defer func() { + deleteService(namespace, client, service) + }() + ginkgo.By("Creating statefulset with single replica") statefulset, service := createStatefulSetWithOneReplica(client, manifestPath, namespace) defer func() { diff --git a/tests/e2e/topology_multi_replica.go b/tests/e2e/topology_multi_replica.go index 1af9005d3f..7fc74a17f3 100644 --- a/tests/e2e/topology_multi_replica.go +++ b/tests/e2e/topology_multi_replica.go @@ -40,6 +40,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -81,6 +82,7 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi k8sVersion string nimbusGeneratedVcPwd string nimbusGeneratedK8sVmPwd string + clientIndex int ) ginkgo.BeforeEach(func() { var cancel context.CancelFunc @@ -108,6 +110,7 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi topologyLength = 5 isSPSServiceStopped = false isVsanHealthServiceStopped = false + clientIndex = 0 topologyMap := GetAndExpectStringEnvVar(topologyMap) topologyAffinityDetails, topologyCategories = createTopologyMapLevel5(topologyMap, topologyLength) @@ -307,8 +310,9 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } /* Get current leader Csi-Controller-Pod where CSI Attacher is running" + @@ -328,7 +332,8 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi statefulSetReplicaCount = 2 ginkgo.By("Scale down statefulset replica and verify the replica count") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = 
scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -357,7 +362,8 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi statefulSetReplicaCount = 0 ginkgo.By("Scale down statefulset replica count to 0") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -505,15 +511,17 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Scale down StatefulSets replicas count statefulSetReplicaCount = 2 ginkgo.By("Scale down statefulset replica count") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -539,7 +547,8 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi statefulSetReplicaCount = 0 ginkgo.By("Scale down statefulset replica count to 0") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -683,8 +692,9 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + 
statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Fetch the number of CSI pods running before restart @@ -698,11 +708,11 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi // Restart CSI daemonset ginkgo.By("Restart Daemonset") cmd := []string{"rollout", "restart", "daemonset/vsphere-csi-node", "--namespace=" + csiSystemNamespace} - framework.RunKubectlOrDie(csiSystemNamespace, cmd...) + e2ekubectl.RunKubectlOrDie(csiSystemNamespace, cmd...) ginkgo.By("Waiting for daemon set rollout status to finish") statusCheck := []string{"rollout", "status", "daemonset/vsphere-csi-node", "--namespace=" + csiSystemNamespace} - framework.RunKubectlOrDie(csiSystemNamespace, statusCheck...) + e2ekubectl.RunKubectlOrDie(csiSystemNamespace, statusCheck...) // wait for csi Pods to be in running ready state err = fpod.WaitForPodsRunningReady(client, csiSystemNamespace, int32(num_csi_pods), 0, pollTimeout, ignoreLabels) @@ -712,22 +722,25 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi ginkgo.By("Scale up StaefulSets replicas in parallel") statefulSetReplicaCount = 5 for i := 0; i < len(statefulSets); i++ { - scaleUpStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleUpStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } /* Verify PV nde affinity and that the pods are running on appropriate nodes for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Scale down statefulset to 0 replicas statefulSetReplicaCount = 0 ginkgo.By("Scale down statefulset replica count to 0") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }) @@ -790,7 +803,7 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi // Creating multiple PVCs ginkgo.By("Trigger multiple PVCs") - pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, pvcCount) + pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, pvcCount, nil) // Verify PVC claim to be in bound phase and create POD for each PVC ginkgo.By("Verify PVC claim to be in bound phase and create POD for each PVC") @@ -934,8 +947,9 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(podList); i++ { - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, podList[i], - namespace, allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], + namespace, allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }) @@ -990,7 
+1004,7 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi // Creating multiple PVCs ginkgo.By("Trigger multiple PVCs") - pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, pvcCount) + pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, pvcCount, nil) // Verify PVC claim to be in bound phase and create POD for each PVC ginkgo.By("Verify PVC claim to be in bound phase and create POD for each PVC") @@ -1139,8 +1153,9 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(podList); i++ { - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, podList[i], - namespace, allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], + namespace, allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }) @@ -1199,7 +1214,7 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi // Creating multiple PVCs ginkgo.By("Trigger multiple PVCs") - pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, pvcCount) + pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, pvcCount, nil) /* Verifying if all PVCs are in Bound phase and trigger Deployment Pods for each created PVC. @@ -1223,10 +1238,10 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi gomega.Expect(err).NotTo(gomega.HaveOccurred()) /* Verify PV nde affinity and that the pods are running on appropriate nodes for each StatefulSet pod */ - verifyPVnodeAffinityAndPODnodedetailsForDeploymentSetsLevel5(ctx, client, deployment, - namespace, allowedTopologies, true) - deploymentList = append(deploymentList, deployment) + err = verifyPVnodeAffinityAndPODnodedetailsForDeploymentSetsLevel5(ctx, client, deployment, + namespace, allowedTopologies, true, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deploymentList = append(deploymentList, deployment) // Delete elected leader Csi-Controller-Pod where CSi-Attacher is running if i == 2 { ginkgo.By("Delete elected leader Csi-Controller-Pod where CSi-Attacher is running") @@ -1241,7 +1256,7 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi pv := getPvFromClaim(client, pvclaimsList[i].Namespace, pvclaimsList[i].Name) err = fpv.DeletePersistentVolumeClaim(client, pvclaimsList[i].Name, namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(client, pv.Name, poll, pollTimeoutShort)) + framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(client, pv.Name, poll, framework.ClaimProvisionTimeout)) err = e2eVSphere.waitForCNSVolumeToBeDeleted(pv.Spec.CSI.VolumeHandle) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1413,8 +1428,9 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) } /* Get elected current leader Csi-Controller-Pod where CSI Attacher is running" + @@ -1432,7 +1448,8 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi statefulSetReplicaCount = 2 ginkgo.By("Scale down statefulset replica count") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if i == 1 { /* Delete newly elected leader CSi-Controller-Pod where CSI-Attacher is running */ ginkgo.By("Delete elected leader CSi-Controller-Pod where CSI-Attacher is running") @@ -1455,7 +1472,8 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi statefulSetReplicaCount = 0 ginkgo.By("Scale down statefulset replica count to 0") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Verify that the StatefulSet Pods, PVC's are deleted successfully @@ -1637,8 +1655,9 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(podList); i++ { - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, podList[i], - namespace, allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], + namespace, allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }) @@ -1718,7 +1737,8 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort username := vsphereCfg.Global.User newPassword := e2eTestPassword - err = invokeVCenterChangePassword(username, nimbusGeneratedVcPwd, newPassword, vcAddress) + err = invokeVCenterChangePassword(username, nimbusGeneratedVcPwd, newPassword, vcAddress, + false, clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Modifying the password in the secret") @@ -1740,17 +1760,17 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi csiReplicaCount := *deployment.Spec.Replicas ginkgo.By("Stopping CSI driver") - isServiceStopped, err := stopCSIPods(ctx, c) + isServiceStopped, err := stopCSIPods(ctx, c, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { if isServiceStopped { framework.Logf("Starting CSI driver") - isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount) + isServiceStopped, err = startCSIPods(ctx, c, csiReplicaCount, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }() framework.Logf("Starting CSI driver") - _, err = startCSIPods(ctx, c, csiReplicaCount) + _, err = startCSIPods(ctx, c, csiReplicaCount, csiSystemNamespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // As we are in the same vCenter session, deletion of PVC should go through @@ -1759,7 +1779,8 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Reverting the password change") - err = 
invokeVCenterChangePassword(username, newPassword, nimbusGeneratedVcPwd, vcAddress) + err = invokeVCenterChangePassword(username, newPassword, nimbusGeneratedVcPwd, vcAddress, false, + clientIndex) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Reverting the secret change back to reflect the original password") @@ -1874,8 +1895,9 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(podList); i++ { - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, podList[i], - namespace, allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], + namespace, allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } }) // TESTCASE-6 @@ -2040,8 +2062,9 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") for i := 0; i < len(podList); i++ { - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, podList[i], namespace, - allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, podList[i], namespace, + allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Deleting Pod's @@ -2216,7 +2239,7 @@ var _ = ginkgo.Describe("[csi-topology-multireplica-level5] Topology-Aware-Provi // Creating multiple PVCs ginkgo.By("Trigger multiple PVCs") - pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, pvcCount) + pvclaimsList := createMultiplePVCsInParallel(ctx, client, namespace, storageclass, pvcCount, nil) defer func() { // cleanup code for deleting PVC ginkgo.By("Deleting PVC's and PV's") diff --git a/tests/e2e/topology_operation_strom_cases.go b/tests/e2e/topology_operation_strom_cases.go index 58c48a2366..f9378d664d 100644 --- a/tests/e2e/topology_operation_strom_cases.go +++ b/tests/e2e/topology_operation_strom_cases.go @@ -204,8 +204,9 @@ var _ = ginkgo.Describe("[csi-topology-operation-strom-level5] "+ for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Bring down few esxi hosts that belongs to zone3 @@ -236,7 +237,9 @@ var _ = ginkgo.Describe("[csi-topology-operation-strom-level5] "+ // Scale up statefulSets replicas count ginkgo.By("Scale up statefulset replica and verify the replica count") statefulSetReplicaCount = 35 - scaleUpStatefulSetPod(ctx, client, statefulSets[0], namespace, statefulSetReplicaCount, true) + err = scaleUpStatefulSetPod(ctx, client, statefulSets[0], namespace, statefulSetReplicaCount, + true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[0]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -244,7 +247,8 @@ var _ = 
ginkgo.Describe("[csi-topology-operation-strom-level5] "+ // Scale down statefulSets replica count statefulSetReplicaCount = 5 ginkgo.By("Scale down statefulset replica and verify the replica count") - scaleDownStatefulSetPod(ctx, client, statefulSets[1], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[1], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown = GetListOfPodsInSts(client, statefulSets[1]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -281,10 +285,16 @@ var _ = ginkgo.Describe("[csi-topology-operation-strom-level5] "+ } }() + ginkgo.By("Wait for k8s cluster to be healthy") + wait4AllK8sNodesToBeUp(ctx, client, nodeList) + err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // Scale down statefulSets replicas count statefulSetReplicaCount = 10 ginkgo.By("Scale down statefulset replica and verify the replica count") - scaleDownStatefulSetPod(ctx, client, statefulSets[0], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[0], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown = GetListOfPodsInSts(client, statefulSets[0]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -292,7 +302,9 @@ var _ = ginkgo.Describe("[csi-topology-operation-strom-level5] "+ // Scale up statefulSets replica count statefulSetReplicaCount = 35 ginkgo.By("Scale up statefulset replica and verify the replica count") - scaleUpStatefulSetPod(ctx, client, statefulSets[1], namespace, statefulSetReplicaCount, true) + err = scaleUpStatefulSetPod(ctx, client, statefulSets[1], namespace, statefulSetReplicaCount, + true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown = GetListOfPodsInSts(client, statefulSets[1]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -301,8 +313,9 @@ var _ = ginkgo.Describe("[csi-topology-operation-strom-level5] "+ for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Bring up all ESXi host which were powered off in zone2 @@ -341,16 +354,12 @@ var _ = ginkgo.Describe("[csi-topology-operation-strom-level5] "+ } } - ginkgo.By("Verify k8s cluster is healthy") - wait4AllK8sNodesToBeUp(ctx, client, nodeList) - err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // Scale down statefulSets replica count statefulSetReplicaCount = 0 ginkgo.By("Scale down statefulset replica count") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = 
scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -544,6 +553,11 @@ var _ = ginkgo.Describe("[csi-topology-operation-strom-level5] "+ } } + ginkgo.By("Wait for k8s cluster to be healthy") + wait4AllK8sNodesToBeUp(ctx, client, nodeList) + err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + /* Get newly elected leader Csi-Controller-Pod where CSI Provisioner is running" + find new master node IP where this Csi-Controller-Pod is running */ ginkgo.By("Get newly Leader Csi-Controller-Pod where CSI Provisioner is running and " + @@ -565,8 +579,9 @@ var _ = ginkgo.Describe("[csi-topology-operation-strom-level5] "+ for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } /* Get current leader Csi-Controller-Pod where CSI Attacher is running and " + @@ -580,11 +595,6 @@ var _ = ginkgo.Describe("[csi-topology-operation-strom-level5] "+ "which is running on master node %s", controller_name, k8sMasterIP) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Wait for k8s cluster to be healthy") - wait4AllK8sNodesToBeUp(ctx, client, nodeList) - err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // Verify all the workload Pods are in up and running state ginkgo.By("Verify all the workload Pods are in up and running state") ssPods = fss.GetPodList(client, statefulSets[1]) @@ -599,7 +609,8 @@ var _ = ginkgo.Describe("[csi-topology-operation-strom-level5] "+ statefulSetReplicaCount = 5 ginkgo.By("Scale down statefulset replica and verify the replica count") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -625,7 +636,9 @@ var _ = ginkgo.Describe("[csi-topology-operation-strom-level5] "+ // Scale up statefulSets replicas count statefulSetReplicaCount = 20 ginkgo.By("Scale up statefulset replica and verify the replica count") - scaleUpStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleUpStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, + true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), 
"Number of Pods in the statefulset should match with number of replicas") @@ -635,15 +648,17 @@ var _ = ginkgo.Describe("[csi-topology-operation-strom-level5] "+ for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Scale down statefulSets replica count statefulSetReplicaCount = 0 ginkgo.By("Scale down statefulset replica count") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") diff --git a/tests/e2e/topology_site_down_cases.go b/tests/e2e/topology_site_down_cases.go index a4d491f82d..e3e0f7734e 100644 --- a/tests/e2e/topology_site_down_cases.go +++ b/tests/e2e/topology_site_down_cases.go @@ -188,8 +188,9 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Bring down 1 ESXi's that belongs to zone1 and Bring down 1 ESXi's that belongs to zone2 @@ -229,15 +230,18 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Scale up statefulSets replicas count ginkgo.By("Scaleup any one StatefulSets replica") statefulSetReplicaCount = 10 ginkgo.By("Scale down statefulset replica and verify the replica count") - scaleUpStatefulSetPod(ctx, client, statefulSets[0], namespace, statefulSetReplicaCount, true) + err = scaleUpStatefulSetPod(ctx, client, statefulSets[0], namespace, statefulSetReplicaCount, + true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[0]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -246,7 +250,8 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] 
Topology-Aware-Provision statefulSetReplicaCount = 5 ginkgo.By("Scale down statefulset replica count") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -275,8 +280,9 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // verifyVolumeMetadataInCNS @@ -295,16 +301,13 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision } } } - ginkgo.By("Verify k8s cluster is healthy") - wait4AllK8sNodesToBeUp(ctx, client, nodeList) - err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Scale down statefulSets replica count statefulSetReplicaCount = 0 ginkgo.By("Scale down statefulset replica count") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -393,8 +396,9 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Bring down 1 ESXi's that belongs to Cluster2 and Bring down 1 ESXi's that belongs to Cluster3 @@ -433,15 +437,18 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Scale up statefulSets replicas count ginkgo.By("Scaleup any one 
StatefulSets replica") statefulSetReplicaCount = 10 ginkgo.By("Scale down statefulset replica and verify the replica count") - scaleUpStatefulSetPod(ctx, client, statefulSets[0], namespace, statefulSetReplicaCount, true) + err = scaleUpStatefulSetPod(ctx, client, statefulSets[0], namespace, statefulSetReplicaCount, + true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[0]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -450,7 +457,8 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision statefulSetReplicaCount = 5 ginkgo.By("Scale down statefulset replica count") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -471,8 +479,9 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // verifyVolumeMetadataInCNS @@ -492,16 +501,12 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision } } - ginkgo.By("Verify k8s cluster is healthy") - wait4AllK8sNodesToBeUp(ctx, client, nodeList) - err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // Scale down statefulSets replica count statefulSetReplicaCount = 0 ginkgo.By("Scale down statefulset replica count") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -590,8 +595,9 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Bring down ESXi host that belongs to zone1, 
zone2 and zone3 @@ -616,12 +622,19 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision }() powerOffHostsList = append(append(powerOffHostsList, powerOffHostsList2...), powerOffHostsList3...) + // Wait for k8s cluster to be healthy + ginkgo.By("Wait for k8s cluster to be healthy") + wait4AllK8sNodesToBeUp(ctx, client, nodeList) + err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + /* Verify PV nde affinity and that the pods are running on appropriate nodes for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Bring up @@ -630,11 +643,19 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision powerOnEsxiHostByCluster(powerOffHostsList[i]) } + // Wait for k8s cluster to be healthy + ginkgo.By("Wait for k8s cluster to be healthy") + wait4AllK8sNodesToBeUp(ctx, client, nodeList) + err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // Scale up statefulSets replicas count ginkgo.By("Scaleup any one StatefulSets replica") statefulSetReplicaCount = 10 ginkgo.By("Scale down statefulset replica and verify the replica count") - scaleUpStatefulSetPod(ctx, client, statefulSets[0], namespace, statefulSetReplicaCount, true) + err = scaleUpStatefulSetPod(ctx, client, statefulSets[0], namespace, statefulSetReplicaCount, + true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[0]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -643,23 +664,20 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision statefulSetReplicaCount = 5 ginkgo.By("Scale down statefulset replica count") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") } - ginkgo.By("Wait for k8s cluster to be healthy") - wait4AllK8sNodesToBeUp(ctx, client, nodeList) - err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - /* Verify PV nde affinity and that the pods are running on appropriate nodes for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, 
allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // verifyVolumeMetadataInCNS @@ -679,16 +697,12 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision } } - ginkgo.By("Verify k8s cluster is healthy") - wait4AllK8sNodesToBeUp(ctx, client, nodeList) - err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // Scale down statefulSets replica count statefulSetReplicaCount = 0 ginkgo.By("Scale down statefulset replica count") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -780,8 +794,9 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Bring down ESXi hosts that belongs to zone2 @@ -811,15 +826,19 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } // Scale up statefulSets replicas count ginkgo.By("Scaleup any one StatefulSets replica") statefulSetReplicaCount = 10 ginkgo.By("Scale down statefulset replica and verify the replica count") - scaleUpStatefulSetPod(ctx, client, statefulSets[0], namespace, statefulSetReplicaCount, true) + err = scaleUpStatefulSetPod(ctx, client, statefulSets[0], namespace, statefulSetReplicaCount, + true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[0]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -828,7 +847,8 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision statefulSetReplicaCount = 5 ginkgo.By("Scale down statefulset replica count") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) 
gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -849,8 +869,10 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } // verifyVolumeMetadataInCNS @@ -870,16 +892,12 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision } } - ginkgo.By("Verify k8s cluster is healthy") - wait4AllK8sNodesToBeUp(ctx, client, nodeList) - err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // Scale down statefulSets replica count statefulSetReplicaCount = 0 ginkgo.By("Scale down statefulset replica count") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -971,8 +989,9 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Bring down ESXi hosts that belongs to zone3 @@ -987,6 +1006,12 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision } }() + // Wait for k8s cluster to be healthy + ginkgo.By("Wait for k8s cluster to be healthy") + wait4AllK8sNodesToBeUp(ctx, client, nodeList) + err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // Verify all the workload Pods are in up and running state ginkgo.By("Verify all the workload Pods are in up and running state") ssPods = fss.GetPodList(client, statefulSets[1]) @@ -1000,15 +1025,17 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Scale up statefulSets replicas count 
ginkgo.By("Scaleup any one StatefulSets replica") statefulSetReplicaCount = 5 ginkgo.By("Scale down statefulset replica and verify the replica count") - scaleUpStatefulSetPod(ctx, client, statefulSets[0], namespace, statefulSetReplicaCount, true) + err = scaleUpStatefulSetPod(ctx, client, statefulSets[0], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[0]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -1017,7 +1044,8 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision statefulSetReplicaCount = 10 ginkgo.By("Scale down statefulset replica count") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -1038,8 +1066,9 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // verifyVolumeMetadataInCNS @@ -1059,16 +1088,12 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision } } - ginkgo.By("Verify k8s cluster is healthy") - wait4AllK8sNodesToBeUp(ctx, client, nodeList) - err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // Scale down statefulSets replica count statefulSetReplicaCount = 0 ginkgo.By("Scale down statefulset replica count") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -1158,8 +1183,9 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // Bring down 
ESXi hosts that belongs to zone2 @@ -1254,7 +1280,9 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision // Scale down statefulSets replica count statefulSetReplicaCount = 5 ginkgo.By("Scale down statefulset replica count") - scaleDownStatefulSetPod(ctx, client, statefulSets[0], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[0], namespace, statefulSetReplicaCount, + true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[0]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") @@ -1271,8 +1299,9 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision for each StatefulSet pod */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") for i := 0; i < len(statefulSets); i++ { - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulSets[i], namespace, allowedTopologies, true) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulSets[i], namespace, allowedTopologies, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // verifyVolumeMetadataInCNS @@ -1291,16 +1320,13 @@ var _ = ginkgo.Describe("[csi-topology-sitedown-level5] Topology-Aware-Provision } } } - ginkgo.By("Verify k8s cluster is healthy") - wait4AllK8sNodesToBeUp(ctx, client, nodeList) - err = waitForAllNodes2BeReady(ctx, client, pollTimeout*4) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Scale down statefulSets replica count statefulSetReplicaCount = 0 ginkgo.By("Scale down statefulset replica count") for i := 0; i < len(statefulSets); i++ { - scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true) + err = scaleDownStatefulSetPod(ctx, client, statefulSets[i], namespace, statefulSetReplicaCount, true, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ssPodsAfterScaleDown := GetListOfPodsInSts(client, statefulSets[i]) gomega.Expect(len(ssPodsAfterScaleDown.Items) == int(statefulSetReplicaCount)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") diff --git a/tests/e2e/topology_snapshot.go b/tests/e2e/topology_snapshot.go index 97722ed42c..51f1b52105 100644 --- a/tests/e2e/topology_snapshot.go +++ b/tests/e2e/topology_snapshot.go @@ -16,6 +16,8 @@ package e2e import ( "context" "fmt" + "os" + "strconv" "strings" "time" @@ -26,7 +28,6 @@ import ( v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -38,8 +39,8 @@ import ( fss "k8s.io/kubernetes/test/e2e/framework/statefulset" admissionapi "k8s.io/pod-security-admission/api" - snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - snapclient "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" + snapV1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" ) var _ = ginkgo.Describe("[topology-snapshot] Topology Volume Snapshot tests", func() { @@ -60,6 +61,7 @@ var _ = ginkgo.Describe("[topology-snapshot] Topology Volume Snapshot 
tests", fu leafNode int leafNodeTag1 int leafNodeTag2 int + pandoraSyncWaitTime int ) ginkgo.BeforeEach(func() { @@ -109,6 +111,12 @@ var _ = ginkgo.Describe("[topology-snapshot] Topology Volume Snapshot tests", fu gomega.Expect(err).NotTo(gomega.HaveOccurred()) } + if os.Getenv(envPandoraSyncWaitTime) != "" { + pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + pandoraSyncWaitTime = defaultPandoraSyncWaitTime + } }) /* @@ -196,15 +204,17 @@ var _ = ginkgo.Describe("[topology-snapshot] Topology Volume Snapshot tests", fu defer func() { if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *volumeSnapshot.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) } }() @@ -225,7 +235,7 @@ var _ = ginkgo.Describe("[topology-snapshot] Topology Volume Snapshot tests", fu snapshotId := strings.Split(snapshothandle, "+")[1] ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId) + err = verifySnapshotIsCreatedInCNS(volHandle, snapshotId, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) _, err = snapc.SnapshotV1().VolumeSnapshotContents().Get(ctx, @@ -262,8 +272,9 @@ var _ = ginkgo.Describe("[topology-snapshot] Topology Volume Snapshot tests", fu specified in the allowed topologies of SC */ ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) @@ -280,8 +291,7 @@ var _ = ginkgo.Describe("[topology-snapshot] Topology Volume Snapshot tests", fu }() ginkgo.By("Delete volume snapshot and verify the snapshot content is deleted") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) snapshotCreated = false framework.Logf("Wait till the volume snapshot is deleted") @@ -290,15 +300,11 @@ var _ = ginkgo.Describe("[topology-snapshot] Topology Volume Snapshot tests", fu snapshotContentCreated = false ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId) + err = verifySnapshotIsDeletedInCNS(volHandle, snapshotId, false) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Deleting volume snapshot Again to check Not found error") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot.Name, metav1.DeleteOptions{}) - if !apierrors.IsNotFound(err) { - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - time.Sleep(40 * time.Second) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot.Name, pandoraSyncWaitTime) }) /* @@ -363,8 +369,9 @@ var _ = ginkgo.Describe("[topology-snapshot] Topology Volume Snapshot tests", fu // Verify PV node affinity and that the PODS are running on appropriate nodes ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Fetching pod 3, pvc3 and pv3 details") pod3, err := client.CoreV1().Pods(namespace).Get(ctx, @@ -406,16 +413,17 @@ var _ = ginkgo.Describe("[topology-snapshot] Topology Volume Snapshot tests", fu defer func() { if snapshotContentCreated { framework.Logf("Deleting volume snapshot content") - err := snapc.SnapshotV1().VolumeSnapshotContents().Delete(ctx, - *volumeSnapshot3.Status.BoundVolumeSnapshotContentName, metav1.DeleteOptions{}) + deleteVolumeSnapshotContentWithPandoraWait(ctx, snapc, + *volumeSnapshot3.Status.BoundVolumeSnapshotContentName, pandoraSyncWaitTime) + + framework.Logf("Wait till the volume snapshot is deleted") + err = waitForVolumeSnapshotContentToBeDeleted(*snapc, ctx, *volumeSnapshot3.Status.BoundVolumeSnapshotContentName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if snapshotCreated { framework.Logf("Deleting volume snapshot") - err := snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, volumeSnapshot3.Name, - metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot3.Name, pandoraSyncWaitTime) } }() @@ -437,7 +445,7 @@ var _ = ginkgo.Describe("[topology-snapshot] Topology Volume Snapshot tests", fu snapshotId2 := strings.Split(snapshothandle3, "+")[1] ginkgo.By("Query CNS and check the volume snapshot entry") - err = verifySnapshotIsCreatedInCNS(volHandle3, snapshotId2) + err = verifySnapshotIsCreatedInCNS(volHandle3, snapshotId2, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvcSpec := getPersistentVolumeClaimSpecWithDatasource(namespace, "1Gi", storageclass, nil, @@ -466,13 +474,12 @@ var _ = ginkgo.Describe("[topology-snapshot] Topology Volume Snapshot tests", fu appropriate node as specified in the allowed topologies of SC */ ginkgo.By("Verify newly created PV node affinity and that the new PODS are running " + "on appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Delete volume snapshot and verify the snapshot content is deleted") - err = snapc.SnapshotV1().VolumeSnapshots(namespace).Delete(ctx, - volumeSnapshot3.Name, metav1.DeleteOptions{}) - 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + deleteVolumeSnapshotWithPandoraWait(ctx, snapc, namespace, volumeSnapshot3.Name, pandoraSyncWaitTime) snapshotCreated = false framework.Logf("Wait till the volume snapshot content is deleted") @@ -482,7 +489,7 @@ var _ = ginkgo.Describe("[topology-snapshot] Topology Volume Snapshot tests", fu snapshotContentCreated = false ginkgo.By("Verify snapshot entry is deleted from CNS") - err = verifySnapshotIsDeletedInCNS(volHandle3, snapshotId2) + err = verifySnapshotIsDeletedInCNS(volHandle3, snapshotId2, false) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) diff --git a/tests/e2e/util.go b/tests/e2e/util.go index 4f2c2c6ac3..05f1232c69 100644 --- a/tests/e2e/util.go +++ b/tests/e2e/util.go @@ -71,15 +71,15 @@ import ( "k8s.io/kubectl/pkg/util/podutils" "k8s.io/kubernetes/test/e2e/framework" fdep "k8s.io/kubernetes/test/e2e/framework/deployment" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" "k8s.io/kubernetes/test/e2e/framework/manifest" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" fpv "k8s.io/kubernetes/test/e2e/framework/pv" fssh "k8s.io/kubernetes/test/e2e/framework/ssh" fss "k8s.io/kubernetes/test/e2e/framework/statefulset" - snapc "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - snapclient "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" "sigs.k8s.io/controller-runtime/pkg/client" cnsoperatorv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator" cnsfileaccessconfigv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsfileaccessconfig/v1alpha1" @@ -483,6 +483,50 @@ type GetTaskTstatus struct { OrgID string `json:"org_id"` } +// This Struct is used for Creating tanzu cluster +type TanzuCluster struct { + APIVersion string `yaml:"apiVersion"` + Kind string `yaml:"kind"` + Metadata struct { + Name string `yaml:"name"` + Namespace string `yaml:"namespace"` + } `yaml:"metadata"` + Spec struct { + Topology struct { + ControlPlane struct { + TKR struct { + Reference struct { + Name string `yaml:"name"` + } `yaml:"reference"` + } `yaml:"tkr"` + Replicas int `yaml:"replicas"` + VMClass string `yaml:"vmClass"` + StorageClass string `yaml:"storageClass"` + } `yaml:"controlPlane"` + NodePools []struct { + Replicas int `yaml:"replicas"` + Name string `yaml:"name"` + VMClass string `yaml:"vmClass"` + StorageClass string `yaml:"storageClass"` + } `yaml:"nodePools"` + } `yaml:"topology"` + Settings struct { + Network struct { + CNI struct { + Name string `yaml:"name"` + } `yaml:"cni"` + Services struct { + CIDRBlocks []string `yaml:"cidrBlocks"` + } `yaml:"services"` + Pods struct { + CIDRBlocks []string `yaml:"cidrBlocks"` + } `yaml:"pods"` + ServiceDomain string `yaml:"serviceDomain"` + } `yaml:"network"` + } `yaml:"settings"` + } `yaml:"spec"` +} + // getVSphereStorageClassSpec returns Storage Class Spec with supplied storage // class parameters. func getVSphereStorageClassSpec(scName string, scParameters map[string]string, @@ -546,23 +590,16 @@ func getPvFromClaim(client clientset.Interface, namespace string, claimName stri // getNodeUUID returns Node VM UUID for requested node. 
func getNodeUUID(ctx context.Context, client clientset.Interface, nodeName string) string { vmUUID := "" - if isCsiFssEnabled(ctx, client, GetAndExpectStringEnvVar(envCSINamespace), useCsiNodeID) { - csiNode, err := client.StorageV1().CSINodes().Get(ctx, nodeName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - csiDriverFound := false - for _, driver := range csiNode.Spec.Drivers { - if driver.Name == e2evSphereCSIDriverName { - csiDriverFound = true - vmUUID = driver.NodeID - } + csiNode, err := client.StorageV1().CSINodes().Get(ctx, nodeName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + csiDriverFound := false + for _, driver := range csiNode.Spec.Drivers { + if driver.Name == e2evSphereCSIDriverName { + csiDriverFound = true + vmUUID = driver.NodeID } - gomega.Expect(csiDriverFound).To(gomega.BeTrue(), "CSI driver not found in CSI node %s", nodeName) - } else { - node, err := client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - vmUUID = strings.TrimPrefix(node.Spec.ProviderID, providerPrefix) - gomega.Expect(vmUUID).NotTo(gomega.BeEmpty()) } + gomega.Expect(csiDriverFound).To(gomega.BeTrue(), "CSI driver not found in CSI node %s", nodeName) ginkgo.By(fmt.Sprintf("VM UUID is: %s for node: %s", vmUUID, nodeName)) return vmUUID } @@ -858,6 +895,7 @@ func createPVC(client clientset.Interface, pvcnamespace string, pvclaimlabels ma // storage class. func scaleCreatePVC(client clientset.Interface, pvcnamespace string, pvclaimlabels map[string]string, ds string, storageclass *storagev1.StorageClass, accessMode v1.PersistentVolumeAccessMode, wg *sync.WaitGroup) { + defer ginkgo.GinkgoRecover() defer wg.Done() pvcspec := getPersistentVolumeClaimSpecWithStorageClass(pvcnamespace, ds, storageclass, pvclaimlabels, accessMode) @@ -876,6 +914,7 @@ func scaleCreatePVC(client clientset.Interface, pvcnamespace string, pvclaimlabe func scaleCreateDeletePVC(client clientset.Interface, pvcnamespace string, pvclaimlabels map[string]string, ds string, storageclass *storagev1.StorageClass, accessMode v1.PersistentVolumeAccessMode, wg *sync.WaitGroup, lock *sync.Mutex, worker int) { + defer ginkgo.GinkgoRecover() ctx, cancel := context.WithCancel(context.Background()) var totalPVCDeleted int = 0 defer cancel() @@ -948,9 +987,12 @@ func updateDeploymentReplicawithWait(client clientset.Interface, count int32, na var err error waitErr := wait.Poll(healthStatusPollInterval, healthStatusPollTimeout, func() (bool, error) { deployment, err = client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) if err != nil { - return false, nil + if count == 0 && apierrors.IsNotFound(err) { + return true, nil + } else { + return false, err + } } *deployment.Spec.Replicas = count ginkgo.By("Waiting for update operation on deployment to take effect") @@ -1110,7 +1152,7 @@ func updateCSIDeploymentProvisionerTimeout(client clientset.Interface, namespace framework.Logf("Waiting for a min for update operation on deployment to take effect...") time.Sleep(1 * time.Minute) err = fpod.WaitForPodsRunningReady(client, csiSystemNamespace, int32(num_csi_pods), 0, - pollTimeout, ignoreLabels) + 2*pollTimeout, ignoreLabels) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1410,7 +1452,7 @@ func checkVcenterServicesRunning( var pollTime time.Duration if len(timeout) == 0 { - pollTime = pollTimeout * 2 + pollTime = pollTimeout * 6 } else { pollTime = timeout[0] 
} @@ -1596,7 +1638,7 @@ func upgradeTKG(wcpHost string, wcpToken string, tkgCluster string, tkgImage str } // createGC method creates GC and takes WCP host and bearer token as input param -func createGC(wcpHost string, wcpToken string) { +func createGC(wcpHost string, wcpToken string, tkgImageName string, clusterName string) { transCfg := &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // ignore expired SSL certificates @@ -1611,7 +1653,26 @@ func createGC(wcpHost string, wcpToken string) { gcBytes, err := os.ReadFile(tkg_yaml) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - req, err := http.NewRequest("POST", createGCURL, bytes.NewBuffer(gcBytes)) + var tkg TanzuCluster + err = yaml.Unmarshal([]byte(gcBytes), &tkg) + if err != nil { + framework.Logf("Error: %v", err) + } + + // Change the value of the replaceImage field + tkg.Spec.Topology.ControlPlane.TKR.Reference.Name = tkgImageName + tkg.Metadata.Name = clusterName + + // Marshal the updated struct back to YAML + updatedYAML, err := yaml.Marshal(&tkg) + if err != nil { + framework.Logf("Error: %v", err) + } + + // Convert the marshalled YAML to []byte + updatedYAMLBytes := []byte(updatedYAML) + + req, err := http.NewRequest("POST", createGCURL, bytes.NewBuffer(updatedYAMLBytes)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) req.Header.Add("Authorization", "Bearer "+wcpToken) req.Header.Add("Accept", "application/yaml") @@ -1951,7 +2012,10 @@ func writeToFile(filePath, data string) error { // invokeVCenterChangePassword invokes `dir-cli password reset` command on the // given vCenter host over SSH, thereby resetting the currentPassword of the // `user` to the `newPassword`. -func invokeVCenterChangePassword(user, adminPassword, newPassword, host string) error { +func invokeVCenterChangePassword(user, adminPassword, newPassword, host string, + isMultiVcSetup bool, clientIndex int) error { + var copyCmd string + var removeCmd string // Create an input file and write passwords into it. path := "input.txt" data := fmt.Sprintf("%s\n%s\n", adminPassword, newPassword) @@ -1964,16 +2028,27 @@ func invokeVCenterChangePassword(user, adminPassword, newPassword, host string) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() // Remote copy this input file to VC. - copyCmd := fmt.Sprintf("/bin/cat %s | /usr/bin/ssh root@%s '/usr/bin/cat >> input_copy.txt'", - path, e2eVSphere.Config.Global.VCenterHostname) + if !isMultiVcSetup { + copyCmd = fmt.Sprintf("/bin/cat %s | /usr/bin/ssh root@%s '/usr/bin/cat >> input_copy.txt'", + path, e2eVSphere.Config.Global.VCenterHostname) + } else { + vCenter := strings.Split(multiVCe2eVSphere.multivcConfig.Global.VCenterHostname, ",")[clientIndex] + copyCmd = fmt.Sprintf("/bin/cat %s | /usr/bin/ssh root@%s '/usr/bin/cat >> input_copy.txt'", + path, vCenter) + } fmt.Printf("Executing the command: %s\n", copyCmd) _, err = exec.Command("/bin/sh", "-c", copyCmd).Output() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { // Remove the input_copy.txt file from VC. 
- removeCmd := fmt.Sprintf("/usr/bin/ssh root@%s '/usr/bin/rm input_copy.txt'", - e2eVSphere.Config.Global.VCenterHostname) + if !isMultiVcSetup { + removeCmd = fmt.Sprintf("/usr/bin/ssh root@%s '/usr/bin/rm input_copy.txt'", + e2eVSphere.Config.Global.VCenterHostname) + } else { + vCenter := strings.Split(multiVCe2eVSphere.multivcConfig.Global.VCenterHostname, ",")[clientIndex] + removeCmd = fmt.Sprintf("/usr/bin/ssh root@%s '/usr/bin/rm input_copy.txt'", + vCenter) + } _, err = exec.Command("/bin/sh", "-c", removeCmd).Output() gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() @@ -2270,6 +2345,7 @@ func getVolumeIDFromSupervisorCluster(pvcName string) string { svcPV := getPvFromClaim(svcClient, svNamespace, pvcName) volumeHandle := svcPV.Spec.CSI.VolumeHandle ginkgo.By(fmt.Sprintf("Found volume in Supervisor cluster with VolumeID: %s", volumeHandle)) + return volumeHandle } @@ -2288,7 +2364,7 @@ func getPvFromSupervisorCluster(pvcName string) *v1.PersistentVolume { func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths ...string) { for _, filePath := range filePaths { - _, err := framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), + _, err := e2ekubectl.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/ls", filePath) framework.ExpectNoError(err, fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName)) } @@ -2296,7 +2372,7 @@ func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths []string) { for _, filePath := range filePaths { - err := framework.CreateEmptyFileOnPod(namespace, podName, filePath) + err := e2eoutput.CreateEmptyFileOnPod(namespace, podName, filePath) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } @@ -2482,6 +2558,7 @@ func verifyIsDetachedInSupervisor(ctx context.Context, f *framework.Framework, // namespace. It takes client, namespace, pvc, pv as input. func verifyPodCreation(f *framework.Framework, client clientset.Interface, namespace string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { + defer ginkgo.GinkgoRecover() ginkgo.By("Create pod and wait for this to be in running phase") pod, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3134,14 +3211,14 @@ func GetPodSpecByUserID(ns string, nodeSelector map[string]string, pvclaims []*v // writeDataOnFileFromPod writes specified data from given Pod at the given. func writeDataOnFileFromPod(namespace string, podName string, filePath string, data string) { - _, err := framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), + _, err := e2ekubectl.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/sh", "-c", fmt.Sprintf(" echo %s > %s ", data, filePath)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } // readFileFromPod read data from given Pod and the given file. 
func readFileFromPod(namespace string, podName string, filePath string) string { - output, err := framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), + output, err := e2ekubectl.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/sh", "-c", fmt.Sprintf("less %s", filePath)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) return output @@ -3347,6 +3424,9 @@ func VsanObjIndentities(ctx context.Context, vs *vSphere, pvName string) string for _, cluster := range clusterComputeResource { if strings.Contains(cluster.Name(), computeCluster) { + // Fix for NotAuthenticated issue + bootstrap() + clusterConfig, err := vsanHealthClient.VsanQueryObjectIdentities(ctx, cluster.Reference()) framework.Logf("clusterconfig %v", clusterConfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3930,14 +4010,9 @@ func createPod(client clientset.Interface, namespace string, nodeSelector map[st return pod, nil } -// createDeployment create a deployment with 1 replica for given pvcs and node -// selector. -func createDeployment(ctx context.Context, client clientset.Interface, replicas int32, +func getDeploymentSpec(ctx context.Context, client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, - pvclaims []*v1.PersistentVolumeClaim, command string, isPrivileged bool, image string) (*appsv1.Deployment, error) { - if len(command) == 0 { - command = "trap exit TERM; while true; do sleep 1; done" - } + pvclaims []*v1.PersistentVolumeClaim, command string, isPrivileged bool, image string) *appsv1.Deployment { zero := int64(0) deploymentName := "deployment-" + string(uuid.NewUUID()) deploymentSpec := &appsv1.Deployment{ @@ -3985,6 +4060,19 @@ func createDeployment(ctx context.Context, client clientset.Interface, replicas if nodeSelector != nil { deploymentSpec.Spec.Template.Spec.NodeSelector = nodeSelector } + return deploymentSpec +} + +// createDeployment create a deployment with 1 replica for given pvcs and node +// selector. +func createDeployment(ctx context.Context, client clientset.Interface, replicas int32, + podLabels map[string]string, nodeSelector map[string]string, namespace string, + pvclaims []*v1.PersistentVolumeClaim, command string, isPrivileged bool, image string) (*appsv1.Deployment, error) { + if len(command) == 0 { + command = "trap exit TERM; while true; do sleep 1; done" + } + deploymentSpec := getDeploymentSpec(ctx, client, replicas, podLabels, nodeSelector, namespace, + pvclaims, command, isPrivileged, image) deployment, err := client.AppsV1().Deployments(namespace).Create(ctx, deploymentSpec, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err) @@ -4633,17 +4721,24 @@ which PV is provisioned. 
*/ func verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx context.Context, client clientset.Interface, statefulset *appsv1.StatefulSet, namespace string, - allowedTopologies []v1.TopologySelectorLabelRequirement, parallelStatefulSetCreation bool) { + allowedTopologies []v1.TopologySelectorLabelRequirement, + parallelStatefulSetCreation bool, isMultiVcSetup bool) error { allowedTopologiesMap := createAllowedTopologiesMap(allowedTopologies) var ssPodsBeforeScaleDown *v1.PodList + var err error + if parallelStatefulSetCreation { ssPodsBeforeScaleDown = GetListOfPodsInSts(client, statefulset) } else { ssPodsBeforeScaleDown = fss.GetPodList(client, statefulset) } + for _, sspod := range ssPodsBeforeScaleDown.Items { - _, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _, err = client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) + if err != nil { + return err + } + for _, volumespec := range sspod.Spec.Volumes { if volumespec.PersistentVolumeClaim != nil { // get pv details @@ -4653,36 +4748,60 @@ func verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx context.Cont ginkgo.By("Verifying PV node affinity details") res, err := verifyVolumeTopologyForLevel5(pv, allowedTopologiesMap) if res { - framework.Logf("PV %s node affinity details lies in the specified allowed topologies of Storage Class", pv.Name) + framework.Logf("PV %s node affinity details lie in the specified allowed "+ + "topologies of Storage Class", pv.Name) + } + if !res { + return fmt.Errorf("PV %s node affinity details are not in the specified allowed "+ + "topologies of Storage Class", pv.Name) + } + if err != nil { + return err } - gomega.Expect(res).To(gomega.BeTrue(), "PV %s node affinity details is not in the "+ - "specified allowed topologies of Storage Class", pv.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) // fetch node details nodeList, err := fnodes.GetReadySchedulableNodes(client) - framework.ExpectNoError(err, "Unable to find ready and schedulable Node") - if !(len(nodeList.Items) > 0) { - framework.Failf("Unable to find ready and schedulable Node") + if err != nil { + return err + } + if len(nodeList.Items) <= 0 { + return fmt.Errorf("unable to find ready and schedulable Node") } + // verify pod is running on appropriate nodes ginkgo.By("Verifying If Pods are running on appropriate nodes as mentioned in SC") res, err = verifyPodLocationLevel5(&sspod, nodeList, allowedTopologiesMap) if res { - framework.Logf("Pod %v is running on appropriate node as specified "+ - "in the allowed topolgies of Storage Class", sspod.Name) + framework.Logf("Pod %v is running on an appropriate node as specified in the "+ + "allowed topologies of Storage Class", sspod.Name) + } + if !res { + return fmt.Errorf("pod %v is not running on an appropriate node as specified "+ + "in the allowed topologies of Storage Class", sspod.Name) + } + if err != nil { + return err } - gomega.Expect(res).To(gomega.BeTrue(), "Pod %v is not running on appropriate node "+ - "as specified in allowed topolgies of Storage Class", sspod.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Verify the attached volume match the one in CNS cache - error := verifyVolumeMetadataInCNS(&e2eVSphere, pv.Spec.CSI.VolumeHandle, - volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name) - gomega.Expect(error).NotTo(gomega.HaveOccurred()) + if !isMultiVcSetup { + err := verifyVolumeMetadataInCNS(&e2eVSphere, 
pv.Spec.CSI.VolumeHandle, + volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name) + if err != nil { + return err + } + } else { + err := verifyVolumeMetadataInCNSForMultiVC(&multiVCe2eVSphere, pv.Spec.CSI.VolumeHandle, + volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name) + if err != nil { + return err + } + } } } } + + return nil } /* @@ -4760,20 +4879,29 @@ func scaleStatefulSetPods(c clientset.Interface, ss *appsv1.StatefulSet, count i } /* -scaleDownStatefulSetPod is a utility method which is used to scale down the count of StatefulSet replicas. +scaleDownStatefulSetPod util is used to perform a scale down operation on StatefulSet Pods +and later verifies that after the scale down operation the volumes get detached from the nodes, +and returns nil if no error is found */ func scaleDownStatefulSetPod(ctx context.Context, client clientset.Interface, - statefulset *appsv1.StatefulSet, namespace string, replicas int32, parallelStatefulSetCreation bool) { + statefulset *appsv1.StatefulSet, namespace string, replicas int32, parallelStatefulSetCreation bool, + isMultiVcSetup bool) error { ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas)) var ssPodsAfterScaleDown *v1.PodList + var err error + if parallelStatefulSetCreation { _, scaledownErr := scaleStatefulSetPods(client, statefulset, replicas) - gomega.Expect(scaledownErr).NotTo(gomega.HaveOccurred()) + if scaledownErr != nil { + return scaledownErr + } fss.WaitForStatusReadyReplicas(client, statefulset, replicas) ssPodsAfterScaleDown = GetListOfPodsInSts(client, statefulset) } else { _, scaledownErr := fss.Scale(client, statefulset, replicas) - gomega.Expect(scaledownErr).NotTo(gomega.HaveOccurred()) + if scaledownErr != nil { + return scaledownErr + } fss.WaitForStatusReadyReplicas(client, statefulset, replicas) ssPodsAfterScaleDown = fss.GetPodList(client, statefulset) } @@ -4781,75 +4909,124 @@ func scaleDownStatefulSetPod(ctx context.Context, client clientset.Interface, // After scale down, verify vSphere volumes are detached from deleted pods ginkgo.By("Verify Volumes are detached from Nodes after Statefulsets is scaled down") for _, sspod := range ssPodsAfterScaleDown.Items { - _, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) + _, err = client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) if err != nil { - gomega.Expect(apierrors.IsNotFound(err), gomega.BeTrue()) - for _, volumespec := range sspod.Spec.Volumes { - if volumespec.PersistentVolumeClaim != nil { - pv := getPvFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) - isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode( - client, pv.Spec.CSI.VolumeHandle, sspod.Spec.NodeName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(isDiskDetached).To(gomega.BeTrue(), - fmt.Sprintf("Volume %q is not detached from the node %q", - pv.Spec.CSI.VolumeHandle, sspod.Spec.NodeName)) + if apierrors.IsNotFound(err) { + for _, volumespec := range sspod.Spec.Volumes { + if volumespec.PersistentVolumeClaim != nil { + pv := getPvFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) + if !isMultiVcSetup { + isDiskDetached, detachErr := e2eVSphere.waitForVolumeDetachedFromNode( + client, pv.Spec.CSI.VolumeHandle, sspod.Spec.NodeName) + if detachErr != nil { + return detachErr + } + if !isDiskDetached { + return fmt.Errorf("volume %q is not detached from the node %q",
pv.Spec.CSI.VolumeHandle, sspod.Spec.NodeName) + } + } else { + isDiskDetached, detachErr := multiVCe2eVSphere.waitForVolumeDetachedFromNodeInMultiVC( + client, pv.Spec.CSI.VolumeHandle, sspod.Spec.NodeName) + if detachErr != nil { + return detachErr + } + if !isDiskDetached { + return fmt.Errorf("volume %q is not detached from the node %q", + pv.Spec.CSI.VolumeHandle, sspod.Spec.NodeName) + } + } + } } + } else { + return err } } } // After scale down, verify the attached volumes match those in CNS Cache for _, sspod := range ssPodsAfterScaleDown.Items { - _, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _, err = client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) + if err != nil { + return err + } for _, volumespec := range sspod.Spec.Volumes { if volumespec.PersistentVolumeClaim != nil { - pv := getPvFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) - err := verifyVolumeMetadataInCNS(&e2eVSphere, pv.Spec.CSI.VolumeHandle, - volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if !isMultiVcSetup { + pv := getPvFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) + err := verifyVolumeMetadataInCNS(&e2eVSphere, pv.Spec.CSI.VolumeHandle, + volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name) + if err != nil { + return err + } + } else { + pv := getPvFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) + err := verifyVolumeMetadataInCNSForMultiVC(&multiVCe2eVSphere, pv.Spec.CSI.VolumeHandle, + volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name) + if err != nil { + return err + } + } } } } + return nil } /* -scaleUpStatefulSetPod is a utility method which is used to scale up the count of StatefulSet replicas. 
+scaleUpStatefulSetPod util is used to perform a scale up operation on StatefulSet Pods +and later verifies that after the scale up operation the volumes get successfully attached to the nodes, +and returns nil if no error is found */ func scaleUpStatefulSetPod(ctx context.Context, client clientset.Interface, - statefulset *appsv1.StatefulSet, namespace string, replicas int32, parallelStatefulSetCreation bool) { + statefulset *appsv1.StatefulSet, namespace string, replicas int32, + parallelStatefulSetCreation bool, isMultiVcSetup bool) error { ginkgo.By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas)) var ssPodsAfterScaleUp *v1.PodList + var err error + if parallelStatefulSetCreation { _, scaleupErr := scaleStatefulSetPods(client, statefulset, replicas) - gomega.Expect(scaleupErr).NotTo(gomega.HaveOccurred()) + if scaleupErr != nil { + return scaleupErr + } fss.WaitForStatusReplicas(client, statefulset, replicas) fss.WaitForStatusReadyReplicas(client, statefulset, replicas) ssPodsAfterScaleUp = GetListOfPodsInSts(client, statefulset) - gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), - fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) - gomega.Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(gomega.BeTrue(), - "Number of Pods in the statefulset should match with number of replicas") + if len(ssPodsAfterScaleUp.Items) == 0 { + return fmt.Errorf("unable to get list of Pods from the Statefulset: %v", statefulset.Name) + } + if len(ssPodsAfterScaleUp.Items) != int(replicas) { + return fmt.Errorf("number of Pods in the statefulset should match with number of replicas") + } } else { _, scaleupErr := fss.Scale(client, statefulset, replicas) - gomega.Expect(scaleupErr).NotTo(gomega.HaveOccurred()) + if scaleupErr != nil { + return scaleupErr + } fss.WaitForStatusReplicas(client, statefulset, replicas) fss.WaitForStatusReadyReplicas(client, statefulset, replicas) ssPodsAfterScaleUp = fss.GetPodList(client, statefulset) - gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), - fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) - gomega.Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(gomega.BeTrue(), - "Number of Pods in the statefulset should match with number of replicas") + if len(ssPodsAfterScaleUp.Items) == 0 { + return fmt.Errorf("unable to get list of Pods from the Statefulset: %v", statefulset.Name) + } + if len(ssPodsAfterScaleUp.Items) != int(replicas) { + return fmt.Errorf("number of Pods in the statefulset should match with number of replicas") + } } // After scale up, verify all vSphere volumes are attached to node VMs.
ginkgo.By("Verify all volumes are attached to Nodes after Statefulsets is scaled up") for _, sspod := range ssPodsAfterScaleUp.Items { - err := fpod.WaitTimeoutForPodReadyInNamespace(client, sspod.Name, statefulset.Namespace, pollTimeout) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpod.WaitTimeoutForPodReadyInNamespace(client, sspod.Name, statefulset.Namespace, pollTimeout) + if err != nil { + return err + } pod, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if err != nil { + return err + } for _, volumespec := range pod.Spec.Volumes { if volumespec.PersistentVolumeClaim != nil { pv := getPvFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) @@ -4864,20 +5041,53 @@ func scaleUpStatefulSetPod(ctx context.Context, client clientset.Interface, } else { annotations := pod.Annotations vmUUID, exists = annotations[vmUUIDLabel] - gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("Pod doesn't have %s annotation", vmUUIDLabel)) - _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if !exists { + return fmt.Errorf("pod doesn't have %s annotation", vmUUIDLabel) + } + if !isMultiVcSetup { + _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) + if err != nil { + return err + } + } else { + _, err := multiVCe2eVSphere.getVMByUUIDForMultiVC(ctx, vmUUID) + if err != nil { + return err + } + } + } + if !isMultiVcSetup { + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pv.Spec.CSI.VolumeHandle, vmUUID) + if err != nil { + return err + } + if !isDiskAttached { + return fmt.Errorf("disk is not attached to the node") + } + err = verifyVolumeMetadataInCNS(&e2eVSphere, pv.Spec.CSI.VolumeHandle, + volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name) + if err != nil { + return err + } + } else { + isDiskAttached, err := multiVCe2eVSphere.verifyVolumeIsAttachedToVMInMultiVC(client, + pv.Spec.CSI.VolumeHandle, vmUUID) + if err != nil { + return err + } + if !isDiskAttached { + return fmt.Errorf("disk is not attached to the node") + } + err = verifyVolumeMetadataInCNSForMultiVC(&multiVCe2eVSphere, pv.Spec.CSI.VolumeHandle, + volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name) + if err != nil { + return err + } } - isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pv.Spec.CSI.VolumeHandle, vmUUID) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Disk is not attached to the node") - gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Disk is not attached") - ginkgo.By("After scale up, verify the attached volumes match those in CNS Cache") - err = verifyVolumeMetadataInCNS(&e2eVSphere, pv.Spec.CSI.VolumeHandle, - volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } } + return nil } /* @@ -4982,20 +5192,30 @@ PV is provisioned. 
*/ func verifyPVnodeAffinityAndPODnodedetailsForDeploymentSetsLevel5(ctx context.Context, client clientset.Interface, deployment *appsv1.Deployment, namespace string, - allowedTopologies []v1.TopologySelectorLabelRequirement, parallelDeplCreation bool) { + allowedTopologies []v1.TopologySelectorLabelRequirement, + parallelDeplCreation bool, isMultiVcSetup bool) error { allowedTopologiesMap := createAllowedTopologiesMap(allowedTopologies) var pods *v1.PodList var err error + if parallelDeplCreation { pods, err = GetPodsForMultipleDeployment(client, deployment) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if err != nil { + return err + } } else { pods, err = fdep.GetPodsForDeployment(client, deployment) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if err != nil { + return err + } } + for _, sspod := range pods.Items { _, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if err != nil { + return err + } + for _, volumespec := range sspod.Spec.Volumes { if volumespec.PersistentVolumeClaim != nil { // get pv details @@ -5005,87 +5225,130 @@ func verifyPVnodeAffinityAndPODnodedetailsForDeploymentSetsLevel5(ctx context.Co ginkgo.By("Verifying PV node affinity details") res, err := verifyVolumeTopologyForLevel5(pv, allowedTopologiesMap) if res { - framework.Logf("PV %s node affinity details lies in the specified allowed topologies of Storage Class", pv.Name) + framework.Logf("PV %s node affinity details lie in the specified allowed "+ + "topologies of Storage Class", pv.Name) + } + if !res { + return fmt.Errorf("PV %s node affinity details are not in the specified allowed "+ + "topologies of Storage Class", pv.Name) + } + if err != nil { + return err } - gomega.Expect(res).To(gomega.BeTrue(), "PV %s node affinity details is not in the "+ - "specified allowed topologies of Storage Class", pv.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) // fetch node details nodeList, err := fnodes.GetReadySchedulableNodes(client) - framework.ExpectNoError(err, "Unable to find ready and schedulable Node") - if !(len(nodeList.Items) > 0) { - framework.Failf("Unable to find ready and schedulable Node") + if err != nil { + return err } + if len(nodeList.Items) <= 0 { + return fmt.Errorf("unable to find ready and schedulable Node") + } + // verify pod is running on appropriate nodes ginkgo.By("Verifying If Pods are running on appropriate nodes as mentioned in SC") res, err = verifyPodLocationLevel5(&sspod, nodeList, allowedTopologiesMap) if res { - framework.Logf("Pod %v is running on appropriate node as specified in the "+ - "allowed topolgies of Storage Class", sspod.Name) + framework.Logf("Pod %v is running on an appropriate node as specified in the "+ + "allowed topologies of Storage Class", sspod.Name) + } + if !res { + return fmt.Errorf("pod %v is not running on an appropriate node as specified in the "+ + "allowed topologies of Storage Class", sspod.Name) + } + if err != nil { + return err } - gomega.Expect(res).To(gomega.BeTrue(), "Pod %v is not running on appropriate node "+ - "as specified in allowed topolgies of Storage Class", sspod.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Verify the attached volume match the one in CNS cache - error := verifyVolumeMetadataInCNS(&e2eVSphere, pv.Spec.CSI.VolumeHandle, - volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name) - gomega.Expect(error).NotTo(gomega.HaveOccurred()) + if !isMultiVcSetup { + err := 
verifyVolumeMetadataInCNS(&e2eVSphere, pv.Spec.CSI.VolumeHandle, + volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name) + if err != nil { + return err + } + } else { + err := verifyVolumeMetadataInCNSForMultiVC(&multiVCe2eVSphere, pv.Spec.CSI.VolumeHandle, + volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name) + if err != nil { + return err + } + } } } } + + return nil } /* For Standalone Pod -verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5 for Standalone Pod verifies that PV +verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5 for Standalone Pod verifies that PV node Affinity rules should match the topology constraints specified in the storage class. Also it verifies that a pod is scheduled on a node that belongs to the topology on which PV is provisioned. */ -func verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx context.Context, +func verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx context.Context, client clientset.Interface, pod *v1.Pod, namespace string, - allowedTopologies []v1.TopologySelectorLabelRequirement) { + allowedTopologies []v1.TopologySelectorLabelRequirement, isMultiVcSetup bool) error { allowedTopologiesMap := createAllowedTopologiesMap(allowedTopologies) for _, volumespec := range pod.Spec.Volumes { if volumespec.PersistentVolumeClaim != nil { // get pv details pv := getPvFromClaim(client, pod.Namespace, volumespec.PersistentVolumeClaim.ClaimName) + if pv == nil { + return fmt.Errorf("failed to get PV for claim: %s", volumespec.PersistentVolumeClaim.ClaimName) + } // verify pv node affinity details as specified on SC ginkgo.By("Verifying PV node affinity details") res, err := verifyVolumeTopologyForLevel5(pv, allowedTopologiesMap) - if res { - framework.Logf("PV %s node affinity details lies in the specified allowed topologies of Storage Class", pv.Name) + if err != nil { + return fmt.Errorf("error verifying PV node affinity: %v", err) + } + if !res { + return fmt.Errorf("PV %s node affinity details are not in the specified allowed "+ + "topologies of Storage Class", pv.Name) } - gomega.Expect(res).To(gomega.BeTrue(), "PV %s node affinity details is not in the specified "+ - "allowed topologies of Storage Class", pv.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) // fetch node details nodeList, err := fnodes.GetReadySchedulableNodes(client) - framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + if err != nil { + return fmt.Errorf("error getting ready and schedulable nodes: %v", err) + } if !(len(nodeList.Items) > 0) { - framework.Failf("Unable to find ready and schedulable Node") + return errors.New("no ready and schedulable nodes found") } + // verify pod is running on appropriate nodes ginkgo.By("Verifying If Pods are running on appropriate nodes as mentioned in SC") res, err = verifyPodLocationLevel5(pod, nodeList, allowedTopologiesMap) - if res { - framework.Logf("Pod %v is running on appropriate node as specified in the allowed "+ - "topolgies of Storage Class", pod.Name) + if err != nil { + return fmt.Errorf("error verifying pod location: %v", err) + } + if !res { + return fmt.Errorf("pod %v is not running on appropriate node as specified in allowed "+ + "topologies of Storage Class", pod.Name) } - gomega.Expect(res).To(gomega.BeTrue(), "Pod %v is not running on appropriate node as "+ - "specified in allowed topolgies of Storage Class", pod.Name) -
gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // Verify the attached volume match the one in CNS cache - error := verifyVolumeMetadataInCNS(&e2eVSphere, pv.Spec.CSI.VolumeHandle, - volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, pod.Name) - gomega.Expect(error).NotTo(gomega.HaveOccurred()) + // Verify the attached volume matches the one in CNS cache + if !isMultiVcSetup { + err := verifyVolumeMetadataInCNS(&e2eVSphere, pv.Spec.CSI.VolumeHandle, + volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, pod.Name) + if err != nil { + return fmt.Errorf("error verifying volume metadata in CNS: %v", err) + } + } else { + err := verifyVolumeMetadataInCNSForMultiVC(&multiVCe2eVSphere, pv.Spec.CSI.VolumeHandle, + volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, pod.Name) + if err != nil { + return fmt.Errorf("error verifying volume metadata in CNS for multi-VC: %v", err) + } + } } } + return nil } func getPersistentVolumeSpecWithStorageClassFCDNodeSelector(volumeHandle string, @@ -5145,6 +5408,7 @@ func getPersistentVolumeSpecWithStorageClassFCDNodeSelector(volumeHandle string, func getNodeSelectorTerms(allowedTopologies []v1.TopologySelectorLabelRequirement) []v1.NodeSelectorTerm { var nodeSelectorRequirements []v1.NodeSelectorRequirement var nodeSelectorTerms []v1.NodeSelectorTerm + rackTopology := allowedTopologies[len(allowedTopologies)-1] for i := 0; i < len(allowedTopologies)-1; i++ { topologySelector := allowedTopologies[i] @@ -5154,7 +5418,7 @@ func getNodeSelectorTerms(allowedTopologies []v1.TopologySelectorLabelRequiremen nodeSelectorRequirement.Values = topologySelector.Values nodeSelectorRequirements = append(nodeSelectorRequirements, nodeSelectorRequirement) } - rackTopology := allowedTopologies[len(allowedTopologies)-1] + for i := 0; i < len(rackTopology.Values); i++ { var nodeSelectorTerm v1.NodeSelectorTerm var nodeSelectorRequirement v1.NodeSelectorRequirement @@ -5164,6 +5428,7 @@ func getNodeSelectorTerms(allowedTopologies []v1.TopologySelectorLabelRequiremen nodeSelectorTerm.MatchExpressions = append(nodeSelectorRequirements, nodeSelectorRequirement) nodeSelectorTerms = append(nodeSelectorTerms, nodeSelectorTerm) } + return nodeSelectorTerms } @@ -5195,80 +5460,6 @@ func createKubernetesClientFromConfig(kubeConfigPath string) (clientset.Interfac return client, nil } -// getVolumeSnapshotClassSpec returns a spec for the volume snapshot class -func getVolumeSnapshotClassSpec(deletionPolicy snapc.DeletionPolicy, - parameters map[string]string) *snapc.VolumeSnapshotClass { - var volumesnapshotclass = &snapc.VolumeSnapshotClass{ - TypeMeta: metav1.TypeMeta{ - Kind: "VolumeSnapshotClass", - }, - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "volumesnapshot-", - }, - Driver: e2evSphereCSIDriverName, - DeletionPolicy: deletionPolicy, - } - - volumesnapshotclass.Parameters = parameters - return volumesnapshotclass -} - -// getVolumeSnapshotSpec returns a spec for the volume snapshot -func getVolumeSnapshotSpec(namespace string, snapshotclassname string, pvcName string) *snapc.VolumeSnapshot { - var volumesnapshotSpec = &snapc.VolumeSnapshot{ - TypeMeta: metav1.TypeMeta{ - Kind: "VolumeSnapshot", - }, - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "snapshot-", - Namespace: namespace, - }, - Spec: snapc.VolumeSnapshotSpec{ - VolumeSnapshotClassName: &snapshotclassname, - Source: snapc.VolumeSnapshotSource{ - PersistentVolumeClaimName: &pvcName, - }, - }, - } - return volumesnapshotSpec -} - -// waitForVolumeSnapshotReadyToUse waits for the 
volume's snapshot to be in ReadyToUse -func waitForVolumeSnapshotReadyToUse(client snapclient.Clientset, ctx context.Context, namespace string, - name string) (*snapc.VolumeSnapshot, error) { - var volumeSnapshot *snapc.VolumeSnapshot - var err error - waitErr := wait.PollImmediate(poll, pollTimeout, func() (bool, error) { - volumeSnapshot, err = client.SnapshotV1().VolumeSnapshots(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return false, fmt.Errorf("error fetching volumesnapshot details : %v", err) - } - if volumeSnapshot.Status != nil && *volumeSnapshot.Status.ReadyToUse { - return true, nil - } - return false, nil - }) - return volumeSnapshot, waitErr -} - -// waitForVolumeSnapshotContentToBeDeleted wait till the volume snapshot content is deleted -func waitForVolumeSnapshotContentToBeDeleted(client snapclient.Clientset, ctx context.Context, - name string) error { - var err error - waitErr := wait.PollImmediate(poll, pollTimeout, func() (bool, error) { - _, err = client.SnapshotV1().VolumeSnapshotContents().Get(ctx, name, metav1.GetOptions{}) - if err != nil { - if strings.Contains(err.Error(), "not found") { - return true, nil - } else { - return false, fmt.Errorf("error fetching volumesnapshotcontent details : %v", err) - } - } - return false, nil - }) - return waitErr -} - // getK8sMasterNodeIPWhereControllerLeaderIsRunning fetches the master node IP // where controller is running func getK8sMasterNodeIPWhereContainerLeaderIsRunning(ctx context.Context, @@ -5431,53 +5622,9 @@ func getMasterIpFromMasterNodeName(ctx context.Context, client clientset.Interfa } } -// getVolumeSnapshotContentSpec returns a spec for the volume snapshot content -func getVolumeSnapshotContentSpec(deletionPolicy snapc.DeletionPolicy, snapshotHandle string, - futureSnapshotName string, namespace string) *snapc.VolumeSnapshotContent { - var volumesnapshotContentSpec = &snapc.VolumeSnapshotContent{ - TypeMeta: metav1.TypeMeta{ - Kind: "VolumeSnapshotContent", - }, - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "snapshotcontent-", - }, - Spec: snapc.VolumeSnapshotContentSpec{ - DeletionPolicy: deletionPolicy, - Driver: e2evSphereCSIDriverName, - Source: snapc.VolumeSnapshotContentSource{ - SnapshotHandle: &snapshotHandle, - }, - VolumeSnapshotRef: v1.ObjectReference{ - Name: futureSnapshotName, - Namespace: namespace, - }, - }, - } - return volumesnapshotContentSpec -} - -// getVolumeSnapshotSpecByName returns a spec for the volume snapshot by name -func getVolumeSnapshotSpecByName(namespace string, snapshotName string, - snapshotcontentname string) *snapc.VolumeSnapshot { - var volumesnapshotSpec = &snapc.VolumeSnapshot{ - TypeMeta: metav1.TypeMeta{ - Kind: "VolumeSnapshot", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: snapshotName, - Namespace: namespace, - }, - Spec: snapc.VolumeSnapshotSpec{ - Source: snapc.VolumeSnapshotSource{ - VolumeSnapshotContentName: &snapshotcontentname, - }, - }, - } - return volumesnapshotSpec -} - func createParallelStatefulSets(client clientset.Interface, namespace string, statefulset *appsv1.StatefulSet, replicas int32, wg *sync.WaitGroup) { + defer ginkgo.GinkgoRecover() defer wg.Done() ginkgo.By("Creating statefulset") ctx, cancel := context.WithCancel(context.Background()) @@ -5493,11 +5640,12 @@ func createParallelStatefulSetSpec(namespace string, no_of_sts int, replicas int var statefulset *appsv1.StatefulSet for i := 0; i < no_of_sts; i++ { + scName := defaultNginxStorageClassName statefulset = GetStatefulSetFromManifest(namespace) statefulset.Name 
= "thread-" + strconv.Itoa(i) + "-" + statefulset.Name statefulset.Spec.PodManagementPolicy = appsv1.ParallelPodManagement statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = defaultNginxStorageClassName + Spec.StorageClassName = &scName statefulset.Spec.Replicas = &replicas stss = append(stss, statefulset) } @@ -5505,7 +5653,7 @@ func createParallelStatefulSetSpec(namespace string, no_of_sts int, replicas int } func createMultiplePVCsInParallel(ctx context.Context, client clientset.Interface, namespace string, - storageclass *storagev1.StorageClass, count int) []*v1.PersistentVolumeClaim { + storageclass *storagev1.StorageClass, count int, pvclaimlabels map[string]string) []*v1.PersistentVolumeClaim { var pvclaims []*v1.PersistentVolumeClaim for i := 0; i < count; i++ { pvclaim, err := createPVC(client, namespace, nil, "", storageclass, "") @@ -5542,7 +5690,7 @@ func ExecInStsPodsInNs(c clientset.Interface, ss *appsv1.StatefulSet, cmd string StatefulSetPoll := 10 * time.Second StatefulPodTimeout := 5 * time.Minute for _, statefulPod := range podList.Items { - stdout, err := framework.RunHostCmdWithRetries(statefulPod.Namespace, + stdout, err := e2eoutput.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout) framework.Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout) if err != nil { @@ -5576,49 +5724,6 @@ func deleteCsiControllerPodWhereLeaderIsRunning(ctx context.Context, return nil } -// getPersistentVolumeClaimSpecWithDatasource return the PersistentVolumeClaim -// spec with specified storage class. -func getPersistentVolumeClaimSpecWithDatasource(namespace string, ds string, storageclass *storagev1.StorageClass, - pvclaimlabels map[string]string, accessMode v1.PersistentVolumeAccessMode, - datasourceName string, snapshotapigroup string) *v1.PersistentVolumeClaim { - disksize := diskSize - if ds != "" { - disksize = ds - } - if accessMode == "" { - // If accessMode is not specified, set the default accessMode. 
- accessMode = v1.ReadWriteOnce - } - claim := &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "pvc-", - Namespace: namespace, - }, - Spec: v1.PersistentVolumeClaimSpec{ - AccessModes: []v1.PersistentVolumeAccessMode{ - accessMode, - }, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse(disksize), - }, - }, - StorageClassName: &(storageclass.Name), - DataSource: &v1.TypedLocalObjectReference{ - APIGroup: &snapshotapigroup, - Kind: "VolumeSnapshot", - Name: datasourceName, - }, - }, - } - - if pvclaimlabels != nil { - claim.Labels = pvclaimlabels - } - - return claim -} - // get topology cluster lists func ListTopologyClusterNames(topologyCluster string) []string { topologyClusterList := strings.Split(topologyCluster, ",") @@ -5685,41 +5790,21 @@ func powerOffEsxiHostByCluster(ctx context.Context, vs *vSphere, clusterName str return powerOffHostsList } -// getVolumeSnapshotSpecWithoutSC returns a spec for the volume snapshot -func getVolumeSnapshotSpecWithoutSC(namespace string, pvcName string) *snapc.VolumeSnapshot { - var volumesnapshotSpec = &snapc.VolumeSnapshot{ - TypeMeta: metav1.TypeMeta{ - Kind: "VolumeSnapshot", - }, - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "snapshot-", - Namespace: namespace, - }, - Spec: snapc.VolumeSnapshotSpec{ - Source: snapc.VolumeSnapshotSource{ - PersistentVolumeClaimName: &pvcName, - }, - }, - } - return volumesnapshotSpec -} - // waitForPvcToBeDeleted waits by polling for a particular pvc to be deleted in a namespace func waitForPvcToBeDeleted(ctx context.Context, client clientset.Interface, pvcName string, namespace string) error { - var pvc *v1.PersistentVolumeClaim waitErr := wait.PollImmediate(poll, pollTimeout, func() (bool, error) { - pvc, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) + _, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) if err != nil { if strings.Contains(err.Error(), "not found") { + framework.Logf("PVC is deleted: %v", pvcName) return true, nil } else { return false, fmt.Errorf("pvc %s is still not deleted in"+ - "namespace %s with err: %v", pvc.Name, namespace, err) + "namespace %s with err: %v", pvcName, namespace, err) } } return false, nil }) - framework.Logf("Status of pvc is: %v", pvc.Status.Phase) return waitErr } @@ -5751,29 +5836,33 @@ func waitForEventWithReason(client clientset.Interface, namespace string, } // stopCSIPods function stops all the running csi pods -func stopCSIPods(ctx context.Context, client clientset.Interface) (bool, error) { +func stopCSIPods(ctx context.Context, client clientset.Interface, namespace string) (bool, error) { collectPodLogs(ctx, client, csiSystemNamespace) isServiceStopped := false err := updateDeploymentReplicawithWait(client, 0, vSphereCSIControllerPodNamePrefix, - csiSystemNamespace) + namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred()) isServiceStopped = true return isServiceStopped, err } // startCSIPods function starts the csi pods and waits till all the pods comes up -func startCSIPods(ctx context.Context, client clientset.Interface, csiReplicas int32) (bool, error) { +func startCSIPods(ctx context.Context, client clientset.Interface, csiReplicas int32, + namespace string) (bool, error) { ignoreLabels := make(map[string]string) err := updateDeploymentReplicawithWait(client, csiReplicas, vSphereCSIControllerPodNamePrefix, - csiSystemNamespace) - 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + namespace) + if err != nil { + return true, err + } // Wait for the CSI Pods to be up and Running - list_of_pods, err := fpod.GetPodsInNamespace(client, csiSystemNamespace, ignoreLabels) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + list_of_pods, err := fpod.GetPodsInNamespace(client, namespace, ignoreLabels) + if err != nil { + return true, err + } num_csi_pods := len(list_of_pods) - err = fpod.WaitForPodsRunningReady(client, csiSystemNamespace, int32(num_csi_pods), 0, + err = fpod.WaitForPodsRunningReady(client, namespace, int32(num_csi_pods), 0, pollTimeout, ignoreLabels) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) isServiceStopped := false return isServiceStopped, err } @@ -6025,8 +6114,13 @@ func getCSIPodWhereListVolumeResponseIsPresent(ctx context.Context, csiPods, err := fpod.GetPodsInNamespace(client, csiSystemNamespace, ignoreLabels) gomega.Expect(err).NotTo(gomega.HaveOccurred()) var k8sMasterIP string - k8sMasterIPs := getK8sMasterIPs(ctx, client) - k8sMasterIP = k8sMasterIPs[0] + if vanillaCluster { + k8sMasterIPs := getK8sMasterIPs(ctx, client) + k8sMasterIP = k8sMasterIPs[0] + } else { + k8sMasterIP = GetAndExpectStringEnvVar(svcMasterIP) + } + for _, csiPod := range csiPods { if strings.Contains(csiPod.Name, vSphereCSIControllerPodNamePrefix) { // Putting the grepped logs for leader of container of different CSI pods @@ -6084,3 +6178,342 @@ func getAllPVCFromNamespace(client clientset.Interface, namespace string) *v1.Pe gomega.Expect(pvcList).NotTo(gomega.BeNil()) return pvcList } + +// CheckDevice helps verify the raw block device inside pod is accessible correctly +func CheckDevice(client clientset.Interface, sts *appsv1.StatefulSet, devicePath string) error { + for _, cmd := range []string{ + fmt.Sprintf("ls -idlh %v", devicePath), + fmt.Sprintf("find %v", devicePath), + fmt.Sprintf("dd if=/dev/zero of=%v bs=1024 count=1 seek=0", devicePath), + } { + if err := fss.ExecInStatefulPods(client, sts, cmd); err != nil { + return fmt.Errorf("failed to check device in command %v, err %v", cmd, err) + } + } + return nil +} + +// verifyIOOnRawBlockVolume helps check data integrity for raw block volumes +func verifyIOOnRawBlockVolume(ns string, podName string, devicePath string, testdataFile string, + startSizeInMB, dataSizeInMB int64) { + //Write some data to file first and then to raw block device + writeDataOnRawBlockVolume(ns, podName, devicePath, testdataFile, startSizeInMB, dataSizeInMB) + // Read the data to verify that is it same as what written + verifyDataFromRawBlockVolume(ns, podName, devicePath, testdataFile, startSizeInMB, dataSizeInMB) +} + +// writeDataOnRawBlockVolume writes test data to raw block device +func writeDataOnRawBlockVolume(ns string, podName string, devicePath string, testdataFile string, + startSizeInMB, dataSizeInMB int64) { + cmd := []string{"exec", podName, "--namespace=" + ns, "--", "/bin/sh", "-c", + fmt.Sprintf("/bin/ls %v", devicePath)} + _, err := e2ekubectl.RunKubectl(ns, cmd...) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + _ = e2ekubectl.RunKubectlOrDie(ns, "cp", testdataFile, fmt.Sprintf( + "%v/%v:/tmp/data_to_write", ns, podName)) + framework.ExpectNoError(err, fmt.Sprintf("failed to write testdata inside the pod: %q", podName)) + + // If startSizeInMB is given, fill 1M with testData from offset=startSizeInMB given, upto dataSizeInMB. + // Otherwise write the testData given from offset=0. 
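// Editor's note: a minimal, hypothetical sketch of how the offset arguments described in the
// comment above are meant to be used by callers of these raw-block helpers (all defined in this
// file). The namespace, pod name, device path and test data file below are illustrative only;
// the test data file is assumed to hold exactly 1 MiB, as created by the callers in this change.
func exampleRawBlockIOUsage() {
	// Write the 1 MiB test data at each 1 MiB offset starting from 10 MiB, 2 MiB in total,
	// then read the same range back and diff it against the source file.
	writeDataOnRawBlockVolume("demo-ns", "demo-pod-0", "/dev/xvda", "/tmp/testdata", 10, 2)
	verifyDataFromRawBlockVolume("demo-ns", "demo-pod-0", "/dev/xvda", "/tmp/testdata", 10, 2)
	// verifyIOOnRawBlockVolume simply chains the two calls above; offset 0 writes from the start.
	verifyIOOnRawBlockVolume("demo-ns", "demo-pod-0", "/dev/xvda", "/tmp/testdata", 0, 1)
}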
+ gomega.Expect(dataSizeInMB).NotTo(gomega.BeZero()) + for i := int64(0); i < dataSizeInMB; i = i + 1 { + seek := fmt.Sprintf("%v", startSizeInMB+i) + cmd = []string{"exec", podName, "--namespace=" + ns, "--", "/bin/sh", "-c", + fmt.Sprintf("/bin/dd if=/tmp/data_to_write of=%v bs=1M count=1 conv=fsync seek=%v", + devicePath, seek)} + _, err = e2ekubectl.RunKubectl(ns, cmd...) + framework.ExpectNoError(err, fmt.Sprintf("failed to write device: %q inside the pod: %q", devicePath, podName)) + } + cmd = []string{"--namespace=" + ns, "exec", podName, "--", "/bin/sh", "-c", "rm /tmp/data_to_write"} + _ = e2ekubectl.RunKubectlOrDie(ns, cmd...) +} + +// verifyDataFromRawBlockVolume reads data from raw block device and verifies it against given input +func verifyDataFromRawBlockVolume(ns string, podName string, devicePath string, testdataFile string, + startSizeInMB, dataSizeInMB int64) { + cmd := []string{"exec", podName, "--namespace=" + ns, "--", "/bin/sh", "-c", + fmt.Sprintf("/bin/ls %v", devicePath)} + _, err := e2ekubectl.RunKubectl(ns, cmd...) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Verify testData written on 1MB upto dataSizeInMB from the specified offset=startSizeInMB + // Otherwise verify the testdata specified from offset=0 + gomega.Expect(dataSizeInMB).NotTo(gomega.BeZero()) + for i := int64(0); i < dataSizeInMB; i = i + 1 { + skip := fmt.Sprintf("%v", startSizeInMB+i) + cmd = []string{"exec", podName, "--namespace=" + ns, "--", "/bin/sh", "-c", + fmt.Sprintf("/bin/dd if=%v of=/tmp/data_to_read bs=1M count=1 skip=%v", devicePath, skip)} + _, err = e2ekubectl.RunKubectl(ns, cmd...) + framework.ExpectNoError(err, fmt.Sprintf("failed to read device: %q inside the pod: %q", devicePath, podName)) + _ = e2ekubectl.RunKubectlOrDie(ns, "cp", + fmt.Sprintf("%v/%v:/tmp/data_to_read", ns, podName), testdataFile+podName) + + framework.Logf("Running diff with source file and file from pod %v for 1M starting %vM", podName, skip) + op, err := exec.Command("diff", testdataFile, testdataFile+podName).Output() + framework.Logf("diff: ", op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(op)).To(gomega.BeZero()) + } + +} + +// getBlockDevSizeInBytes returns size of block device at given path +func getBlockDevSizeInBytes(f *framework.Framework, ns string, pod *v1.Pod, devicePath string) (int64, error) { + cmd := []string{"exec", pod.Name, "--namespace=" + ns, "--", "/bin/sh", "-c", + fmt.Sprintf("/bin/blockdev --getsize64 %v", devicePath)} + output, err := e2ekubectl.RunKubectl(ns, cmd...) 
+ if err != nil { + return -1, fmt.Errorf("failed to get size of raw device %v inside pod", devicePath) + } + output = strings.TrimSuffix(output, "\n") + return strconv.ParseInt(output, 10, 64) +} + +// checkClusterIdValueOnWorkloads checks clusterId value by querying cns metadata +// for all k8s workloads in a particular namespace +func checkClusterIdValueOnWorkloads(vs *vSphere, client clientset.Interface, + ctx context.Context, namespace string, clusterID string) error { + podList, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, pod := range podList.Items { + pvcName := pod.Spec.Volumes[0].PersistentVolumeClaim.ClaimName + pvc, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pvName := pvc.Spec.VolumeName + pv, err := client.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volumeID := pv.Spec.CSI.VolumeHandle + queryResult, err := vs.queryCNSVolumeWithResult(volumeID) + if err != nil { + return err + } + gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) + if len(queryResult.Volumes) != 1 || queryResult.Volumes[0].VolumeId.Id != volumeID { + return fmt.Errorf("failed to query cns volume %s", volumeID) + } + for _, metadata := range queryResult.Volumes[0].Metadata.EntityMetadata { + kubernetesMetadata := metadata.(*cnstypes.CnsKubernetesEntityMetadata) + if kubernetesMetadata.EntityType == "POD" && kubernetesMetadata.ClusterID != clusterID { + return fmt.Errorf("clusterID %s is not matching with %s ", clusterID, kubernetesMetadata.ClusterID) + } else if kubernetesMetadata.EntityType == "PERSISTENT_VOLUME" && + kubernetesMetadata.ClusterID != clusterID { + return fmt.Errorf("clusterID %s is not matching with %s ", clusterID, kubernetesMetadata.ClusterID) + } else if kubernetesMetadata.EntityType == "PERSISTENT_VOLUME_CLAIM" && + kubernetesMetadata.ClusterID != clusterID { + return fmt.Errorf("clusterID %s is not matching with %s ", clusterID, kubernetesMetadata.ClusterID) + } + } + framework.Logf("successfully verified clusterID of the volume %q", volumeID) + } + return nil +} + +// Clean up statefulset and make sure no volume is left in CNS after it is deleted from k8s +// TODO: Code improvements is needed in case if the function is called from snapshot test. 
+// add logic to delete the snapshots for the volumes and then delete the volumes
+func cleaupStatefulset(client clientset.Interface, ctx context.Context, namespace string,
+	statefulset *appsv1.StatefulSet) {
+	scaleDownNDeleteStsDeploymentsInNamespace(ctx, client, namespace)
+	pvcs, err := client.CoreV1().PersistentVolumeClaims(namespace).List(ctx, metav1.ListOptions{})
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	for _, claim := range pvcs.Items {
+		pv := getPvFromClaim(client, namespace, claim.Name)
+		err := fpv.DeletePersistentVolumeClaim(client, claim.Name, namespace)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		ginkgo.By("Verify its PV and corresponding volumes are deleted from CNS")
+		err = fpv.WaitForPersistentVolumeDeleted(client, pv.Name, poll,
+			pollTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		volumeHandle := pv.Spec.CSI.VolumeHandle
+		err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeHandle)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(),
+			fmt.Sprintf("Volume: %s should not be present in the CNS after it is deleted from "+
+				"kubernetes", volumeHandle))
+	}
+}
+
+// getVsanDPersistentVolumeClaimSpecWithStorageClass returns the PersistentVolumeClaim
+// spec for the vsanDirect datastore with the specified storage class.
+func getVsanDPersistentVolumeClaimSpecWithStorageClass(namespace string, ds string,
+	storageclass *storagev1.StorageClass, pvcName string, podName string,
+	accessMode v1.PersistentVolumeAccessMode) *v1.PersistentVolumeClaim {
+	pvcAnnotations := make(map[string]string)
+	pvcAnnotations["volume.beta.kubernetes.io/storage-class"] = storageclass.Name
+	pvcAnnotations["placement.beta.vmware.com/storagepool_antiAffinityRequired"] = podName
+
+	pvclaimlabels := make(map[string]string)
+	pvclaimlabels["supervisor"] = "true"
+
+	disksize := diskSize
+	if ds != "" {
+		disksize = ds
+	}
+	if accessMode == "" {
+		// If accessMode is not specified, set the default accessMode.
+ accessMode = v1.ReadWriteOnce + } + + claim := &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvcName, + Namespace: namespace, + }, + Spec: v1.PersistentVolumeClaimSpec{ + AccessModes: []v1.PersistentVolumeAccessMode{ + accessMode, + }, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): resource.MustParse(disksize), + }, + }, + StorageClassName: &(storageclass.Name), + }, + } + claim.Labels = pvclaimlabels + claim.Annotations = pvcAnnotations + framework.Logf("pvc spec: %v", claim) + return claim +} + +// getVsanDPodSpec returns pod spec for vsan direct datastore for a given persistentVolumeClaim +func getVsanDPodSpec(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, + isPrivileged bool, command string, podName string) *v1.Pod { + + podlabels := make(map[string]string) + podlabels["psp.vmware.com/pod-placement-opt-in"] = "true" + podSpec := &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: ns, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "write-pod", + Image: nginxImage, + SecurityContext: fpod.GenerateContainerSecurityContext(isPrivileged), + }, + }, + RestartPolicy: v1.RestartPolicyOnFailure, + }, + } + var volumeMounts = make([]v1.VolumeMount, len(pvclaims)) + var volumes = make([]v1.Volume, len(pvclaims)) + for index, pvclaim := range pvclaims { + volumename := "data0" + volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/data0"} + volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}} + } + podSpec.Spec.Containers[0].VolumeMounts = volumeMounts + podSpec.Spec.Volumes = volumes + if nodeSelector != nil { + podSpec.Spec.NodeSelector = nodeSelector + } + + podSpec.Labels = podlabels + podSpec.Spec.ServiceAccountName = "default" + podSpec.Spec.RestartPolicy = v1.RestartPolicyAlways + taintKeys := []string{v1.TaintNodeNotReady, v1.TaintNodeUnreachable} + podSpec.Spec.Subdomain = "sample-pe-svc" + var x int64 = 300 + var tolerations []v1.Toleration + for _, taintkey := range taintKeys { + var toleration v1.Toleration + toleration.Key = taintkey + toleration.Operator = v1.TolerationOpExists + toleration.TolerationSeconds = &x + toleration.Effect = v1.TaintEffectNoExecute + framework.Logf("toleration: %v", toleration) + tolerations = append(tolerations, toleration) + } + podSpec.Spec.Tolerations = tolerations + framework.Logf("pod spec: %v", podSpec) + return podSpec +} + +// createVsanDPvcAndPod creates pvc and pod for vsan direct as per wffc policy. +// It ensures that pvc and pod is in in healthy state. 
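// Editor's note: a hypothetical sketch of how the two vSAN Direct spec helpers above are
// combined; this mirrors what createVsanDPvcAndPod below does, minus the phase checks.
// The namespace and object names are illustrative only.
func exampleVsanDirectSpecUsage(ctx context.Context, client clientset.Interface,
	sc *storagev1.StorageClass) {
	pvcSpec := getVsanDPersistentVolumeClaimSpecWithStorageClass(
		"demo-ns", diskSize, sc, "demo-pvc", "demo-pod", v1.ReadWriteOnce)
	pvc, err := fpv.CreatePVC(client, "demo-ns", pvcSpec)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	// The generated pod spec opts in to pod placement and tolerates the
	// node-not-ready/unreachable taints, as set up by getVsanDPodSpec above.
	podSpec := getVsanDPodSpec("demo-ns", nil, []*v1.PersistentVolumeClaim{pvc}, false,
		execCommand, "demo-pod")
	_, err = client.CoreV1().Pods("demo-ns").Create(ctx, podSpec, metav1.CreateOptions{})
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
}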
+func createVsanDPvcAndPod(client clientset.Interface, ctx context.Context, + namespace string, sc *storagev1.StorageClass, pvcName string, + podName string) (*v1.PersistentVolumeClaim, *v1.Pod) { + framework.Logf("Creating pvc %s with storage class %s", pvcName, sc.Name) + pvclaim, err := fpv.CreatePVC(client, namespace, + getVsanDPersistentVolumeClaimSpecWithStorageClass(namespace, + diskSize, sc, pvcName, podName, "")) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Expect claim status to be in Pending state") + err = fpv.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, client, + namespace, pvclaim.Name, framework.Poll, time.Minute) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + "Failed to find the volume: %s in pending state with err: %v", pvcName, err) + + ginkgo.By("Creating a pod") + podSpec := getVsanDPodSpec(namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execCommand, podName) + pod, err := client.CoreV1().Pods(namespace).Create(ctx, podSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = fpod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Expect claim to be in Bound state and provisioning volume passes") + err = fpv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, + namespace, pvclaim.Name, framework.Poll, time.Minute) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to provision volume with err: %v", err) + + return pvclaim, pod +} + +// writeDataToMultipleFilesOnPodInParallel writes data to multiple files +// on a given pod in parallel +func writeDataToMultipleFilesOnPodInParallel(namespace string, podName string, data string, + wg *sync.WaitGroup) { + defer ginkgo.GinkgoRecover() + defer wg.Done() + for i := 0; i < 10; i++ { + ginkgo.By("write to a file in pod") + filePath := fmt.Sprintf("/mnt/volume1/file%v.txt", i) + writeDataOnFileFromPod(namespace, podName, filePath, data) + } + +} + +// byFirstTimeStamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker. 
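// Editor's note: a hypothetical sketch of driving the parallel writer above from a test.
// Each goroutine is registered on the WaitGroup before it is started; the helper itself
// calls GinkgoRecover() and wg.Done(). Pod names and data are illustrative only.
func exampleParallelFileWrites() {
	var wg sync.WaitGroup
	podNames := []string{"demo-pod-0", "demo-pod-1"}
	wg.Add(len(podNames))
	for _, podName := range podNames {
		go writeDataToMultipleFilesOnPodInParallel("demo-ns", podName, "sample data", &wg)
	}
	wg.Wait()
}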
+type byFirstTimeStamp []v1.Event + +func (o byFirstTimeStamp) Len() int { return len(o) } +func (o byFirstTimeStamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } + +func (o byFirstTimeStamp) Less(i, j int) bool { + if o[i].FirstTimestamp.Equal(&o[j].FirstTimestamp) { + return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name + } + return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp) +} + +// dumpSvcNsEventsOnTestFailure dumps the events from the given namespace in case of test failure +func dumpSvcNsEventsOnTestFailure(client clientset.Interface, namespace string) { + if !ginkgo.CurrentSpecReport().Failed() { + return + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + events, err := client.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By(fmt.Sprintf("Found %d events in svc ns %s.", len(events.Items), namespace)) + sortedEvents := events.Items + if len(sortedEvents) > 1 { + sort.Sort(byFirstTimeStamp(sortedEvents)) + } + for _, e := range sortedEvents { + framework.Logf( + "At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message) + } +} diff --git a/tests/e2e/vc_reboot_volume_lifecycle.go b/tests/e2e/vc_reboot_volume_lifecycle.go index a7c09895be..789b274acb 100644 --- a/tests/e2e/vc_reboot_volume_lifecycle.go +++ b/tests/e2e/vc_reboot_volume_lifecycle.go @@ -65,10 +65,12 @@ var _ bool = ginkgo.Describe("Verify volume life_cycle operations works fine aft ginkgo.AfterEach(func() { if supervisorCluster || guestCluster { deleteResourceQuota(client, namespace) + dumpSvcNsEventsOnTestFailure(client, namespace) } if guestCluster { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) } }) @@ -125,11 +127,6 @@ var _ bool = ginkgo.Describe("Verify volume life_cycle operations works fine aft } }() - defer func() { - err = fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - ginkgo.By("Waiting for claim to be in bound phase") pvc, err := fpv.WaitForPVClaimBoundPhase(client, []*v1.PersistentVolumeClaim{pvclaim}, framework.ClaimProvisionTimeout) @@ -138,6 +135,13 @@ var _ bool = ginkgo.Describe("Verify volume life_cycle operations works fine aft pv := getPvFromClaim(client, pvclaim.Namespace, pvclaim.Name) volumeID := pv.Spec.CSI.VolumeHandle + defer func() { + err = fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + if guestCluster { // svcPVCName refers to PVC Name in the supervisor cluster svcPVCName = volumeID diff --git a/tests/e2e/vcp_to_csi_attach_detach.go b/tests/e2e/vcp_to_csi_attach_detach.go index 9e09d9649f..d2613eed06 100644 --- a/tests/e2e/vcp_to_csi_attach_detach.go +++ b/tests/e2e/vcp_to_csi_attach_detach.go @@ -35,6 +35,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" fpv "k8s.io/kubernetes/test/e2e/framework/pv" admissionapi "k8s.io/pod-security-admission/api" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/migration/v1alpha1" @@ -1105,7 +1106,7 @@ func createMultiplePods(ctx context.Context, client clientset.Interface, 
"Volume is not attached to the node volHandle: %s, vmUUID: %s", volHandle, vmUUID) } ginkgo.By("Verify the volume is accessible and filesystem type is as expected") - _, err = framework.LookForStringInPodExec(pvc.Namespace, pod.Name, + _, err = e2eoutput.LookForStringInPodExec(pvc.Namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, "", time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } diff --git a/tests/e2e/vcp_to_csi_syncer.go b/tests/e2e/vcp_to_csi_syncer.go index 04f1fdcfaa..45769ef98a 100644 --- a/tests/e2e/vcp_to_csi_syncer.go +++ b/tests/e2e/vcp_to_csi_syncer.go @@ -21,6 +21,7 @@ import ( "fmt" "math/rand" "os" + "os/exec" "strconv" "strings" "time" @@ -44,6 +45,7 @@ import ( fdep "k8s.io/kubernetes/test/e2e/framework/deployment" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" fpv "k8s.io/kubernetes/test/e2e/framework/pv" fssh "k8s.io/kubernetes/test/e2e/framework/ssh" fss "k8s.io/kubernetes/test/e2e/framework/statefulset" @@ -75,6 +77,7 @@ var _ = ginkgo.Describe("[csi-vcp-mig] VCP to CSI migration syncer tests", func( fullSyncWaitTime int podsToDelete []*v1.Pod migrationEnabledByDefault bool + rawBlockVolumeMode = v1.PersistentVolumeBlock ) ginkgo.BeforeEach(func() { @@ -739,7 +742,7 @@ var _ = ginkgo.Describe("[csi-vcp-mig] VCP to CSI migration syncer tests", func( statefulset := GetStatefulSetFromManifest(namespace) temp := statefulset.Spec.VolumeClaimTemplates - temp[0].Annotations[scAnnotation4Statefulset] = vcpSc.Name + temp[0].Spec.StorageClassName = &vcpSc.Name statefulset.Spec.PodManagementPolicy = appsv1.ParallelPodManagement ginkgo.By("Creating statefulset and waiting for the replicas to be ready") CreateStatefulSet(namespace, statefulset, client) @@ -831,6 +834,190 @@ var _ = ginkgo.Describe("[csi-vcp-mig] VCP to CSI migration syncer tests", func( toggleCSIMigrationFeatureGatesOnK8snodes(ctx, client, false, namespace) }) + // TC to verify VCP to CSI migration workflow when xfs filesystem is used in VCP + // Steps: + // 1. Create SC1 StorageClass in VCP and use xfs as fstype. + // 2. Create PVC pvc1 using this SC + // 3. Create deployment using SC1 with 1 replica. + // 4. Wait for replica to come up. + // 5. Verify that filesystem used to mount volume inside pod is xfs. + // 6. Create file file1.txt at mountpath. + // 7. Enable CSIMigration and CSIMigrationvSphere feature gates on + // kube-controller-manager (& restart). + // 8. Verify PV/PVCs used by deployment have the following annotation - + // "pv.kubernetes.io/migrated-to": "csi.vsphere.vmware.com". + // 9. Verify cnsvspherevolumemigrations crd is created for PV/PVCs used + // by deployment. + // 10. Repeat the following steps for all the nodes in the k8s cluster. + // a. Drain and Cordon off the node. + // b. Enable CSIMigration and CSIMigrationvSphere feature gates on the + // kubelet and Restart kubelet. + // c. Verify CSI node for the corresponding K8s node has the following + // annotation - storage.alpha.kubernetes.io/migrated-plugins. + // d. Enable scheduling on the node. + // 11. Verify that filesystem used is xfs inside pod even after migration + // 12. Write new data at mountpath and verify that write is successful. + // 13. Create a new PVC post migration. + // 14. Verify "pv.kubernetes.io/provisioned-by": "csi.vsphere.vmware.com" + // annotation on new pvc created post migration. + // 15. Verify cnsvspherevolumemigrations crd is created for newly created PVC. 
+ // 16. Scale down deployment replicas to 0. + // All cleanup will be done as part of AfterEach() function: + // 17. Delete deployment. + // 18. Delete all PVCs. + // 19. Wait for PVs and respective vmdks to get deleted. + // 20. Verify cnsvspherevolumemigrations crds are removed for all PV/PVCs. + // 21. Verify CNS entries are removed for all PVCs. + // 22. Delete SC1. + ginkgo.It("TC to verify VCP to CSI migration workflow when xfs filesystem is used in VCP", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ginkgo.By("Creating VCP SC with fstype as xfs") + scParams := make(map[string]string) + scParams[vcpScParamDatastoreName] = GetAndExpectStringEnvVar(envSharedDatastoreName) + scParams[vcpScParamFstype] = "xfs" + vcpSc, err := createVcpStorageClass(client, scParams, nil, "", "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + vcpScs = append(vcpScs, vcpSc) + + ginkgo.By("Creating VCP PVC pvc1 before migration") + pvc1, err := createPVC(client, namespace, nil, "", vcpSc, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + vcpPvcsPreMig = append(vcpPvcsPreMig, pvc1) + + ginkgo.By("Waiting for all claims created before migration to be in bound state") + vcpPvsPreMig, err = fpv.WaitForPVClaimBoundPhase(client, vcpPvcsPreMig, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating a Deployment using pvc1") + labelsMap := make(map[string]string) + labelsMap["dep-lkey"] = "lval" + dep1, err := createDeployment(ctx, client, 1, labelsMap, nil, + namespace, []*v1.PersistentVolumeClaim{pvc1}, execCommand, false, busyBoxImageOnGcr) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pods, err := fdep.GetPodsForDeployment(client, dep1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pod := pods.Items[0] + err = fpod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Check filesystem used to mount volume inside pod is xfs as expeted + ginkgo.By("Verify if filesystem used to mount volume is xfs as expected") + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, + xfsFSType, time.Minute) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Create file1.txt at mountpath inside pod. 
+ ginkgo.By(fmt.Sprintf("Creating file file1.txt at mountpath inside pod: %v", pod.Name)) + data1 := "This file file1.txt is written before migration" + filePath1 := "/mnt/volume1/file1.txt" + writeDataOnFileFromPod(namespace, pod.Name, filePath1, data1) + + ginkgo.By("Enabling CSIMigration and CSIMigrationvSphere feature gates on kube-controller-manager") + err = toggleCSIMigrationFeatureGatesOnKubeControllerManager(ctx, client, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + kcmMigEnabled = true + + ginkgo.By("Waiting for migration related annotations on PV/PVCs created before migration") + waitForMigAnnotationsPvcPvLists(ctx, client, vcpPvcsPreMig, vcpPvsPreMig, true, migrationEnabledByDefault) + + ginkgo.By("Verify CnsVSphereVolumeMigration crds and CNS volume metadata on pvc created before migration") + verifyCnsVolumeMetadataAndCnsVSphereVolumeMigrationCrdForPvcs(ctx, client, vcpPvcsPreMig) + + ginkgo.By("Enable CSI migration feature gates on kublets on k8s nodes") + toggleCSIMigrationFeatureGatesOnK8snodes(ctx, client, true, namespace) + kubectlMigEnabled = true + + // Verify that fstype used is still xfs after migration + // verify that data can be read successfully that was written before migration + // Verify that new write is successful post migration + dep1, err = client.AppsV1().Deployments(namespace).Get(ctx, dep1.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + podsAfterMig, err := fdep.GetPodsForDeployment(client, dep1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + podAfterMig := podsAfterMig.Items[0] + ginkgo.By("Verify if filesystem used to mount volume is xfs post migration") + _, err = e2eoutput.LookForStringInPodExec(namespace, podAfterMig.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, + xfsFSType, time.Minute) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify that data can be successfully read from file1.txt which was written before migration") + output := readFileFromPod(namespace, podAfterMig.Name, filePath1) + gomega.Expect(output == data1+"\n").To(gomega.BeTrue(), "Pod is not able to read file1.txt post migration") + + // Create new file file2.txt at mountpath inside pod. 
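// Editor's note: the comparisons in this test expect a trailing newline from readFileFromPod
// (hence the data1+"\n" check above). A minimal, hypothetical round trip with the helpers used
// in this test; the file path and data are illustrative only.
func exampleFileRoundTrip(namespace string, podName string) {
	data := "hello from the e2e test"
	writeDataOnFileFromPod(namespace, podName, "/mnt/volume1/hello.txt", data)
	output := readFileFromPod(namespace, podName, "/mnt/volume1/hello.txt")
	// The read helper returns the file contents terminated by a newline.
	gomega.Expect(output).To(gomega.Equal(data + "\n"))
}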
+ ginkgo.By(fmt.Sprintf("Creating file file2.txt at mountpath inside pod: %v", podAfterMig.Name)) + data2 := "This file file2.txt is written post migration" + filePath2 := "/mnt/volume1/file2.txt" + writeDataOnFileFromPod(namespace, podAfterMig.Name, filePath2, data2) + + ginkgo.By("Verify that data written post migration can be successfully read from file2.txt") + output = readFileFromPod(namespace, podAfterMig.Name, filePath2) + gomega.Expect(output == data2+"\n").To(gomega.BeTrue(), "Pod is not able to read file2.txt") + + ginkgo.By("Creating VCP PVC pvc2 post migration") + pvc2, err := createPVC(client, namespace, nil, "", vcpSc, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + vcpPvcsPostMig = append(vcpPvcsPostMig, pvc2) + + ginkgo.By("Waiting for all claims created post migration to be in bound state") + vcpPvsPostMig, err = fpv.WaitForPVClaimBoundPhase(client, vcpPvcsPostMig, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify annotations on PV/PVCs created post migration") + waitForMigAnnotationsPvcPvLists(ctx, client, vcpPvcsPostMig, vcpPvsPostMig, false, migrationEnabledByDefault) + + ginkgo.By("Wait and verify CNS entries for all CNS volumes created post migration " + + "along with their respective CnsVSphereVolumeMigration CRDs") + verifyCnsVolumeMetadataAndCnsVSphereVolumeMigrationCrdForPvcs(ctx, client, vcpPvcsPostMig) + + ginkgo.By("Creating a new deployment using pvc2") + dep2, err := createDeployment(ctx, client, 1, labelsMap, nil, + namespace, []*v1.PersistentVolumeClaim{pvc2}, execCommand, false, busyBoxImageOnGcr) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + podsDep2, err := fdep.GetPodsForDeployment(client, dep2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + podDep2 := podsDep2.Items[0] + err = fpod.WaitForPodNameRunningInNamespace(client, podDep2.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Check filesystem used to mount volume inside pod is xfs as expeted + ginkgo.By("Verify if filesystem used to mount volume is xfs as expected") + _, err = e2eoutput.LookForStringInPodExec(namespace, podDep2.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, + xfsFSType, time.Minute) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Scale down deployment1 to 0 replica") + dep1, err = client.AppsV1().Deployments(namespace).Get(ctx, dep1.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pods, err = fdep.GetPodsForDeployment(client, dep1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pod = pods.Items[0] + rep := dep1.Spec.Replicas + *rep = 0 + dep1.Spec.Replicas = rep + _, err = client.AppsV1().Deployments(namespace).Update(ctx, dep1, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpod.WaitForPodNotFoundInNamespace(client, pod.Name, namespace, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Scale down deployment2 to 0 replica") + dep2, err = client.AppsV1().Deployments(namespace).Get(ctx, dep2.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pods, err = fdep.GetPodsForDeployment(client, dep2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pod = pods.Items[0] + rep2 := dep2.Spec.Replicas + *rep2 = 0 + dep2.Spec.Replicas = rep2 + _, err = client.AppsV1().Deployments(namespace).Update(ctx, dep2, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpod.WaitForPodNotFoundInNamespace(client, pod.Name, namespace, 
pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + // Verify label and pod name updates with Deployment. // Steps: // 1. Create SC1 VCP SC. @@ -1314,7 +1501,7 @@ var _ = ginkgo.Describe("[csi-vcp-mig] VCP to CSI migration syncer tests", func( for i := 0; i < 10; i++ { statefulset := GetStatefulSetFromManifest(ns.Name) temp := statefulset.Spec.VolumeClaimTemplates - temp[0].Annotations[scAnnotation4Statefulset] = vcpSc.Name + temp[0].Spec.StorageClassName = &vcpSc.Name statefulset.Name = "pre-sts" + strconv.Itoa(i) statefulset.Spec.Template.Labels["app"] = statefulset.Name statefulset.Spec.Selector.MatchLabels["app"] = statefulset.Name @@ -1357,7 +1544,7 @@ var _ = ginkgo.Describe("[csi-vcp-mig] VCP to CSI migration syncer tests", func( for i := 0; i < 10; i++ { statefulset := GetStatefulSetFromManifest(ns.Name) temp := statefulset.Spec.VolumeClaimTemplates - temp[0].Annotations[scAnnotation4Statefulset] = vcpSc.Name + temp[0].Spec.StorageClassName = &vcpSc.Name statefulset.Name = "post-sts" + strconv.Itoa(i) statefulset.Spec.Template.Labels["app"] = statefulset.Name statefulset.Spec.Selector.MatchLabels["app"] = statefulset.Name @@ -1429,6 +1616,248 @@ var _ = ginkgo.Describe("[csi-vcp-mig] VCP to CSI migration syncer tests", func( }) + /* + Test VCP-to-CSI migration with raw block volume using deployment + Steps: + 1. Create SC1 VCP SC. + 2. Create PVC1 using SC1 with volumeMode=Block and wait for binding with PV (say PV1). + 3. Create nginx deployment DEP1 using PVC1 with 1 replica. + 4. Wait for all the replicas to come up. + 5. Write and read some data on raw block PVC PVC1 inside deployment pod. + 6. Enable CSIMigration and CSIMigrationvSphere feature gates on + kube-controller-manager (& restart). + 7. Repeat the following steps for all the nodes in the k8s cluster. + a. Drain and Cordon off the node. + b. Enable CSIMigration and CSIMigrationvSphere feature gates on the + kubelet and Restart kubelet. + c. Verify CSI node for the corresponding K8s node has the following + annotation - storage.alpha.kubernetes.io/migrated-plugins. + d. Enable scheduling on the node. + 8. Verify all PVC1 and PV1 and have the following annotation - + "pv.kubernetes.io/migrated-to": "csi.vsphere.vmware.com". + 9. Verify cnsvspherevolumemigrations crd is created for PVC1 and PV1. + 10. Verify CNS entries are present for all PVC1 and PV1 and all PVCs has + correct pod names. + 11. Verify data written before migration on PVC1. + 12. Create PVC2 using SC1 with volumeMode=Block and wait for binding with PV (say PV2). + 13. Verify cnsvspherevolumemigrations crd is created for PVC2 and PV2. + 14. Patch DEP1 to use PVC2 as well. + 15. Verify CNS entries are present for present for PV2 and PVC2. + 16. Verify CNS entries for PVC1 and PVC2 have correct pod names. + 17. Write and read some data on raw block PVC PVC2 inside deployment pod. + 18. Scale down DEP1 replicas to 0 replicas and wait for PVC1 and PVC2 + to detach. + 19. Verify CNS entries for PVC1 and PVC2 have pod names removed. + 20. Delete DEP1. + 21. Wait for PV1 and PV2 and respective vmdks to get deleted. + 22. Verify cnsvspherevolumemigrations crds are removed for all PV1, PV2, + PVC1 and PVC2. + 23. Verify CNS entries are removed for PV1, PV2, PVC1 and PVC2. + Following steps will be done as a part of AfterEach() call. + 24. Delete SC1. + 25. Repeat the following steps for all the nodes in the k8s cluster. + a. Drain and Cordon off the node. + b. 
Disable CSIMigration and CSIMigrationvSphere feature gates on the + kubelet and Restart kubelet. + c. Verify CSI node for the corresponding K8s node does not have the + following annotation - storage.alpha.kubernetes.io/migrated-plugins. + d. Enable scheduling on the node. + 26. Disable CSIMigration and CSIMigrationvSphere feature gates on + kube-controller-manager (& restart). + */ + ginkgo.It("Test VCP-to-CSI migration with raw block volume", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ginkgo.By("Creating VCP SC") + scParameters := make(map[string]string) + scParameters[vcpScParamDatastoreName] = GetAndExpectStringEnvVar(envSharedDatastoreName) + vcpSc, err := createVcpStorageClass(client, scParameters, nil, "", "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, vcpSc.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating VCP PVC pvc1 with raw block volumemode before migration") + vcpPvcSpec := getPersistentVolumeClaimSpecWithStorageClass(namespace, "", vcpSc, nil, "") + vcpPvcSpec.Spec.VolumeMode = &rawBlockVolumeMode + pvc1, err := fpv.CreatePVC(client, namespace, vcpPvcSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Failed to create pvc with err: %v", err)) + vcpPvcsPreMig = append(vcpPvcsPreMig, pvc1) + + ginkgo.By("Waiting for all claims created before migration to be in bound state") + vcpPvsPreMig, err = fpv.WaitForPVClaimBoundPhase(client, vcpPvcsPreMig, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + labelsMap := make(map[string]string) + labelsMap["dep-lkey"] = "lval" + ginkgo.By("Creating a Deployment using raw block PVC pvc1") + volumeIndex := 1 + volumeName := fmt.Sprintf("volume%v", volumeIndex) + pvc1_devicePath := fmt.Sprintf("%v%v", pod_devicePathPrefix, volumeIndex) + depSpec := getDeploymentSpec(ctx, client, 1, labelsMap, nil, + namespace, []*v1.PersistentVolumeClaim{pvc1}, + "trap exit TERM; while true; do sleep 1; done", false, busyBoxImageOnGcr) + depSpec.Spec.Template.Spec.Containers[len(depSpec.Spec.Template.Spec.Containers)-1].VolumeMounts = nil + depSpec.Spec.Template.Spec.Containers[len(depSpec.Spec.Template.Spec.Containers)-1]. + VolumeDevices = []v1.VolumeDevice{ + { + Name: volumeName, + DevicePath: pvc1_devicePath, + }, + } + dep1, err := client.AppsV1().Deployments(namespace).Create(ctx, depSpec, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Errorf("deployment %q Create API error: %v", depSpec.Name, err)) + + ginkgo.By("Waiting deployment to complete") + err = fdep.WaitForDeploymentComplete(client, dep1) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Errorf("deployment %q failed to complete: %v", depSpec.Name, err)) + + pods, err := fdep.GetPodsForDeployment(client, dep1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pod := pods.Items[0] + err = fpod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Write and read some data on raw block volume inside the deployment pod. + // Use same devicePath for raw block volume here as used inside podSpec by getDeploymentSpec(). 
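// Editor's note: a condensed, hypothetical view of the raw-block wiring used in this test:
// the claim requests volumeMode=Block and the container consumes it through volumeDevices
// (a device node) rather than volumeMounts. Names and the device path are illustrative only.
func exampleRawBlockWiring(sc *storagev1.StorageClass) (*v1.PersistentVolumeClaim, v1.Container) {
	blockMode := v1.PersistentVolumeBlock
	pvcSpec := getPersistentVolumeClaimSpecWithStorageClass("demo-ns", "", sc, nil, "")
	pvcSpec.Spec.VolumeMode = &blockMode
	container := v1.Container{
		Name:  "write-pod",
		Image: busyBoxImageOnGcr,
		// A raw block claim is surfaced to the container as a device node, not a filesystem mount.
		VolumeDevices: []v1.VolumeDevice{
			{Name: "volume1", DevicePath: "/dev/xvda"},
		},
	}
	return pvcSpec, container
}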
+ rand.New(rand.NewSource(time.Now().Unix())) + testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) + ginkgo.By(fmt.Sprintf("Creating a 1mb test data file %v", testdataFile)) + op, err := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=1").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + op, err = exec.Command("rm", "-f", testdataFile).Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By(fmt.Sprintf("Write and read data on raw volume attached to: %v at path %v "+ + "before enabling migration", pod.Name, pod.Spec.Containers[0].VolumeDevices[0].DevicePath)) + verifyIOOnRawBlockVolume(namespace, pod.Name, pvc1_devicePath, testdataFile, 0, 1) + + ginkgo.By("Enabling CSIMigration and CSIMigrationvSphere feature gates on kube-controller-manager") + err = toggleCSIMigrationFeatureGatesOnKubeControllerManager(ctx, client, true) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + kcmMigEnabled = true + + ginkgo.By("Waiting for migration related annotations on PV/PVCs created before migration") + waitForMigAnnotationsPvcPvLists(ctx, client, vcpPvcsPreMig, vcpPvsPreMig, true, migrationEnabledByDefault) + + ginkgo.By("Verify CnsVSphereVolumeMigration crds and CNS volume metadata on pvc created before migration") + verifyCnsVolumeMetadataAndCnsVSphereVolumeMigrationCrdForPvcs(ctx, client, vcpPvcsPreMig) + + ginkgo.By("Enable CSI migration feature gates on kublets on k8s nodes") + toggleCSIMigrationFeatureGatesOnK8snodes(ctx, client, true, namespace) + kubectlMigEnabled = true + + // After migration, verify the original data on VCP PVC pvc1 + ginkgo.By(fmt.Sprintf("Verify previously written data on migrated raw volume attached to: %v at path %v", + pod.Name, pod.Spec.Containers[0].VolumeDevices[0].DevicePath)) + verifyDataFromRawBlockVolume(namespace, pod.Name, pvc1_devicePath, testdataFile, 0, 1) + + ginkgo.By("Creating VCP PVC pvc2 with raw block volumemode post migration") + pvc2, err := fpv.CreatePVC(client, namespace, vcpPvcSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Failed to create pvc with err: %v", err)) + vcpPvcsPostMig = append(vcpPvcsPostMig, pvc2) + + ginkgo.By("Waiting for all claims created post migration to be in bound state") + vcpPvsPostMig, err = fpv.WaitForPVClaimBoundPhase(client, vcpPvcsPostMig, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify annotations on PV/PVCs created post migration") + waitForMigAnnotationsPvcPvLists(ctx, client, vcpPvcsPostMig, vcpPvsPostMig, false, migrationEnabledByDefault) + + ginkgo.By("Wait and verify CNS entries for all CNS volumes created post migration " + + "along with their respective CnsVSphereVolumeMigration CRDs") + verifyCnsVolumeMetadataAndCnsVSphereVolumeMigrationCrdForPvcs(ctx, client, vcpPvcsPostMig) + + dep1, err = client.AppsV1().Deployments(namespace).Get(ctx, dep1.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pods, err = fdep.GetPodsForDeployment(client, dep1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pod = pods.Items[0] + rep := dep1.Spec.Replicas + *rep = 0 + dep1.Spec.Replicas = rep + ginkgo.By("Scale down deployment to 0 replica") + dep1, err = client.AppsV1().Deployments(namespace).Update(ctx, dep1, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpod.WaitForPodNotFoundInNamespace(client, pod.Name, namespace, 
pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + pvclaims := []*v1.PersistentVolumeClaim{pvc1, pvc2} + var volumeDevices = make([]v1.VolumeDevice, len(pvclaims)) + var volumes = make([]v1.Volume, len(pvclaims)) + for index, pvclaim := range pvclaims { + volumename := fmt.Sprintf("volume%v", index+1) + volumeDevices[index] = v1.VolumeDevice{Name: volumename, DevicePath: "/mnt/" + volumename} + volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}} + } + dep1, err = client.AppsV1().Deployments(namespace).Get(ctx, dep1.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + dep1.Spec.Template.Spec.Containers[0].VolumeDevices = volumeDevices + dep1.Spec.Template.Spec.Volumes = volumes + *rep = 1 + dep1.Spec.Replicas = rep + ginkgo.By("Update deployment to use pvc1 and pvc2") + dep1, err = client.AppsV1().Deployments(namespace).Update(ctx, dep1, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fdep.WaitForDeploymentComplete(client, dep1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + dep1, err = client.AppsV1().Deployments(namespace).Get(ctx, dep1.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pods, err = wait4DeploymentPodsCreation(client, dep1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(pods.Items)).NotTo(gomega.BeZero()) + pod = pods.Items[0] + err = fpod.WaitTimeoutForPodReadyInNamespace(client, pod.Name, namespace, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify CNS entries for all CNS volumes created post migration " + + "along with their respective CnsVSphereVolumeMigration CRDs") + verifyCnsVolumeMetadataAndCnsVSphereVolumeMigrationCrdForPvcs(ctx, client, + []*v1.PersistentVolumeClaim{pvc1, pvc2}) + + // Write and read some data on VCP PVC pvc2 + volumeIndex++ + pvc2_devicePath := fmt.Sprintf("%v%v", pod_devicePathPrefix, volumeIndex) + rand.New(rand.NewSource(time.Now().Unix())) + testdataFile = fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) + ginkgo.By(fmt.Sprintf("Creating a 1mb test data file %v", testdataFile)) + op, err = exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=1M", "count=1").Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + op, err = exec.Command("rm", "-f", testdataFile).Output() + fmt.Println(op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + verifyIOOnRawBlockVolume(namespace, pod.Name, pvc2_devicePath, testdataFile, 0, 1) + + ginkgo.By("Scale down deployment to 0 replica") + dep1, err = client.AppsV1().Deployments(namespace).Get(ctx, dep1.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + *rep = 0 + dep1.Spec.Replicas = rep + _, err = client.AppsV1().Deployments(namespace).Update(ctx, dep1, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpod.WaitForPodNotFoundInNamespace(client, pod.Name, namespace, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + framework.Logf("Delete deployment set") + err := client.AppsV1().Deployments(namespace).Delete(ctx, dep1.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait and verify CNS entries for all CNS volumes created post migration " + + "along with their 
respective CnsVSphereVolumeMigration CRDs") + verifyCnsVolumeMetadataAndCnsVSphereVolumeMigrationCrdForPvcs(ctx, client, + []*v1.PersistentVolumeClaim{pvc1, pvc2}) + }) }) // waitForCnsVSphereVolumeMigrationCrd waits for CnsVSphereVolumeMigration crd to be created for the given volume path @@ -1585,7 +2014,7 @@ func createPodWithMultipleVolsVerifyVolMounts(ctx context.Context, client client "Volume is not attached to the node volHandle: %s, vmUUID: %s", volHandle, vmUUID) ginkgo.By("Verify the volume is accessible and filesystem type is as expected") - _, err = framework.LookForStringInPodExec(namespace, pod.Name, + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, "", time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -1794,7 +2223,13 @@ func scaleDownNDeleteStsDeploymentsInNamespace(ctx context.Context, c clientset. gomega.Expect(err).NotTo(gomega.HaveOccurred()) deletePolicy := metav1.DeletePropagationForeground err = c.AppsV1().Deployments(ns).Delete(ctx, dep.Name, metav1.DeleteOptions{PropagationPolicy: &deletePolicy}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if err != nil { + if apierrors.IsNotFound(err) { + return + } else { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } } } diff --git a/tests/e2e/vmc_create_gc.go b/tests/e2e/vmc_create_gc.go index 5b8bc29f5d..a9c7014337 100644 --- a/tests/e2e/vmc_create_gc.go +++ b/tests/e2e/vmc_create_gc.go @@ -17,6 +17,9 @@ limitations under the License. package e2e import ( + "fmt" + "os" + ginkgo "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "k8s.io/kubernetes/test/e2e/framework" @@ -42,6 +45,11 @@ var _ = ginkgo.Describe("Create GC", func() { ginkgo.It("[vmc] Create GC using devops user", func() { + tkgImageName := os.Getenv(envTKGImage) + if tkgImageName == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envTKGImage)) + } + ginkgo.By("Get WCP session id") gomega.Expect((e2eVSphere.Config.Global.VmcDevopsUser)).NotTo(gomega.BeEmpty(), "Devops user is not set") wcpToken := getWCPSessionId(vmcWcpHost, e2eVSphere.Config.Global.VmcDevopsUser, @@ -49,7 +57,7 @@ var _ = ginkgo.Describe("Create GC", func() { framework.Logf("vmcWcpHost %s", vmcWcpHost) ginkgo.By("Creating Guest Cluster with Devops User") - createGC(vmcWcpHost, wcpToken) + createGC(vmcWcpHost, wcpToken, tkgImageName, devopsTKG) ginkgo.By("Validate the Guest Cluster is up and running") err := getGC(vmcWcpHost, wcpToken, devopsTKG) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -66,6 +74,11 @@ var _ = ginkgo.Describe("Create GC", func() { ginkgo.It("[vmc] Create GC using cloudadmin user", func() { + tkgImageName := os.Getenv(envTKGImage) + if tkgImageName == "" { + ginkgo.Skip(fmt.Sprintf("Env %v is missing", envTKGImage)) + } + ginkgo.By("Get WCP session id") gomega.Expect((e2eVSphere.Config.Global.VmcCloudUser)).NotTo(gomega.BeEmpty(), "VmcCloudUser is not set") wcpToken := getWCPSessionId(vmcWcpHost, e2eVSphere.Config.Global.VmcCloudUser, @@ -73,7 +86,7 @@ var _ = ginkgo.Describe("Create GC", func() { framework.Logf("vmcWcpHost %s", vmcWcpHost) ginkgo.By("Creating Guest Cluster with cloudadmin User") - createGC(vmcWcpHost, wcpToken) + createGC(vmcWcpHost, wcpToken, tkgImageName, cloudadminTKG) ginkgo.By("Validate the Guest Cluster is up and running") err := getGC(vmcWcpHost, wcpToken, cloudadminTKG) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/tests/e2e/vmc_csi_deployments.go b/tests/e2e/vmc_csi_deployments.go index 9ca41d522a..031c783459 100644 --- 
a/tests/e2e/vmc_csi_deployments.go +++ b/tests/e2e/vmc_csi_deployments.go @@ -92,7 +92,7 @@ var _ = ginkgo.Describe("[vmc-gc] Deploy, Update and Scale Deployments", func() 1. Create Storage Class and PVC 2. Deploy nginx pods with volume 3. Update the nginx deployment pods, update the nginx image from - k8s.gcr.io/nginx-slim:0.8 to k8s.gcr.io/nginx-slim:0.9 + registry.k8s.io/nginx-slim:0.8 to registry.k8s.io/nginx-slim:0.9 4. Wait for some time and verify the update is successful 5. Scale dowm the deployment to 0 replicas 6. Scale up the deployment to 1 replicas and verify all the pods should be up and running diff --git a/tests/e2e/volume_health_test.go b/tests/e2e/volume_health_test.go index ba6376ba63..77e407b288 100644 --- a/tests/e2e/volume_health_test.go +++ b/tests/e2e/volume_health_test.go @@ -30,6 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -83,6 +84,7 @@ var _ = ginkgo.Describe("Volume health check", func() { vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort if supervisorCluster { deleteResourceQuota(client, namespace) + dumpSvcNsEventsOnTestFailure(client, namespace) } if pvc != nil { if hostIP != "" { @@ -112,6 +114,7 @@ var _ = ginkgo.Describe("Volume health check", func() { if guestCluster { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) } waitForAllHostsToBeUp(ctx, &e2eVSphere) }) @@ -3022,7 +3025,7 @@ var _ = ginkgo.Describe("Volume health check", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) cmd := []string{"rollout", "restart", "statefulset.apps/" + statefulset.Name, "--namespace=" + namespace} - framework.RunKubectlOrDie(namespace, cmd...) + e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
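// Editor's note: kubectl and pod-output helpers are now taken from dedicated subpackages of the
// Kubernetes e2e test framework instead of the top-level framework package. A minimal,
// hypothetical usage with the new imports (statefulset and file names are illustrative only):
//
//	import (
//		e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
//		e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
//	)
func exampleNewFrameworkHelpers(namespace string, podName string) {
	// Restart a statefulset through kubectl, as done above.
	e2ekubectl.RunKubectlOrDie(namespace, "rollout", "restart", "statefulset.apps/web",
		"--namespace="+namespace)
	// Look for an expected string in the output of a command run inside a pod.
	_, err := e2eoutput.LookForStringInPodExec(namespace, podName,
		[]string{"/bin/cat", "/mnt/volume1/fstype"}, "", time.Minute)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
}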
// Wait for the StatefulSet Pods to be up and Running num_csi_pods := len(list_of_pods) diff --git a/tests/e2e/volume_provisioning_with_level5_topology.go b/tests/e2e/volume_provisioning_with_level5_topology.go index 6378ec271d..bec1992c6f 100644 --- a/tests/e2e/volume_provisioning_with_level5_topology.go +++ b/tests/e2e/volume_provisioning_with_level5_topology.go @@ -154,7 +154,8 @@ var _ = ginkgo.Describe("[csi-topology-for-level5] Topology-Provisioning-For-Sta */ ginkgo.It("Provisioning volume when no topology details specified in storage class "+ - "and using default pod management policy for statefulset", func() { + "and using default pod management policy for statefulset", ginkgo.Label(p0, topology, block, + vanilla, level5), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() // Creating StorageClass when no topology details are specified using WFC Binding mode @@ -190,13 +191,15 @@ var _ = ginkgo.Describe("[csi-topology-for-level5] Topology-Provisioning-For-Sta // Verify PV node affinity and that the PODS are running on appropriate nodes ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate node") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Scale down statefulset to 0 replicas replicas -= 3 ginkgo.By("Scale down statefulset replica count to 0") - scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -250,7 +253,7 @@ var _ = ginkgo.Describe("[csi-topology-for-level5] Topology-Provisioning-For-Sta *(statefulset.Spec.Replicas) = 3 statefulset.Spec.PodManagementPolicy = apps.ParallelPodManagement statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. 
- Annotations["volume.beta.kubernetes.io/storage-class"] = sc.Name + Spec.StorageClassName = &sc.Name CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) @@ -265,28 +268,33 @@ var _ = ginkgo.Describe("[csi-topology-for-level5] Topology-Provisioning-For-Sta // Verify PV node affinity and that the PODS are running on appropriate node ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate nodes") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, - statefulset, namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, + statefulset, namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Scale up statefulset replica count to 5 replicas += 5 ginkgo.By("Scale up statefulset replica count to 5") - scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Scale down statefulset replica count to 1 replicas -= 1 ginkgo.By("Scale down statefulset replica count to 1") - scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Verify newly created PV node affinity details and that the new PODS are running on appropriate nodes ginkgo.By("Verify newly created PV node affinity details and that the new PODS are running on appropriate nodes") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Scale down statefulset replicas to 0 replicas = 0 ginkgo.By("Scale down statefulset replica count to 0") - scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -338,7 +346,7 @@ var _ = ginkgo.Describe("[csi-topology-for-level5] Topology-Provisioning-For-Sta *(statefulset.Spec.Replicas) = 3 statefulset.Spec.PodManagementPolicy = apps.ParallelPodManagement statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. 
- Annotations["volume.beta.kubernetes.io/storage-class"] = sc.Name + Spec.StorageClassName = &sc.Name ginkgo.By("Creating statefulset") CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) @@ -356,25 +364,29 @@ var _ = ginkgo.Describe("[csi-topology-for-level5] Topology-Provisioning-For-Sta appropriate node as specified in the allowed topologies of SC */ ginkgo.By("Verify PV node affinity and that the PODS are running " + "on appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Scale up statefulset replicas to 5 replicas += 5 ginkgo.By("Scale up statefulset replica count to 5") - scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) /* Verify newly created PV node affinity and that the news PODS are running on appropriate node as specified in the allowed topologies of SC */ ginkgo.By("Verify newly created PV node affinity and that the news PODS " + "are running on appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Scale down statefulset replicas to 0 replicas = 0 ginkgo.By("Scale down statefulset replica count to 0") - scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -444,30 +456,35 @@ var _ = ginkgo.Describe("[csi-topology-for-level5] Topology-Provisioning-For-Sta node as specified in the allowed topologies of SC */ ginkgo.By("Verify PV node affinity and that the PODS are running " + "on appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Scale up statefulset replicas to 5 replicas += 5 ginkgo.By("Scale up statefulset replica count to 5") - scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Scale down statefulset replicas to 1 replicas -= 1 ginkgo.By("Scale down statefulset replica count to 1") - scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) /* "Verify newly created PV node affinity and that the new PODS are running on appropriate node as specified in the allowed topologies of SC */ ginkgo.By("Verify newly 
created PV node affinity and that the new PODS are running " + "on appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Scale down statefulset replicas to 0 replicas = 0 ginkgo.By("Scale down statefulset replica count to 0") - scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -528,7 +545,7 @@ var _ = ginkgo.Describe("[csi-topology-for-level5] Topology-Provisioning-For-Sta *(statefulset.Spec.Replicas) = 3 statefulset.Spec.PodManagementPolicy = apps.ParallelPodManagement statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = storageclass.Name + Spec.StorageClassName = &storageclass.Name ginkgo.By("Creating statefulset") CreateStatefulSet(namespace, statefulset, client) replicas := *(statefulset.Spec.Replicas) @@ -546,30 +563,35 @@ var _ = ginkgo.Describe("[csi-topology-for-level5] Topology-Provisioning-For-Sta node as specified in the allowed topologies of SC */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Scale up statefulset replicas to 5 replicas += 5 ginkgo.By("Scale up statefulset replica count to 5") - scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleUpStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Scale down statefulset replicas to 1 replicas -= 1 ginkgo.By("Scale down statefulset replica count to 1") - scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) /* Verify newly created PV node affinity and that the new PODS are running on appropriate node as specified in the allowed topologies of SC */ ginkgo.By("Verify newly created PV node affinity and that the new PODS are running " + "on appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Scale down statefulset replicas to 0 replicas = 0 ginkgo.By("Scale down statefulset replica count to 0") - scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -646,8 +668,9 @@ var _ 
= ginkgo.Describe("[csi-topology-for-level5] Topology-Provisioning-For-Sta node as specified in the allowed topologies of SC */ ginkgo.By("Verify PV node affinity and that the PODS are running " + "on appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForDeploymentSetsLevel5(ctx, client, deployment, - namespace, allowedTopologyForSC, false) + err = verifyPVnodeAffinityAndPODnodedetailsForDeploymentSetsLevel5(ctx, client, deployment, + namespace, allowedTopologyForSC, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -671,7 +694,7 @@ var _ = ginkgo.Describe("[csi-topology-for-level5] Topology-Provisioning-For-Sta ginkgo.It("Provisioning volume when storage class specified with multiple labels "+ "without specifying datastore url and using default pod management policy "+ - "for statefulset", func() { + "for statefulset", ginkgo.Label(p2, topology, block, vanilla, level5), func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() // Get allowed topologies for Storage Class rack > (rack1,rack2,rack3) @@ -713,13 +736,15 @@ var _ = ginkgo.Describe("[csi-topology-for-level5] Topology-Provisioning-For-Sta as specified in the allowed topologies of SC */ ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, - namespace, allowedTopologies, false) + err = verifyPVnodeAffinityAndPODnodedetailsForStatefulsetsLevel5(ctx, client, statefulset, + namespace, allowedTopologies, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Scale down statefulset to 0 replicas replicas -= 3 ginkgo.By("Scale down statefulset replica count to 0") - scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false) + err = scaleDownStatefulSetPod(ctx, client, statefulset, namespace, replicas, false, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -908,8 +933,9 @@ var _ = ginkgo.Describe("[csi-topology-for-level5] Topology-Provisioning-For-Sta specified in the allowed topologies of SC */ ginkgo.By("Verify PV node affinity and that the PODS are running on appropriate " + "node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -993,8 +1019,9 @@ var _ = ginkgo.Describe("[csi-topology-for-level5] Topology-Provisioning-For-Sta specified in the allowed topologies of SC */ ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -1120,8 +1147,9 @@ var _ = ginkgo.Describe("[csi-topology-for-level5] Topology-Provisioning-For-Sta as specified in the allowed topologies of SC */ ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - 
verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -1262,8 +1290,9 @@ var _ = ginkgo.Describe("[csi-topology-for-level5] Topology-Provisioning-For-Sta specified in the allowed topologies of SC */ ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Deleting Pod ginkgo.By("Deleting the Pod") @@ -1328,8 +1357,9 @@ var _ = ginkgo.Describe("[csi-topology-for-level5] Topology-Provisioning-For-Sta specified in the allowed topologies of SC */ ginkgo.By("Verify PV node affinity and that the PODS are running on " + "appropriate node as specified in the allowed topologies of SC") - verifyPVnodeAffinityAndPODnodedetailsFoStandalonePodLevel5(ctx, client, pod, namespace, - allowedTopologies) + err = verifyPVnodeAffinityAndPODnodedetailsForStandalonePodLevel5(ctx, client, pod, namespace, + allowedTopologies, false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Verify volume metadata for POD, PVC and PV ginkgo.By("Verify volume metadata for POD, PVC and PV") diff --git a/tests/e2e/vsan_stretched_cluster.go b/tests/e2e/vsan_stretched_cluster.go index 0b07e067aa..a149f92a8a 100644 --- a/tests/e2e/vsan_stretched_cluster.go +++ b/tests/e2e/vsan_stretched_cluster.go @@ -234,7 +234,7 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f }() ginkgo.By("Creating statefulset and deployment with volumes from the stretched datastore") statefulset, _, _ := createStsDeployment(ctx, client, namespace, sc, true, - false, 0, "") + false, 0, "", "") ssPodsBeforeScaleDown := fss.GetPodList(client, statefulset) replicas := *(statefulset.Spec.Replicas) csipods, err := client.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) @@ -346,9 +346,9 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f ginkgo.By("Creating statefulsets sts1 with replica count 1 and sts2 with 5 and wait for all" + "the replicas to be running") - statefulset1, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 1, "web") + statefulset1, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 1, "web", "") replicas1 := *(statefulset1.Spec.Replicas) - statefulset2, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 5, "web-nginx") + statefulset2, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 5, "web-nginx", "") ss2PodsBeforeScaleDown := fss.GetPodList(client, statefulset2) replicas2 := *(statefulset2.Spec.Replicas) @@ -748,7 +748,7 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f }() ginkgo.By("Creating statefulset and deployment with volumes from the stretched datastore") statefulset, deployment, _ := createStsDeployment(ctx, client, namespace, sc, true, - false, 0, "") + false, 0, "", "") ssPodsBeforeScaleDown := fss.GetPodList(client, statefulset) replicas := *(statefulset.Spec.Replicas) @@ -1743,7 +1743,7 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster 
tests", f ginkgo.By("Creating statefulset and deployment with volumes from the stretched datastore") statefulset, _, _ := createStsDeployment(ctx, client, namespace, sc, true, - false, 0, "") + false, 0, "", "") ssPodsBeforeScaleDown := fss.GetPodList(client, statefulset) replicas := *(statefulset.Spec.Replicas) @@ -1855,9 +1855,9 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f ginkgo.By("Creating statefulsets sts1 with replica count 1 and sts2 with 5 and wait for all" + "the replicas to be running") - statefulset1, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 1, "web") + statefulset1, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 1, "web", "") replicas1 := *(statefulset1.Spec.Replicas) - statefulset2, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 5, "web-nginx") + statefulset2, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 5, "web-nginx", "") ss2PodsBeforeScaleDown := fss.GetPodList(client, statefulset2) replicas2 := *(statefulset2.Spec.Replicas) @@ -1986,7 +1986,7 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - statefulset, _, _ := createStsDeployment(ctx, client, namespace, sc, true, false, 0, "") + statefulset, _, _ := createStsDeployment(ctx, client, namespace, sc, true, false, 0, "", "") replicas := *(statefulset.Spec.Replicas) ssPodsBeforeScaleDown := fss.GetPodList(client, statefulset) // Scale down replicas of statefulset and verify CNS entries for volumes @@ -2072,9 +2072,9 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f ginkgo.By("Creating statefulsets sts1 with replica count 1 and sts2 with 5 and wait for all" + "the replicas to be running") - statefulset1, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 1, "web") + statefulset1, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 1, "web", "") replicas1 := *(statefulset1.Spec.Replicas) - statefulset2, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 5, "web-nginx") + statefulset2, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 5, "web-nginx", "") ss2PodsBeforeScaleDown := fss.GetPodList(client, statefulset2) replicas2 := *(statefulset2.Spec.Replicas) @@ -2421,9 +2421,9 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f ginkgo.By("Creating statefulsets sts1 with replica count 1 and sts2 with 5 and wait for all" + "the replicas to be running") - statefulset1, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 1, "web") + statefulset1, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 1, "web", "") replicas1 := *(statefulset1.Spec.Replicas) - statefulset2, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 5, "web-nginx") + statefulset2, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 5, "web-nginx", "") ss2PodsBeforeScaleDown := fss.GetPodList(client, statefulset2) replicas2 := *(statefulset2.Spec.Replicas) @@ -2571,9 +2571,9 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f ginkgo.By("Creating statefulsets sts1 with replica count 1 and sts2 with 5 and wait for all" + "the replicas to be running") - statefulset1, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 1, "web") + statefulset1, _, _ := createStsDeployment(ctx, client, namespace, 
sc, false, true, 1, "web", "") replicas1 := *(statefulset1.Spec.Replicas) - statefulset2, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 5, "web-nginx") + statefulset2, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 5, "web-nginx", "") ss2PodsBeforeScaleDown := fss.GetPodList(client, statefulset2) replicas2 := *(statefulset2.Spec.Replicas) @@ -2731,7 +2731,7 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f for i := 0; i < operationStormScale; i++ { statefulsetName := prefix1 + strconv.Itoa(i) framework.Logf("Creating statefulset: %s", statefulsetName) - statefulset, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 1, statefulsetName) + statefulset, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 1, statefulsetName, "") replicas1 = *(statefulset.Spec.Replicas) stsList = append(stsList, statefulset) } @@ -2740,7 +2740,7 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f for i := 0; i < operationStormScale; i++ { statefulsetName := prefix2 + strconv.Itoa(i) framework.Logf("Creating statefulset: %s", statefulsetName) - statefulset, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 2, statefulsetName) + statefulset, _, _ := createStsDeployment(ctx, client, namespace, sc, false, true, 2, statefulsetName, "") replicas2 = *(statefulset.Spec.Replicas) stsList = append(stsList, statefulset) } @@ -2946,7 +2946,7 @@ var _ = ginkgo.Describe("[vsan-stretch-vanilla] vsan stretched cluster tests", f ginkgo.By("Creating statefulset and deployment with volumes from the stretched datastore") statefulset, _, _ := createStsDeployment(ctx, client, namespace, sc, true, - false, 0, "") + false, 0, "", "") ssPodsBeforeScaleDown := fss.GetPodList(client, statefulset) replicas := *(statefulset.Spec.Replicas) csipods, err := client.CoreV1().Pods(csiNs).List(ctx, metav1.ListOptions{}) diff --git a/tests/e2e/vsan_stretched_cluster_utils.go b/tests/e2e/vsan_stretched_cluster_utils.go index cfcb37a3e5..48d1c4156d 100644 --- a/tests/e2e/vsan_stretched_cluster_utils.go +++ b/tests/e2e/vsan_stretched_cluster_utils.go @@ -27,6 +27,7 @@ import ( "sync" "time" + ginkgo "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/vmware/govmomi/find" vsan "github.com/vmware/govmomi/vsan" @@ -44,6 +45,7 @@ import ( fdep "k8s.io/kubernetes/test/e2e/framework/deployment" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" fpv "k8s.io/kubernetes/test/e2e/framework/pv" fss "k8s.io/kubernetes/test/e2e/framework/statefulset" "sigs.k8s.io/controller-runtime/pkg/client" @@ -83,6 +85,7 @@ func initialiseFdsVar(ctx context.Context) { // siteFailureInParallel causes site Failure in multiple hosts of the site in parallel func siteFailureInParallel(primarySite bool, wg *sync.WaitGroup) { + defer ginkgo.GinkgoRecover() defer wg.Done() siteFailover(primarySite) } @@ -266,6 +269,7 @@ func wait4AllK8sNodesToBeUp( // deletePodsInParallel deletes pods in a given namespace in parallel func deletePodsInParallel(client clientset.Interface, namespace string, pods []*v1.Pod, wg *sync.WaitGroup) { + defer ginkgo.GinkgoRecover() defer wg.Done() for _, pod := range pods { fpod.DeletePodOrFail(client, namespace, pod.Name) @@ -275,6 +279,7 @@ func deletePodsInParallel(client clientset.Interface, namespace string, pods []* // createPvcInParallel creates number of PVC in a given 
namespace in parallel func createPvcInParallel(client clientset.Interface, namespace string, diskSize string, sc *storagev1.StorageClass, ch chan *v1.PersistentVolumeClaim, lock *sync.Mutex, wg *sync.WaitGroup, volumeOpsScale int) { + defer ginkgo.GinkgoRecover() defer wg.Done() for i := 0; i < volumeOpsScale; i++ { pvc, err := createPVC(client, namespace, nil, diskSize, sc, "") @@ -324,6 +329,7 @@ func waitForPodsToBeInErrorOrRunning(c clientset.Interface, podName, namespace s // runCmdOnHostsInParallel runs command on multiple ESX in parallel func runCmdOnHostsInParallel(hostIP string, sshCmd string, wg *sync.WaitGroup) { + defer ginkgo.GinkgoRecover() defer wg.Done() op, err := runCommandOnESX("root", hostIP, sshCmd) framework.Logf(op) @@ -375,6 +381,7 @@ func toggleNetworkFailureParallel(hosts []string, causeNetworkFailure bool) { // deletePVCInParallel deletes PVC in a given namespace in parallel func deletePvcInParallel(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim, namespace string, wg *sync.WaitGroup) { + defer ginkgo.GinkgoRecover() defer wg.Done() for _, pvclaim := range pvclaims { err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) @@ -385,6 +392,8 @@ func deletePvcInParallel(client clientset.Interface, pvclaims []*v1.PersistentVo // createPodsInParallel creates Pods in a given namespace in parallel func createPodsInParallel(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, ctx context.Context, lock *sync.Mutex, ch chan *v1.Pod, wg *sync.WaitGroup, volumeOpsScale int) { + + defer ginkgo.GinkgoRecover() defer wg.Done() for i := 0; i < volumeOpsScale; i++ { @@ -401,6 +410,7 @@ func createPodsInParallel(client clientset.Interface, namespace string, pvclaims // updatePvcLabelsInParallel updates the labels of pvc in a namespace in parallel func updatePvcLabelsInParallel(ctx context.Context, client clientset.Interface, namespace string, labels map[string]string, pvclaims []*v1.PersistentVolumeClaim, wg *sync.WaitGroup) { + defer ginkgo.GinkgoRecover() defer wg.Done() for _, pvc := range pvclaims { framework.Logf(fmt.Sprintf("Updating labels %+v for pvc %s in namespace %s", @@ -418,12 +428,15 @@ func updatePvcLabelsInParallel(ctx context.Context, client clientset.Interface, // updatePvLabelsInParallel updates the labels of pv in parallel func updatePvLabelsInParallel(ctx context.Context, client clientset.Interface, namespace string, labels map[string]string, persistentVolumes []*v1.PersistentVolume, wg *sync.WaitGroup) { + defer ginkgo.GinkgoRecover() defer wg.Done() for _, pv := range persistentVolumes { - framework.Logf(fmt.Sprintf("Updating labels %+v for pv %s in namespace %s", - labels, pv.Name, namespace)) + framework.Logf("Updating labels %+v for pv %s in namespace %s", + labels, pv.Name, namespace) + pv, err := client.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) pv.Labels = labels - _, err := client.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{}) + _, err = client.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Error on updating pv labels is: %v", err) @@ -539,6 +552,7 @@ func changeLeaderOfContainerToComeUpOnMaster(ctx context.Context, client clients // the particular CSI container on the master node in parallel func invokeDockerPauseNKillOnContainerInParallel(sshClientConfig *ssh.ClientConfig, k8sMasterIp string, csiContainerName string, 
k8sVersion string, wg *sync.WaitGroup) { + defer ginkgo.GinkgoRecover() defer wg.Done() err := execDockerPauseNKillOnContainer(sshClientConfig, k8sMasterIp, csiContainerName, k8sVersion) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -588,12 +602,19 @@ func checkVmStorageCompliance(client clientset.Interface, storagePolicy string) // statefulset, deployment and volumes of statfulset created func createStsDeployment(ctx context.Context, client clientset.Interface, namespace string, sc *storagev1.StorageClass, isDeploymentRequired bool, modifyStsSpec bool, - replicaCount int32, stsName string) (*appsv1.StatefulSet, *appsv1.Deployment, []string) { + replicaCount int32, stsName string, + accessMode v1.PersistentVolumeAccessMode) (*appsv1.StatefulSet, *appsv1.Deployment, []string) { var pvclaims []*v1.PersistentVolumeClaim + if accessMode == "" { + // If accessMode is not specified, set the default accessMode. + accessMode = v1.ReadWriteOnce + } statefulset := GetStatefulSetFromManifest(namespace) framework.Logf("Creating statefulset") statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1]. - Annotations["volume.beta.kubernetes.io/storage-class"] = sc.Name + Spec.StorageClassName = &sc.Name + statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1].Spec.AccessModes[0] = + accessMode if modifyStsSpec { statefulset.Name = stsName statefulset.Spec.Template.Labels["app"] = statefulset.Name @@ -630,7 +651,7 @@ func createStsDeployment(ctx context.Context, client clientset.Interface, namesp } if isDeploymentRequired { framework.Logf("Creating PVC") - pvclaim, err := createPVC(client, namespace, nil, diskSize, sc, "") + pvclaim, err := createPVC(client, namespace, nil, diskSize, sc, accessMode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) pvclaims = append(pvclaims, pvclaim) persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) @@ -681,7 +702,7 @@ func volumeLifecycleActions(ctx context.Context, client clientset.Interface, nam "Volume is not attached to the node volHandle: %s, vmUUID: %s", volHandle, vmUUID) framework.Logf("Verify the volume is accessible") - _, err = framework.LookForStringInPodExec(namespace, pod1.Name, + _, err = e2eoutput.LookForStringInPodExec(namespace, pod1.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, "", time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -817,9 +838,11 @@ func scaleUpStsAndVerifyPodMetadata(ctx context.Context, client clientset.Interf _, err := e2eVSphere.getVMByUUID(ctx, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pv.Spec.CSI.VolumeHandle, vmUUID) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Disk is not attached to the node") - gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Disk is not attached") + if !rwxAccessMode { + isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pv.Spec.CSI.VolumeHandle, vmUUID) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Disk is not attached to the node") + gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Disk is not attached") + } framework.Logf("After scale up, verify the attached volumes match those in CNS Cache") err = verifyVolumeMetadataInCNS(&e2eVSphere, pv.Spec.CSI.VolumeHandle, volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name) @@ -832,6 +855,7 @@ func scaleUpStsAndVerifyPodMetadata(ctx context.Context, client clientset.Interf // deleteCsiPodInParallel deletes 
csi pod present in csi namespace in parallel func deleteCsiPodInParallel(client clientset.Interface, pod *v1.Pod, namespace string, wg *sync.WaitGroup) { + defer ginkgo.GinkgoRecover() defer wg.Done() framework.Logf("Deleting the pod: %s", pod.Name) err := fpod.DeletePodWithWait(client, pod) @@ -877,6 +901,7 @@ func hostFailure(esxHost string, hostDown bool) { // scaleStsReplicaInParallel scales statefulset's replica up/down in parallel func scaleStsReplicaInParallel(client clientset.Interface, stsList []*appsv1.StatefulSet, regex string, replicas int32, wg *sync.WaitGroup) { + defer ginkgo.GinkgoRecover() defer wg.Done() for _, statefulset := range stsList { if strings.Contains(statefulset.Name, regex) { @@ -888,6 +913,7 @@ func scaleStsReplicaInParallel(client clientset.Interface, stsList []*appsv1.Sta // deletePvInParallel deletes PVs in parallel from k8s cluster func deletePvInParallel(client clientset.Interface, persistentVolumes []*v1.PersistentVolume, wg *sync.WaitGroup) { + defer ginkgo.GinkgoRecover() defer wg.Done() for _, pv := range persistentVolumes { framework.Logf("Deleting pv %s", pv.Name) @@ -901,6 +927,7 @@ func deletePvInParallel(client clientset.Interface, persistentVolumes []*v1.Pers func createStaticPvAndPvcInParallel(client clientset.Interface, ctx context.Context, fcdIDs []string, ch chan *v1.PersistentVolumeClaim, namespace string, wg *sync.WaitGroup, volumeOpsScale int) { + defer ginkgo.GinkgoRecover() defer wg.Done() staticPVLabels := make(map[string]string) for i := 0; i < volumeOpsScale; i++ { @@ -928,6 +955,7 @@ func createStaticPvAndPvcInParallel(client clientset.Interface, ctx context.Cont // using triggerFullSync() here func triggerFullSyncInParallel(ctx context.Context, client clientset.Interface, cnsOperatorClient client.Client, wg *sync.WaitGroup) { + defer ginkgo.GinkgoRecover() defer wg.Done() err := waitForFullSyncToFinish(client, ctx, cnsOperatorClient) if err != nil { diff --git a/tests/e2e/vsphere.go b/tests/e2e/vsphere.go index 01af367db0..08668a2713 100644 --- a/tests/e2e/vsphere.go +++ b/tests/e2e/vsphere.go @@ -3,11 +3,13 @@ package e2e import ( "context" "encoding/json" + "errors" "fmt" "os" "reflect" "strconv" "strings" + "sync" "time" "github.com/davecgh/go-spew/spew" @@ -139,10 +141,16 @@ func (vs *vSphere) queryCNSVolumeSnapshotWithResult(fcdID string, } // verifySnapshotIsDeletedInCNS verifies the snapshotId's presence on CNS -func verifySnapshotIsDeletedInCNS(volumeId string, snapshotId string) error { +func verifySnapshotIsDeletedInCNS(volumeId string, snapshotId string, isMultiVcSetup bool) error { ginkgo.By(fmt.Sprintf("Invoking queryCNSVolumeSnapshotWithResult with VolumeID: %s and SnapshotID: %s", volumeId, snapshotId)) - querySnapshotResult, err := e2eVSphere.queryCNSVolumeSnapshotWithResult(volumeId, snapshotId) + var querySnapshotResult *cnstypes.CnsSnapshotQueryResult + var err error + if !isMultiVcSetup { + querySnapshotResult, err = e2eVSphere.queryCNSVolumeSnapshotWithResult(volumeId, snapshotId) + } else { + querySnapshotResult, err = multiVCe2eVSphere.queryCNSVolumeSnapshotWithResultInMultiVC(volumeId, snapshotId) + } gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Task result is %+v", querySnapshotResult)) gomega.Expect(querySnapshotResult.Entries).ShouldNot(gomega.BeEmpty()) @@ -154,10 +162,16 @@ func verifySnapshotIsDeletedInCNS(volumeId string, snapshotId string) error { } // verifySnapshotIsCreatedInCNS verifies the snapshotId's presence on CNS -func verifySnapshotIsCreatedInCNS(volumeId string, 
snapshotId string) error { +func verifySnapshotIsCreatedInCNS(volumeId string, snapshotId string, isMultiVC bool) error { ginkgo.By(fmt.Sprintf("Invoking queryCNSVolumeSnapshotWithResult with VolumeID: %s and SnapshotID: %s", volumeId, snapshotId)) - querySnapshotResult, err := e2eVSphere.queryCNSVolumeSnapshotWithResult(volumeId, snapshotId) + var querySnapshotResult *cnstypes.CnsSnapshotQueryResult + var err error + if !isMultiVC { + querySnapshotResult, err = e2eVSphere.queryCNSVolumeSnapshotWithResult(volumeId, snapshotId) + } else { + querySnapshotResult, err = multiVCe2eVSphere.queryCNSVolumeSnapshotWithResultInMultiVC(volumeId, snapshotId) + } gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Task result is %+v", querySnapshotResult)) gomega.Expect(querySnapshotResult.Entries).ShouldNot(gomega.BeEmpty()) @@ -448,7 +462,7 @@ func (vs *vSphere) waitForMetadataToBeDeleted(volumeID string, entityType string // waitForCNSVolumeToBeDeleted executes QueryVolume API on vCenter and verifies // volume entries are deleted from vCenter Database func (vs *vSphere) waitForCNSVolumeToBeDeleted(volumeID string) error { - err := wait.Poll(poll, pollTimeout, func() (bool, error) { + err := wait.Poll(poll, 2*pollTimeout, func() (bool, error) { queryResult, err := vs.queryCNSVolumeWithResult(volumeID) if err != nil { return true, err @@ -768,9 +782,14 @@ func (vs *vSphere) getVsanClusterResource(ctx context.Context, forceRefresh ...b } // getAllHostsIP reads cluster, gets hosts in it and returns IP array -func getAllHostsIP(ctx context.Context) []string { +func getAllHostsIP(ctx context.Context, forceRefresh ...bool) []string { var result []string - cluster := e2eVSphere.getVsanClusterResource(ctx) + refresh := false + if len(forceRefresh) > 0 { + refresh = forceRefresh[0] + } + + cluster := e2eVSphere.getVsanClusterResource(ctx, refresh) hosts, err := cluster.Hosts(ctx) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -783,7 +802,7 @@ func getAllHostsIP(ctx context.Context) []string { // getHostConnectionState reads cluster, gets hosts in it and returns connection state of host func getHostConnectionState(ctx context.Context, addr string) (string, error) { var state string - cluster := e2eVSphere.getVsanClusterResource(ctx) + cluster := e2eVSphere.getVsanClusterResource(ctx, true) hosts, err := cluster.Hosts(ctx) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1077,9 +1096,14 @@ func (vs *vSphere) verifyDatastoreMatch(volumeID string, dsUrls []string) { // cnsRelocateVolume relocates volume from one datastore to another using CNS relocate volume API func (vs *vSphere) cnsRelocateVolume(e2eVSphere vSphere, ctx context.Context, fcdID string, - dsRefDest vim25types.ManagedObjectReference) error { + dsRefDest vim25types.ManagedObjectReference, + waitForRelocateTaskToComplete ...bool) (*object.Task, error) { var pandoraSyncWaitTime int var err error + waitForTaskTocomplete := true + if len(waitForRelocateTaskToComplete) > 0 { + waitForTaskTocomplete = waitForRelocateTaskToComplete[0] + } if os.Getenv(envPandoraSyncWaitTime) != "" { pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1101,31 +1125,32 @@ func (vs *vSphere) cnsRelocateVolume(e2eVSphere vSphere, ctx context.Context, fc res, err := cnsmethods.CnsRelocateVolume(ctx, cnsClient, &req) framework.Logf("error is: %v", err) if err != nil { - return err - } - - task, err := object.NewTask(e2eVSphere.Client.Client, res.Returnval), nil - taskInfo, err := 
task.WaitForResult(ctx, nil) - framework.Logf("taskInfo: %v", taskInfo) - framework.Logf("error: %v", err) - if err != nil { - return err - } - taskResult, err := cns.GetTaskResult(ctx, taskInfo) - if err != nil { - return err + return nil, err } + task := object.NewTask(e2eVSphere.Client.Client, res.Returnval) + if waitForTaskTocomplete { + taskInfo, err := task.WaitForResult(ctx, nil) + framework.Logf("taskInfo: %v", taskInfo) + framework.Logf("error: %v", err) + if err != nil { + return nil, err + } + taskResult, err := cns.GetTaskResult(ctx, taskInfo) + if err != nil { + return nil, err + } - framework.Logf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime) - time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + framework.Logf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) - cnsRelocateVolumeRes := taskResult.GetCnsVolumeOperationResult() + cnsRelocateVolumeRes := taskResult.GetCnsVolumeOperationResult() - if cnsRelocateVolumeRes.Fault != nil { - err = fmt.Errorf("failed to relocate volume=%+v", cnsRelocateVolumeRes.Fault) - return err + if cnsRelocateVolumeRes.Fault != nil { + err = fmt.Errorf("failed to relocate volume=%+v", cnsRelocateVolumeRes.Fault) + return nil, err + } } - return nil + return task, nil } // fetchDsUrl4CnsVol executes query CNS volume to get the datastore @@ -1179,3 +1204,87 @@ func (vs *vSphere) deleteCNSvolume(volumeID string, isDeleteDisk bool) (*cnstype } return res, nil } + +// reconfigPolicy reconfigures given policy on the given volume +func (vs *vSphere) reconfigPolicy(ctx context.Context, volumeID string, profileID string) error { + cnsClient, err := newCnsClient(ctx, vs.Client.Client) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + CnsVolumeManagerInstance := vim25types.ManagedObjectReference{ + Type: "CnsVolumeManager", + Value: "cns-volume-manager", + } + req := cnstypes.CnsReconfigVolumePolicy{ + This: CnsVolumeManagerInstance, + VolumePolicyReconfigSpecs: []cnstypes.CnsVolumePolicyReconfigSpec{ + { + VolumeId: cnstypes.CnsVolumeId{Id: volumeID}, + Profile: []vim25types.BaseVirtualMachineProfileSpec{ + &vim25types.VirtualMachineDefinedProfileSpec{ + ProfileId: profileID, + }, + }, + }, + }, + } + res, err := cnsmethods.CnsReconfigVolumePolicy(ctx, cnsClient, &req) + if err != nil { + return err + } + task := object.NewTask(vs.Client.Client, res.Returnval) + taskInfo, err := cns.GetTaskInfo(ctx, task) + if err != nil { + return err + } + taskResult, err := cns.GetTaskResult(ctx, taskInfo) + if err != nil { + return err + } + if taskResult == nil { + return errors.New("TaskInfo result is empty") + } + reconfigVolumeOperationRes := taskResult.GetCnsVolumeOperationResult() + if reconfigVolumeOperationRes == nil { + return errors.New("cnsreconfigpolicy operation result is empty") + } + if reconfigVolumeOperationRes.Fault != nil { + return errors.New("cnsreconfigpolicy operation fault: " + reconfigVolumeOperationRes.Fault.LocalizedMessage) + } + framework.Logf("reconfigpolicy on volume %v with policy %v is successful", volumeID, profileID) + return nil +} + +// cnsRelocateVolumeInParallel relocates volume in parallel from one datastore to another +// using CNS API +func cnsRelocateVolumeInParallel(e2eVSphere vSphere, ctx context.Context, fcdID string, + dsRefDest vim25types.ManagedObjectReference, waitForRelocateTaskToComplete bool, + wg *sync.WaitGroup) { + defer ginkgo.GinkgoRecover() + defer wg.Done() + _, err := 
e2eVSphere.cnsRelocateVolume(e2eVSphere, ctx, fcdID, dsRefDest, waitForRelocateTaskToComplete) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + +} + +// waitForCNSTaskToComplete wait for CNS task to complete +// and gets the result and checks if any fault has occurred +func waitForCNSTaskToComplete(ctx context.Context, task *object.Task) *vim25types.LocalizedMethodFault { + var pandoraSyncWaitTime int + var err error + if os.Getenv(envPandoraSyncWaitTime) != "" { + pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + pandoraSyncWaitTime = defaultPandoraSyncWaitTime + } + + taskInfo, err := task.WaitForResult(ctx, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + taskResult, err := cns.GetTaskResult(ctx, taskInfo) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + framework.Logf("Sleeping for %v seconds to allow CNS to sync with pandora", pandoraSyncWaitTime) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + + cnsTaskRes := taskResult.GetCnsVolumeOperationResult() + return cnsTaskRes.Fault +} diff --git a/tests/e2e/vsphere_file_volume_basic_mount.go b/tests/e2e/vsphere_file_volume_basic_mount.go index 7a8d80f447..e1d3b2ad1e 100644 --- a/tests/e2e/vsphere_file_volume_basic_mount.go +++ b/tests/e2e/vsphere_file_volume_basic_mount.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" fpv "k8s.io/kubernetes/test/e2e/framework/pv" admissionapi "k8s.io/pod-security-admission/api" ) @@ -225,7 +226,7 @@ var _ = ginkgo.Describe("[csi-file-vanilla] Verify Two Pods can read write files //Create file1.txt on Pod1 ginkgo.By("Create file1.txt on Pod1") - err = framework.CreateEmptyFileOnPod(namespace, pod1.Name, filePath1) + err = e2eoutput.CreateEmptyFileOnPod(namespace, pod1.Name, filePath1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) //Write data on file1.txt on Pod1 @@ -281,7 +282,7 @@ var _ = ginkgo.Describe("[csi-file-vanilla] Verify Two Pods can read write files gomega.Expect(output == data).To(gomega.BeTrue(), "Pod2 is able to read file1 written by Pod1") //Create a file file2.txt from Pod2 - err = framework.CreateEmptyFileOnPod(namespace, pod2.Name, filePath2) + err = e2eoutput.CreateEmptyFileOnPod(namespace, pod2.Name, filePath2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) //Write to the file @@ -360,7 +361,7 @@ func invokeTestForCreateFileVolumeAndMount(f *framework.Framework, client client //Create file1.txt on Pod1 ginkgo.By("Create file1.txt on Pod1") - err = framework.CreateEmptyFileOnPod(namespace, pod1.Name, filePath1) + err = e2eoutput.CreateEmptyFileOnPod(namespace, pod1.Name, filePath1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) //Write data on file1.txt on Pod1 @@ -426,7 +427,7 @@ func invokeTestForCreateFileVolumeAndMount(f *framework.Framework, client client gomega.Expect(output == data).To(gomega.BeTrue(), "Pod2 is able to read file1 written by Pod1") //Create a file file2.txt from Pod2 - err = framework.CreateEmptyFileOnPod(namespace, pod2.Name, filePath2) + err = e2eoutput.CreateEmptyFileOnPod(namespace, pod2.Name, filePath2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) //Write to the file diff --git a/tests/e2e/vsphere_shared_datastore.go b/tests/e2e/vsphere_shared_datastore.go index 8f2c96a6c4..54b4b2547d 100644 --- a/tests/e2e/vsphere_shared_datastore.go +++ 
b/tests/e2e/vsphere_shared_datastore.go @@ -73,6 +73,15 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] "+ framework.Failf("Unable to find ready and schedulable Node") } }) + ginkgo.AfterEach(func() { + if supervisorCluster { + dumpSvcNsEventsOnTestFailure(client, namespace) + } + if guestCluster { + svcClient, svNamespace := getSvcClientAndNamespace() + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + } + }) // Shared datastore should be provisioned successfully. ginkgo.It("Verify dynamic provisioning of PV passes with user specified shared datastore and "+ diff --git a/tests/e2e/vsphere_volume_disksize.go b/tests/e2e/vsphere_volume_disksize.go index 88d1e2b701..e5d91b5729 100644 --- a/tests/e2e/vsphere_volume_disksize.go +++ b/tests/e2e/vsphere_volume_disksize.go @@ -75,10 +75,12 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] [csi-supervisor] ginkgo.AfterEach(func() { if supervisorCluster { deleteResourceQuota(client, namespace) + dumpSvcNsEventsOnTestFailure(client, namespace) } if guestCluster { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) } }) diff --git a/tests/e2e/vsphere_volume_expansion.go b/tests/e2e/vsphere_volume_expansion.go index e87d45dbe8..29a6e3f2aa 100644 --- a/tests/e2e/vsphere_volume_expansion.go +++ b/tests/e2e/vsphere_volume_expansion.go @@ -33,9 +33,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" admissionapi "k8s.io/pod-security-admission/api" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/kubernetes/test/e2e/framework" - cnstypes "github.com/vmware/govmomi/cns/types" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -43,9 +40,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubernetes/test/e2e/framework" fdep "k8s.io/kubernetes/test/e2e/framework/deployment" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" fpv "k8s.io/kubernetes/test/e2e/framework/pv" fvolume "k8s.io/kubernetes/test/e2e/framework/volume" cnsregistervolumev1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsregistervolume/v1alpha1" @@ -57,6 +58,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { f := framework.NewDefaultFramework("volume-expansion") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + framework.TestContext.DeleteNamespace = true var ( client clientset.Interface namespace string @@ -123,10 +125,12 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { if supervisorCluster { ginkgo.By("Delete Resource quota") deleteResourceQuota(client, namespace) + dumpSvcNsEventsOnTestFailure(client, namespace) } if guestCluster { svcClient, svNamespace := getSvcClientAndNamespace() setResourceQuota(svcClient, svNamespace, defaultrqLimit) + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) } }) @@ -173,7 +177,29 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.It("[csi-block-vanilla] [csi-guest] [csi-block-vanilla-parallelized] "+ "Verify volume expansion with initial filesystem before expansion", func() { - invokeTestForVolumeExpansionWithFilesystem(f, client, namespace, "", storagePolicyName, profileID) + 
invokeTestForVolumeExpansionWithFilesystem(f, client, namespace, ext4FSType, "", storagePolicyName, profileID) + }) + + // Test to verify offline volume expansion workflow with xfs filesystem. + + // Steps + // 1. Create StorageClass with fstype set to xfs and allowVolumeExpansion set to true. + // 2. Create PVC which uses the StorageClass created in step 1. + // 3. Wait for PV to be provisioned. + // 4. Wait for PVC's status to become Bound. + // 5. Create pod using PVC on specific node. + // 6. Wait for Disk to be attached to the node. + // 7. Detach the volume. + // 8. Modify PVC's size to trigger offline volume expansion. + // 9. Create pod again using PVC on specific node. + // 10. Wait for Disk to be attached to the node. + // 11. Wait for file system resize to complete. + // 12. Delete pod and Wait for Volume Disk to be detached from the Node. + // 13. Delete PVC, PV and Storage Class. + + ginkgo.It("[csi-block-vanilla] [csi-guest] [csi-block-vanilla-parallelized] "+ + "Verify offline volume expansion workflow with xfs filesystem", func() { + invokeTestForVolumeExpansionWithFilesystem(f, client, namespace, xfsFSType, xfsFSType, storagePolicyName, profileID) }) // Test to verify volume expansion is not supported if allowVolumeExpansion @@ -287,7 +313,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass with allowVolumeExpansion set to true, Create PVC") sharedVSANDatastoreURL := GetAndExpectStringEnvVar(envSharedDatastoreURL) volHandle, pvclaim, pv, storageclass = createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, sharedVSANDatastoreURL, storagePolicyName, namespace) + f, client, sharedVSANDatastoreURL, storagePolicyName, namespace, ext4FSType) defer func() { if !supervisorCluster { @@ -301,7 +327,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Create Pod using the above PVC") - pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle) + pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") defer func() { // Delete Pod. @@ -329,6 +355,64 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { increaseSizeOfPvcAttachedToPod(f, client, namespace, pvclaim, pod) }) + /* + Test to verify online volume expansion workflow with xfs filesystem + + 1. Create StorageClass with fstype set to xfs and allowVolumeExpansion set to true. + 2. Create PVC which uses the StorageClass created in step 1. + 3. Wait for PV to be provisioned. + 4. Wait for PVC's status to become Bound and note down the size + 5. Create a Pod using the above created PVC + 6. Modify PVC's size to trigger online volume expansion + 7. verify the PVC status will change to "FilesystemResizePending". Wait till the status is removed + 8. Verify the resized PVC by doing CNS query + 9. Make sure data is intact on the PV mounted on the pod + 10. 
Make sure file system has increased + */ + ginkgo.It("[csi-block-vanilla] [csi-block-vanilla-parallelized] "+ + "Verify online volume expansion workflow with xfs filesystem", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var storageclass *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var pv *v1.PersistentVolume + var volHandle string + + ginkgo.By("Create StorageClass with fstype set to xfs and allowVolumeExpansion set to true, Create PVC") + sharedVSANDatastoreURL := GetAndExpectStringEnvVar(envSharedDatastoreURL) + volHandle, pvclaim, pv, storageclass = createSCwithVolumeExpansionTrueAndDynamicPVC( + f, client, sharedVSANDatastoreURL, storagePolicyName, namespace, xfsFSType) + + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(pv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Create Pod using the above PVC") + pod, _ := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, xfsFSType) + + defer func() { + // Delete Pod. + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err := fpod.DeletePodWithWait(client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client, + pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(isDiskDetached).To(gomega.BeTrue(), + fmt.Sprintf("Volume %q is not detached from the node %q", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) + }() + + ginkgo.By("Increase PVC size and verify online volume resize") + increaseSizeOfPvcAttachedToPod(f, client, namespace, pvclaim, pod) + }) + // Verify online volume expansion on static volume. // // 1. Create FCD and wait for fcd to allow syncing with pandora. @@ -362,7 +446,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Create POD") - pod, _ := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle) + pod, _ := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") defer func() { // Delete Pod. 
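For context on what the xfs expansion tests above exercise: the suite's helpers (`createSCwithVolumeExpansionTrueAndDynamicPVC`, `createPODandVerifyVolumeMount`, `increaseSizeOfPvcAttachedToPod`, `waitForFSResize`) wrap the plain Kubernetes flow of raising a bound PVC's requested storage and waiting for the claim's reported capacity to catch up once the CSI controller has expanded the volume and kubelet has resized the filesystem. The following is a minimal sketch of that flow using only client-go; the function name, poll interval, and timeout are assumptions for illustration, not the suite's actual helper.

```go
// Minimal sketch (assumed names/timeouts, not the suite's helper): expand a
// bound PVC and wait for its reported capacity to reach the new size.
package e2esketch

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// expandPVCAndWait raises the PVC's storage request and polls until
// status.capacity reflects the new size, i.e. the volume has been expanded
// and (for online expansion) the filesystem resize has completed.
func expandPVCAndWait(ctx context.Context, c clientset.Interface, ns, name string,
	newSize resource.Quantity) (*v1.PersistentVolumeClaim, error) {
	pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	// Trigger expansion by bumping the requested storage on the claim.
	pvc.Spec.Resources.Requests[v1.ResourceStorage] = newSize
	if pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Update(ctx, pvc, metav1.UpdateOptions{}); err != nil {
		return nil, err
	}
	// Wait for the resize to finish; interval and timeout are illustrative.
	err = wait.PollImmediate(10*time.Second, 10*time.Minute, func() (bool, error) {
		pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		got := pvc.Status.Capacity[v1.ResourceStorage]
		return got.Cmp(newSize) >= 0, nil
	})
	if err != nil {
		return nil, fmt.Errorf("PVC %s/%s did not reach %s: %w", ns, name, newSize.String(), err)
	}
	return pvc, nil
}
```

For the offline variant the same request edit applies, but the pod is deleted first and recreated after the expansion, at which point kubelet performs the filesystem resize and the "FilesystemResizePending" condition on the PVC clears.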
@@ -400,7 +484,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass with allowVolumeExpansion set to true, Create PVC") volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, "", storagePolicyName, namespace) + f, client, "", storagePolicyName, namespace, ext4FSType) defer func() { if !supervisorCluster { @@ -414,7 +498,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Create POD") - pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle) + pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") defer func() { // Delete POD @@ -470,7 +554,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { defer cancel() volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, "", storagePolicyName, namespace) + f, client, "", storagePolicyName, namespace, ext4FSType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -483,7 +567,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Create POD") - pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle) + pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") defer func() { // Delete POD @@ -538,7 +622,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { var expectedErrMsg string volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, "", storagePolicyName, namespace) + f, client, "", storagePolicyName, namespace, ext4FSType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -551,7 +635,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Create POD") - pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle) + pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") //Fetch original FileSystemSize ginkgo.By("Verify filesystem size for mount point /mnt/volume1 before expansion") @@ -668,7 +752,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { //featureEnabled := isFssEnabled(vcAddress, cnsNewSyncFSS) volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, "", storagePolicyName, namespace) + f, client, "", storagePolicyName, namespace, ext4FSType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -681,7 +765,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Create POD") - pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle) + pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") //Fetch original FileSystemSize ginkgo.By("Verify filesystem size for mount point /mnt/volume1 before expansion") @@ -788,7 +872,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass with allowVolumeExpansion set to true, Create PVC") volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, "", storagePolicyName, namespace) + f, client, "", storagePolicyName, namespace, 
ext4FSType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -801,7 +885,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Create Pod using the above PVC") - pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle) + pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") //Fetch original FileSystemSize ginkgo.By("Verify filesystem size for mount point /mnt/volume1 before expansion") @@ -926,7 +1010,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass on shared VVOL datastore with allowVolumeExpansion set to true, Create PVC") volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, sharedVVOLdatastoreURL, storagePolicyName, namespace) + f, client, sharedVVOLdatastoreURL, storagePolicyName, namespace, ext4FSType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -945,7 +1029,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { if vanillaCluster || guestCluster { ginkgo.By("Create POD using the above PVC") - pod, vmUUID = createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle) + pod, vmUUID = createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") } defer func() { @@ -1016,7 +1100,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass on shared NFS datastore with allowVolumeExpansion set to true") volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, sharedNFSdatastoreURL, storagePolicyName, namespace) + f, client, sharedNFSdatastoreURL, storagePolicyName, namespace, ext4FSType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -1035,7 +1119,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { if vanillaCluster || guestCluster { ginkgo.By("Create POD using the above PVC") - pod, vmUUID = createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle) + pod, vmUUID = createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") } defer func() { @@ -1107,7 +1191,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass on shared VMFS datastore with allowVolumeExpansion set to true") volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, sharedVMFSdatastoreURL, storagePolicyName, namespace) + f, client, sharedVMFSdatastoreURL, storagePolicyName, namespace, ext4FSType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -1126,7 +1210,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { if vanillaCluster || guestCluster { ginkgo.By("Create POD using the above PVC") - pod, vmUUID = createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle) + pod, vmUUID = createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") } defer func() { @@ -1285,7 +1369,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") ginkgo.By("Verify the volume is accessible 
and filesystem type is as expected") - _, err = framework.LookForStringInPodExec(namespace, pod.Name, + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, "", time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1361,7 +1445,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass with allowVolumeExpansion set to true, Create PVC") sharedVSANDatastoreURL := GetAndExpectStringEnvVar(envSharedDatastoreURL) volHandle, pvclaim, pv, storageclass2 = createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, sharedVSANDatastoreURL, storagePolicyName2, namespace) + f, client, sharedVSANDatastoreURL, storagePolicyName2, namespace, ext4FSType) defer func() { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass2.Name, *metav1.NewDeleteOptions(0)) @@ -1386,7 +1470,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { gomega.Expect(err).To(gomega.HaveOccurred()) ginkgo.By("Create Pod using the above PVC") - pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle) + pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") defer func() { // Delete POD @@ -1452,7 +1536,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, "", storagePolicyName, namespace) + f, client, "", storagePolicyName, namespace, ext4FSType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -1550,7 +1634,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") ginkgo.By("Verify the volume is accessible and filesystem type is as expected") - _, err = framework.LookForStringInPodExec(namespace, pod.Name, + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, "", time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1603,7 +1687,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { var err error volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, "", storagePolicyName, namespace) + f, client, "", storagePolicyName, namespace, ext4FSType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -1699,7 +1783,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") ginkgo.By("Verify the volume is accessible and filesystem type is as expected") - _, err = framework.LookForStringInPodExec(namespace, pod.Name, + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, "", time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1757,7 +1841,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass with allowVolumeExpansion set to true, Create PVC") sharedVSANDatastoreURL := GetAndExpectStringEnvVar(envSharedDatastoreURL) volHandle, pvclaim, pv, storageclass = createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, sharedVSANDatastoreURL, storagePolicyName, namespace) + f, client, 
sharedVSANDatastoreURL, storagePolicyName, namespace, ext4FSType) defer func() { if !supervisorCluster { @@ -1771,7 +1855,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Create Pod using the above PVC") - pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle) + pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") defer func() { // Delete POD @@ -1830,7 +1914,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { } ginkgo.By("re-create Pod using the same PVC") - pod, vmUUID = createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle) + pod, vmUUID = createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") ginkgo.By("Waiting for file system resize to finish") pvclaim, err = waitForFSResize(pvclaim, client) @@ -1879,7 +1963,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass with allowVolumeExpansion set to true, Create PVC") sharedVSANDatastoreURL := GetAndExpectStringEnvVar(envSharedDatastoreURL) volHandle, pvclaim, pv, storageclass = createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, sharedVSANDatastoreURL, storagePolicyName, namespace) + f, client, sharedVSANDatastoreURL, storagePolicyName, namespace, ext4FSType) defer func() { if !supervisorCluster { @@ -1895,7 +1979,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Create Pod using the above PVC") - pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle) + pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") defer func() { // Delete POD @@ -1987,7 +2071,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass on shared VSAN datastore with allowVolumeExpansion set to true") sharedVSANDatastoreURL := GetAndExpectStringEnvVar(envSharedDatastoreURL) volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, sharedVSANDatastoreURL, storagePolicyName, namespace) + f, client, sharedVSANDatastoreURL, storagePolicyName, namespace, ext4FSType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -2000,7 +2084,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { }() ginkgo.By("Create Pod using the above PVC") - pod, vmUUID = createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle) + pod, vmUUID = createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") defer func() { // Delete POD ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) @@ -2110,7 +2194,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass on shared VSAN datastore with allowVolumeExpansion set to true") sharedVSANDatastoreURL := GetAndExpectStringEnvVar(envSharedDatastoreURL) volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, sharedVSANDatastoreURL, storagePolicyName, namespace) + f, client, sharedVSANDatastoreURL, storagePolicyName, namespace, ext4FSType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -2150,7 +2234,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { svcCsiDeployment = updateDeploymentReplica(client, 1, 
vSphereCSIControllerPodNamePrefix, csiSystemNamespace) ginkgo.By("Create Pod using the above PVC") - pod, vmUUID = createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle) + pod, vmUUID = createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") defer func() { // Delete POD @@ -2295,7 +2379,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass with allowVolumeExpansion set to true, Create PVC") sharedVSANDatastoreURL := GetAndExpectStringEnvVar(envSharedDatastoreURL) _, pvclaim, pv, storageclass = createSCwithVolumeExpansionTrueAndDynamicPVC( - f, client, sharedVSANDatastoreURL, storagePolicyName, namespace) + f, client, sharedVSANDatastoreURL, storagePolicyName, namespace, ext4FSType) defer func() { if !supervisorCluster { @@ -2497,10 +2581,10 @@ func createStaticPVC(ctx context.Context, f *framework.Framework, // allowVolumeExpansion set to true and Creates PVC. Waits till PV, PVC // are in bound. func createSCwithVolumeExpansionTrueAndDynamicPVC(f *framework.Framework, - client clientset.Interface, dsurl string, storagePolicyName string, - namespace string) (string, *v1.PersistentVolumeClaim, *v1.PersistentVolume, *storagev1.StorageClass) { + client clientset.Interface, dsurl string, storagePolicyName string, namespace string, + fstype string) (string, *v1.PersistentVolumeClaim, *v1.PersistentVolume, *storagev1.StorageClass) { scParameters := make(map[string]string) - scParameters[scParamFsType] = ext4FSType + scParameters[scParamFsType] = fstype // Create Storage class and PVC ginkgo.By("Creating Storage Class and PVC with allowVolumeExpansion = true") @@ -2553,7 +2637,7 @@ func createSCwithVolumeExpansionTrueAndDynamicPVC(f *framework.Framework, // createPODandVerifyVolumeMount this method creates Pod and verifies VolumeMount func createPODandVerifyVolumeMount(ctx context.Context, f *framework.Framework, client clientset.Interface, - namespace string, pvclaim *v1.PersistentVolumeClaim, volHandle string) (*v1.Pod, string) { + namespace string, pvclaim *v1.PersistentVolumeClaim, volHandle string, expectedContent string) (*v1.Pod, string) { // Create a Pod to use this PVC, and verify volume has been attached ginkgo.By("Creating pod to attach PV to the node") pod, err := createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execCommand) @@ -2579,8 +2663,8 @@ func createPODandVerifyVolumeMount(ctx context.Context, f *framework.Framework, "Volume is not attached to the node volHandle: %s, vmUUID: %s", volHandle, vmUUID) ginkgo.By("Verify the volume is accessible and filesystem type is as expected") - _, err = framework.LookForStringInPodExec(namespace, pod.Name, - []string{"/bin/cat", "/mnt/volume1/fstype"}, "", time.Minute) + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, + []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) return pod, vmUUID @@ -2591,10 +2675,12 @@ func increaseSizeOfPvcAttachedToPod(f *framework.Framework, client clientset.Int namespace string, pvclaim *v1.PersistentVolumeClaim, pod *v1.Pod) { var originalSizeInMb int64 var err error - //Fetch original FileSystemSize - ginkgo.By("Verify filesystem size for mount point /mnt/volume1 before expansion") - originalSizeInMb, err = getFSSizeMb(f, pod) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + //Fetch original FileSystemSize if not raw block volume + if *pvclaim.Spec.VolumeMode != v1.PersistentVolumeBlock { + ginkgo.By("Verify 
filesystem size for mount point /mnt/volume1 before expansion") + originalSizeInMb, err = getFSSizeMb(f, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } //resize PVC // Modify PVC spec to trigger volume expansion @@ -2614,18 +2700,22 @@ func increaseSizeOfPvcAttachedToPod(f *framework.Framework, client clientset.Int pvcConditions := pvclaim.Status.Conditions expectEqual(len(pvcConditions), 0, "pvc should not have conditions") - var fsSize int64 - ginkgo.By("Verify filesystem size for mount point /mnt/volume1") - fsSize, err = getFSSizeMb(f, pod) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework.Logf("File system size after expansion : %v", fsSize) - // Filesystem size may be smaller than the size of the block volume - // so here we are checking if the new filesystem size is greater than - // the original volume size as the filesystem is formatted for the - // first time - gomega.Expect(fsSize).Should(gomega.BeNumerically(">", originalSizeInMb), - fmt.Sprintf("error updating filesystem size for %q. Resulting filesystem size is %d", pvclaim.Name, fsSize)) - ginkgo.By("File system resize finished successfully") + if *pvclaim.Spec.VolumeMode != v1.PersistentVolumeBlock { + var fsSize int64 + ginkgo.By("Verify filesystem size for mount point /mnt/volume1") + fsSize, err = getFSSizeMb(f, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + framework.Logf("File system size after expansion : %v", fsSize) + // Filesystem size may be smaller than the size of the block volume + // so here we are checking if the new filesystem size is greater than + // the original volume size as the filesystem is formatted for the + // first time + gomega.Expect(fsSize).Should(gomega.BeNumerically(">", originalSizeInMb), + fmt.Sprintf("error updating filesystem size for %q. 
Resulting filesystem size is %d", pvclaim.Name, fsSize)) + ginkgo.By("File system resize finished successfully") + } else { + ginkgo.By("Volume resize finished successfully") + } } func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Interface, @@ -2781,7 +2871,7 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") ginkgo.By("Verify the volume is accessible and filesystem type is as expected") - _, err = framework.LookForStringInPodExec(namespace, pod.Name, + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2838,14 +2928,15 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter } func invokeTestForVolumeExpansionWithFilesystem(f *framework.Framework, client clientset.Interface, - namespace string, expectedContent string, storagePolicyName string, profileID string) { + namespace string, fstype string, expectedContent string, storagePolicyName string, profileID string) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ginkgo.By("Invoking Test for Volume Expansion 2") scParameters := make(map[string]string) - scParameters[scParamFsType] = ext4FSType + scParameters[scParamFsType] = fstype // Create Storage class and PVC - ginkgo.By("Creating Storage Class and PVC with allowVolumeExpansion = true") + ginkgo.By(fmt.Sprintf("Creating Storage Class with %s filesystem and PVC with allowVolumeExpansion = true", + fstype)) var storageclass *storagev1.StorageClass var pvclaim *v1.PersistentVolumeClaim var err error @@ -2904,7 +2995,7 @@ func invokeTestForVolumeExpansionWithFilesystem(f *framework.Framework, client c gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") ginkgo.By("Verify the volume is accessible and filesystem type is as expected") - _, err = framework.LookForStringInPodExec(namespace, pod.Name, + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2998,7 +3089,7 @@ func invokeTestForVolumeExpansionWithFilesystem(f *framework.Framework, client c gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") ginkgo.By("Verify after expansion the volume is accessible and filesystem type is as expected") - _, err = framework.LookForStringInPodExec(namespace, pod.Name, + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3434,7 +3525,7 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") ginkgo.By("Verify the volume is accessible and filesystem type is as expected") - _, err = framework.LookForStringInPodExec(namespace, pod.Name, + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -3641,7 +3732,7 @@ func getFSSizeMb(f *framework.Framework, pod *v1.Pod) (int64, error) { if supervisorCluster { namespace := getNamespaceToRunTests(f) cmd := []string{"exec", pod.Name, "--namespace=" + 
namespace, "--", "/bin/sh", "-c", "df -Tkm | grep /mnt/volume1"} - output = framework.RunKubectlOrDie(namespace, cmd...) + output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, ext4FSType)).NotTo(gomega.BeFalse()) } else { output, _, err = fvolume.PodExec(f, pod, "df -T -m | grep /mnt/volume1") @@ -3782,7 +3873,7 @@ func offlineVolumeExpansionOnSupervisorPVC(client clientset.Interface, f *framew gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Create Pod using the above PVC") - pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle) + pod, vmUUID := createPODandVerifyVolumeMount(ctx, f, client, namespace, pvclaim, volHandle, "") ginkgo.By("Waiting for file system resize to finish") pvclaim, err = waitForFSResize(pvclaim, client) diff --git a/tests/e2e/vsphere_volume_fsgroup.go b/tests/e2e/vsphere_volume_fsgroup.go index 4e9ab8bd7e..165f8be016 100644 --- a/tests/e2e/vsphere_volume_fsgroup.go +++ b/tests/e2e/vsphere_volume_fsgroup.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" fpv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -75,6 +76,15 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] [csi-guest] [csi framework.Failf("Unable to find ready and schedulable Node") } }) + ginkgo.AfterEach(func() { + if supervisorCluster { + dumpSvcNsEventsOnTestFailure(client, namespace) + } + if guestCluster { + svcClient, svNamespace := getSvcClientAndNamespace() + dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) + } + }) // Test for Pod creation works when SecurityContext has FSGroup ginkgo.It("Verify Pod Creation works when SecurityContext has FSGroup", func() { @@ -170,7 +180,7 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] [csi-guest] [csi ginkgo.By("Verify the volume is accessible and filegroup type is as expected") cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "ls -lh /mnt/volume1/fstype "} - output := framework.RunKubectlOrDie(namespace, cmd...) + output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
gomega.Expect(strings.Contains(output, strconv.Itoa(int(fsGroup)))).NotTo(gomega.BeFalse()) gomega.Expect(strings.Contains(output, strconv.Itoa(int(runAsUser)))).NotTo(gomega.BeFalse()) diff --git a/tests/e2e/vsphere_volume_fstype.go b/tests/e2e/vsphere_volume_fstype.go index b4bbd4b944..3e2fa561ec 100644 --- a/tests/e2e/vsphere_volume_fstype.go +++ b/tests/e2e/vsphere_volume_fstype.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" fpv "k8s.io/kubernetes/test/e2e/framework/pv" admissionapi "k8s.io/pod-security-admission/api" ) @@ -141,7 +142,7 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") ginkgo.By("Verify the volume is accessible and filesystem type is as expected") - _, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/tests/e2e/vsphere_volume_mount.go b/tests/e2e/vsphere_volume_mount.go index 02e7a46e82..e740797594 100644 --- a/tests/e2e/vsphere_volume_mount.go +++ b/tests/e2e/vsphere_volume_mount.go @@ -21,16 +21,17 @@ import ( "fmt" "strings" - cnstypes "github.com/vmware/govmomi/cns/types" - ginkgo "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + cnstypes "github.com/vmware/govmomi/cns/types" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" fnodes "k8s.io/kubernetes/test/e2e/framework/node" fpod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" fpv "k8s.io/kubernetes/test/e2e/framework/pv" admissionapi "k8s.io/pod-security-admission/api" ) @@ -174,20 +175,20 @@ func createFileVolumeAndMount(f *framework.Framework, client clientset.Interface // Create file1.txt on Pod1. filePath := mntPath + "file1.txt" ginkgo.By("Create file1.txt on Pod1") - err = framework.CreateEmptyFileOnPod(namespace, pod1.Name, filePath) + err = e2eoutput.CreateEmptyFileOnPod(namespace, pod1.Name, filePath) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Write data on file1.txt on Pod1. data := filePath ginkgo.By("Writing the file file1.txt from Pod1") - _, err = framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), pod1.Name, + _, err = e2ekubectl.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), pod1.Name, "--", "/bin/sh", "-c", fmt.Sprintf(" echo %s > %s ", data, filePath)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Read file1.txt created from Pod1. 
ginkgo.By("Read file1.txt from Pod1 created by Pod1") - output, err := framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), pod1.Name, + output, err := e2ekubectl.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), pod1.Name, "--", "/bin/sh", "-c", fmt.Sprintf("less %s", filePath)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("File contents from file1.txt are: %s", output)) @@ -219,7 +220,7 @@ func createFileVolumeAndMount(f *framework.Framework, client clientset.Interface // Read file1.txt created from Pod2. ginkgo.By("Read file1.txt from Pod2 created by Pod1") - output, err = framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), pod2.Name, + output, err = e2ekubectl.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), pod2.Name, "--", "/bin/sh", "-c", fmt.Sprintf("less %s", filePath)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("File contents from file1.txt are: %s", output)) @@ -227,19 +228,19 @@ func createFileVolumeAndMount(f *framework.Framework, client clientset.Interface // Create a file file2.txt from Pod2. filePath = mntPath + "file2.txt" - err = framework.CreateEmptyFileOnPod(namespace, pod2.Name, filePath) + err = e2eoutput.CreateEmptyFileOnPod(namespace, pod2.Name, filePath) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Write on file2.txt from Pod2") // Writing to the file. data = filePath - _, err = framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), pod2.Name, + _, err = e2ekubectl.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), pod2.Name, "--", "/bin/sh", "-c", fmt.Sprintf("echo %s > %s", data, filePath)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Read file2.txt created from Pod2. ginkgo.By("Read file2.txt from Pod2 created by Pod2") - output, err = framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), pod2.Name, + output, err = e2ekubectl.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), pod2.Name, "--", "/bin/sh", "-c", fmt.Sprintf("less %s", filePath)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("File contents from file2.txt are: %s", output)) diff --git a/tests/e2e/vsphere_volume_with_alpha_feature.go b/tests/e2e/vsphere_volume_with_alpha_feature.go new file mode 100644 index 0000000000..d97e1089a4 --- /dev/null +++ b/tests/e2e/vsphere_volume_with_alpha_feature.go @@ -0,0 +1,207 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "context" + "fmt" + "time" + + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + cnstypes "github.com/vmware/govmomi/cns/types" + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + fnodes "k8s.io/kubernetes/test/e2e/framework/node" + fpv "k8s.io/kubernetes/test/e2e/framework/pv" + admissionapi "k8s.io/pod-security-admission/api" +) + +var _ = ginkgo.Describe("Alpha feature check", func() { + + f := framework.NewDefaultFramework("alpha-features") + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + var ( + client clientset.Interface + namespace string + scParameters map[string]string + pvtoBackingdiskidAnnotation string = "cns.vmware.com/pv-to-backingdiskobjectid-mapping" + datastoreURL string + ) + ginkgo.BeforeEach(func() { + bootstrap() + client = f.ClientSet + namespace = getNamespaceToRunTests(f) + scParameters = make(map[string]string) + datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) + nodeList, err := fnodes.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + if !(len(nodeList.Items) > 0) { + framework.Failf("Unable to find ready and schedulable Node") + } + }) + + // Verify pvc is annotated in block vanilla setup. + // Steps + // Create a Storage Class. + // Create a PVC using above SC. + // Wait for PVC to be in Bound phase. + // Verify annotation is added on the PVC. + // Delete PVC. + // Verify PV entry is deleted from CNS. + // Delete the SC. + + ginkgo.It("[csi-block-vanilla-alpha-feature][pv-to-backingdiskobjectid-mapping] Verify pvc is annotated", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + featureEnabled := isCsiFssEnabled(ctx, client, GetAndExpectStringEnvVar(envCSINamespace), + "pv-to-backingdiskobjectid-mapping") + gomega.Expect(featureEnabled).To(gomega.BeTrue()) + ginkgo.By("Invoking test to verify pvc annotation") + var storageclass *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var pvclaims []*v1.PersistentVolumeClaim + var err error + + ginkgo.By("CNS_TEST: Running for vanilla k8s setup") + scParameters[scParamDatastoreURL] = datastoreURL + storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, + nil, scParameters, diskSize, nil, "", false, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + pvclaims = append(pvclaims, pvclaim) + + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if len(queryResult.Volumes) == 0 { + err = fmt.Errorf("QueryCNSVolumeWithResult returned no 
volume") + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Expect annotation is added on the pvc") + waitErr := wait.PollImmediate(pollTimeoutShort, pollTimeoutSixMin*3, func() (bool, error) { + var err error + ginkgo.By(fmt.Sprintf("Sleeping for %v minutes", pollTimeoutShort)) + pvc, err := client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(ctx, pvclaim.Name, metav1.GetOptions{}) + if err != nil { + return false, fmt.Errorf("error fetching pvc %q for checking pvtobackingdisk annotation: %v", pvc.Name, err) + } + pvbackingAnnotation := pvc.Annotations[pvtoBackingdiskidAnnotation] + if pvbackingAnnotation != "" { + return true, nil + } + return false, nil + }) + + gomega.Expect(waitErr).NotTo(gomega.HaveOccurred()) + }) + + // Verify pvc is not annotated in file vanilla setup. + // Steps + // Create a Storage Class. + // Create a PVC using above SC. + // Wait for PVC to be in Bound phase. + // Verify annotation is not added on the PVC. + // Delete PVC. + // Verify PV entry is deleted from CNS. + // Delete the SC. + + ginkgo.It("[csi-file-vanilla-alpha-feature][pv-to-backingdiskobjectid-mapping] "+ + "File Vanilla Verify pvc is not annotated", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + featureEnabled := isCsiFssEnabled(ctx, client, GetAndExpectStringEnvVar(envCSINamespace), + "pv-to-backingdiskobjectid-mapping") + gomega.Expect(featureEnabled).To(gomega.BeTrue()) + scParameters := make(map[string]string) + scParameters[scParamFsType] = nfs4FSType + accessMode := v1.ReadWriteMany + // Create Storage class and PVC. + ginkgo.By(fmt.Sprintf("Creating Storage Class with access mode %q and fstype %q", accessMode, nfs4FSType)) + var storageclass *storagev1.StorageClass + var pvclaim *v1.PersistentVolumeClaim + var err error + + storageclass, pvclaim, err = createPVCAndStorageClass(client, namespace, + nil, scParameters, "", nil, "", false, accessMode) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + // Waiting for PVC to be bound. 
+ var pvclaims []*v1.PersistentVolumeClaim + pvclaims = append(pvclaims, pvclaim) + ginkgo.By("Waiting for all claims to be in bound state") + persistentvolumes, err := fpv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + volHandle := persistentvolumes[0].Spec.CSI.VolumeHandle + defer func() { + err := fpv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle)) + queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(queryResult.Volumes).ShouldNot(gomega.BeEmpty()) + + ginkgo.By(fmt.Sprintf("volume Name:%s , capacity:%d volumeType:%s", queryResult.Volumes[0].Name, + queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsVsanFileShareBackingDetails).CapacityInMb, + queryResult.Volumes[0].VolumeType)) + + ginkgo.By("Verifying volume type specified in PVC is honored") + if queryResult.Volumes[0].VolumeType != testVolumeType { + err = fmt.Errorf("volume type is not %q", testVolumeType) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + ginkgo.By(fmt.Sprintf("Sleeping for %v minutes", pollTimeoutSixMin)) + time.Sleep(pollTimeoutSixMin) + ginkgo.By("Expect annotation is not added on the pvc") + pvc, err := client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(ctx, pvclaim.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for describe := range pvc.Annotations { + gomega.Expect(pvc.Annotations[describe]).ShouldNot(gomega.BeEquivalentTo(pvtoBackingdiskidAnnotation)) + } + }) +}) diff --git a/vendor/github.com/MakeNowJust/heredoc/README.md b/vendor/github.com/MakeNowJust/heredoc/README.md index e9924d2974..289ba31d6a 100644 --- a/vendor/github.com/MakeNowJust/heredoc/README.md +++ b/vendor/github.com/MakeNowJust/heredoc/README.md @@ -1,52 +1,52 @@ -# heredoc - -[![Build Status](https://circleci.com/gh/MakeNowJust/heredoc.svg?style=svg)](https://circleci.com/gh/MakeNowJust/heredoc) [![GoDoc](https://godoc.org/github.com/MakeNowJusti/heredoc?status.svg)](https://godoc.org/github.com/MakeNowJust/heredoc) - -## About - -Package heredoc provides the here-document with keeping indent. - -## Install - -```console -$ go get github.com/MakeNowJust/heredoc -``` - -## Import - -```go -// usual -import "github.com/MakeNowJust/heredoc" -``` - -## Example - -```go -package main - -import ( - "fmt" - "github.com/MakeNowJust/heredoc" -) - -func main() { - fmt.Println(heredoc.Doc(` - Lorem ipsum dolor sit amet, consectetur adipisicing elit, - sed do eiusmod tempor incididunt ut labore et dolore magna - aliqua. Ut enim ad minim veniam, ... - `)) - // Output: - // Lorem ipsum dolor sit amet, consectetur adipisicing elit, - // sed do eiusmod tempor incididunt ut labore et dolore magna - // aliqua. Ut enim ad minim veniam, ... - // -} -``` - -## API Document - - - [heredoc - GoDoc](https://godoc.org/github.com/MakeNowJust/heredoc) - -## License - -This software is released under the MIT License, see LICENSE. 
+# heredoc + +[![Build Status](https://circleci.com/gh/MakeNowJust/heredoc.svg?style=svg)](https://circleci.com/gh/MakeNowJust/heredoc) [![GoDoc](https://godoc.org/github.com/MakeNowJusti/heredoc?status.svg)](https://godoc.org/github.com/MakeNowJust/heredoc) + +## About + +Package heredoc provides the here-document with keeping indent. + +## Install + +```console +$ go get github.com/MakeNowJust/heredoc +``` + +## Import + +```go +// usual +import "github.com/MakeNowJust/heredoc" +``` + +## Example + +```go +package main + +import ( + "fmt" + "github.com/MakeNowJust/heredoc" +) + +func main() { + fmt.Println(heredoc.Doc(` + Lorem ipsum dolor sit amet, consectetur adipisicing elit, + sed do eiusmod tempor incididunt ut labore et dolore magna + aliqua. Ut enim ad minim veniam, ... + `)) + // Output: + // Lorem ipsum dolor sit amet, consectetur adipisicing elit, + // sed do eiusmod tempor incididunt ut labore et dolore magna + // aliqua. Ut enim ad minim veniam, ... + // +} +``` + +## API Document + + - [heredoc - GoDoc](https://godoc.org/github.com/MakeNowJust/heredoc) + +## License + +This software is released under the MIT License, see LICENSE. diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1/register.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1/register.go deleted file mode 100644 index 91f4514242..0000000000 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1/register.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package. -const GroupName = "snapshot.storage.k8s.io" - -var ( - // SchemeBuilder is the new scheme builder - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - // AddToScheme adds to scheme - AddToScheme = SchemeBuilder.AddToScheme - // SchemeGroupVersion is the group version used to register these objects. - SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} -) - -// Resource takes an unqualified resource and returns a Group-qualified GroupResource. -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - SchemeBuilder.Register(addKnownTypes) -} - -// addKnownTypes adds the set of types defined in this package to the supplied scheme. 
-func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &VolumeSnapshotClass{}, - &VolumeSnapshotClassList{}, - &VolumeSnapshot{}, - &VolumeSnapshotList{}, - &VolumeSnapshotContent{}, - &VolumeSnapshotContentList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1/types.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1/types.go deleted file mode 100644 index 1299d3655e..0000000000 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1/types.go +++ /dev/null @@ -1,395 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +kubebuilder:object:generate=true -package v1beta1 - -import ( - core_v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeSnapshot is a user's request for either creating a point-in-time -// snapshot of a persistent volume, or binding to a pre-existing snapshot. -// +kubebuilder:object:root=true -// +kubebuilder:resource:scope=Namespaced -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="ReadyToUse",type=boolean,JSONPath=`.status.readyToUse`,description="Indicates if a snapshot is ready to be used to restore a volume." -// +kubebuilder:printcolumn:name="SourcePVC",type=string,JSONPath=`.spec.source.persistentVolumeClaimName`,description="Name of the source PVC from where a dynamically taken snapshot will be created." -// +kubebuilder:printcolumn:name="SourceSnapshotContent",type=string,JSONPath=`.spec.source.volumeSnapshotContentName`,description="Name of the VolumeSnapshotContent which represents a pre-provisioned snapshot." -// +kubebuilder:printcolumn:name="RestoreSize",type=string,JSONPath=`.status.restoreSize`,description="Represents the complete size of the snapshot." -// +kubebuilder:printcolumn:name="SnapshotClass",type=string,JSONPath=`.spec.volumeSnapshotClassName`,description="The name of the VolumeSnapshotClass requested by the VolumeSnapshot." -// +kubebuilder:printcolumn:name="SnapshotContent",type=string,JSONPath=`.status.boundVolumeSnapshotContentName`,description="The name of the VolumeSnapshotContent to which this VolumeSnapshot is bound." -// +kubebuilder:printcolumn:name="CreationTime",type=date,JSONPath=`.status.creationTime`,description="Timestamp when the point-in-time snapshot is taken by the underlying storage system." -// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` -type VolumeSnapshot struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec defines the desired characteristics of a snapshot requested by a user. - // More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots - // Required. - Spec VolumeSnapshotSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // status represents the current information of a snapshot. - // NOTE: status can be modified by sources other than system controllers, - // and must not be depended upon for accuracy. - // Controllers should only use information from the VolumeSnapshotContent object - // after verifying that the binding is accurate and complete. - // +optional - Status *VolumeSnapshotStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// VolumeSnapshotList is a list of VolumeSnapshot objects -type VolumeSnapshotList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of VolumeSnapshots - Items []VolumeSnapshot `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// VolumeSnapshotSpec describes the common attributes of a volume snapshot. -type VolumeSnapshotSpec struct { - // source specifies where a snapshot will be created from. - // This field is immutable after creation. - // Required. - Source VolumeSnapshotSource `json:"source" protobuf:"bytes,1,opt,name=source"` - - // volumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. - // If not specified, the default snapshot class will be used if one exists. - // If not specified, and there is no default snapshot class, dynamic snapshot creation will fail. - // Empty string is not allowed for this field. - // TODO(xiangqian): a webhook validation on empty string. - // More info: https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes - // +optional - VolumeSnapshotClassName *string `json:"volumeSnapshotClassName,omitempty" protobuf:"bytes,2,opt,name=volumeSnapshotClassName"` -} - -// VolumeSnapshotSource specifies whether the underlying snapshot should be -// dynamically taken upon creation or if a pre-existing VolumeSnapshotContent -// object should be used. -// Exactly one of its members must be set. -// Members in VolumeSnapshotSource are immutable. -// TODO(xiangqian): Add a webhook to ensure that VolumeSnapshotSource members -// will not be updated once specified. -type VolumeSnapshotSource struct { - // persistentVolumeClaimName specifies the name of the PersistentVolumeClaim - // object in the same namespace as the VolumeSnapshot object where the - // snapshot should be dynamically taken from. - // This field is immutable. - // +optional - PersistentVolumeClaimName *string `json:"persistentVolumeClaimName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeClaimName"` - - // volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent - // object. - // This field is immutable. 
- // +optional - VolumeSnapshotContentName *string `json:"volumeSnapshotContentName,omitempty" protobuf:"bytes,2,opt,name=volumeSnapshotContentName"` -} - -// VolumeSnapshotStatus is the status of the VolumeSnapshot -type VolumeSnapshotStatus struct { - // boundVolumeSnapshotContentName represents the name of the VolumeSnapshotContent - // object to which the VolumeSnapshot object is bound. - // If not specified, it indicates that the VolumeSnapshot object has not been - // successfully bound to a VolumeSnapshotContent object yet. - // NOTE: Specified boundVolumeSnapshotContentName alone does not mean binding - // is valid. Controllers MUST always verify bidirectional binding between - // VolumeSnapshot and VolumeSnapshotContent to avoid possible security issues. - // +optional - BoundVolumeSnapshotContentName *string `json:"boundVolumeSnapshotContentName,omitempty" protobuf:"bytes,1,opt,name=boundVolumeSnapshotContentName"` - - // creationTime is the timestamp when the point-in-time snapshot is taken - // by the underlying storage system. - // In dynamic snapshot creation case, this field will be filled in with the - // "creation_time" value returned from CSI "CreateSnapshotRequest" gRPC call. - // For a pre-existing snapshot, this field will be filled with the "creation_time" - // value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. - // If not specified, it indicates that the creation time of the snapshot is unknown. - // +optional - CreationTime *metav1.Time `json:"creationTime,omitempty" protobuf:"bytes,2,opt,name=creationTime"` - - // readyToUse indicates if a snapshot is ready to be used to restore a volume. - // In dynamic snapshot creation case, this field will be filled in with the - // "ready_to_use" value returned from CSI "CreateSnapshotRequest" gRPC call. - // For a pre-existing snapshot, this field will be filled with the "ready_to_use" - // value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, - // otherwise, this field will be set to "True". - // If not specified, it means the readiness of a snapshot is unknown. - // +optional - ReadyToUse *bool `json:"readyToUse,omitempty" protobuf:"varint,3,opt,name=readyToUse"` - - // restoreSize represents the complete size of the snapshot in bytes. - // In dynamic snapshot creation case, this field will be filled in with the - // "size_bytes" value returned from CSI "CreateSnapshotRequest" gRPC call. - // For a pre-existing snapshot, this field will be filled with the "size_bytes" - // value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. - // When restoring a volume from this snapshot, the size of the volume MUST NOT - // be smaller than the restoreSize if it is specified, otherwise the restoration will fail. - // If not specified, it indicates that the size is unknown. - // +optional - RestoreSize *resource.Quantity `json:"restoreSize,omitempty" protobuf:"bytes,4,opt,name=restoreSize"` - - // error is the last observed error during snapshot creation, if any. - // This field could be helpful to upper level controllers(i.e., application controller) - // to decide whether they should continue on waiting for the snapshot to be created - // based on the type of error reported. 
- // +optional - Error *VolumeSnapshotError `json:"error,omitempty" protobuf:"bytes,5,opt,name=error,casttype=VolumeSnapshotError"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeSnapshotClass specifies parameters that a underlying storage system uses when -// creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its -// name in a VolumeSnapshot object. -// VolumeSnapshotClasses are non-namespaced -// +kubebuilder:object:root=true -// +kubebuilder:resource:scope=Cluster -// +kubebuilder:printcolumn:name="Driver",type=string,JSONPath=`.driver` -// +kubebuilder:printcolumn:name="DeletionPolicy",type=string,JSONPath=`.deletionPolicy`,description="Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted." -// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` -type VolumeSnapshotClass struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // driver is the name of the storage driver that handles this VolumeSnapshotClass. - // Required. - Driver string `json:"driver" protobuf:"bytes,2,opt,name=driver"` - - // parameters is a key-value map with storage driver specific parameters for creating snapshots. - // These values are opaque to Kubernetes. - // +optional - Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` - - // deletionPolicy determines whether a VolumeSnapshotContent created through - // the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. - // Supported values are "Retain" and "Delete". - // "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. - // "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. - // Required. - DeletionPolicy DeletionPolicy `json:"deletionPolicy" protobuf:"bytes,4,opt,name=deletionPolicy"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeSnapshotClassList is a collection of VolumeSnapshotClasses. -// +kubebuilder:object:root=true -type VolumeSnapshotClassList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of VolumeSnapshotClasses - Items []VolumeSnapshotClass `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeSnapshotContent represents the actual "on-disk" snapshot object in the -// underlying storage system -// +kubebuilder:object:root=true -// +kubebuilder:resource:scope=Cluster -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="ReadyToUse",type=boolean,JSONPath=`.status.readyToUse`,description="Indicates if a snapshot is ready to be used to restore a volume." 
-// +kubebuilder:printcolumn:name="RestoreSize",type=integer,JSONPath=`.status.restoreSize`,description="Represents the complete size of the snapshot in bytes" -// +kubebuilder:printcolumn:name="DeletionPolicy",type=string,JSONPath=`.spec.deletionPolicy`,description="Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted." -// +kubebuilder:printcolumn:name="Driver",type=string,JSONPath=`.spec.driver`,description="Name of the CSI driver used to create the physical snapshot on the underlying storage system." -// +kubebuilder:printcolumn:name="VolumeSnapshotClass",type=string,JSONPath=`.spec.volumeSnapshotClassName`,description="Name of the VolumeSnapshotClass to which this snapshot belongs." -// +kubebuilder:printcolumn:name="VolumeSnapshot",type=string,JSONPath=`.spec.volumeSnapshotRef.name`,description="Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound." -// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` -type VolumeSnapshotContent struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec defines properties of a VolumeSnapshotContent created by the underlying storage system. - // Required. - Spec VolumeSnapshotContentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // status represents the current information of a snapshot. - // +optional - Status *VolumeSnapshotContentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeSnapshotContentList is a list of VolumeSnapshotContent objects -// +kubebuilder:object:root=true -type VolumeSnapshotContentList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of VolumeSnapshotContents - Items []VolumeSnapshotContent `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// VolumeSnapshotContentSpec is the specification of a VolumeSnapshotContent -type VolumeSnapshotContentSpec struct { - // volumeSnapshotRef specifies the VolumeSnapshot object to which this - // VolumeSnapshotContent object is bound. - // VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to - // this VolumeSnapshotContent's name for the bidirectional binding to be valid. - // For a pre-existing VolumeSnapshotContent object, name and namespace of the - // VolumeSnapshot object MUST be provided for binding to happen. - // This field is immutable after creation. - // Required. - VolumeSnapshotRef core_v1.ObjectReference `json:"volumeSnapshotRef" protobuf:"bytes,1,opt,name=volumeSnapshotRef"` - - // deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on - // the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. - // Supported values are "Retain" and "Delete". - // "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. - // "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. 
- // In dynamic snapshot creation case, this field will be filled in with the "DeletionPolicy" field defined in the - // VolumeSnapshotClass the VolumeSnapshot refers to. - // For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. - // Required. - DeletionPolicy DeletionPolicy `json:"deletionPolicy" protobuf:"bytes,2,opt,name=deletionPolicy"` - - // driver is the name of the CSI driver used to create the physical snapshot on - // the underlying storage system. - // This MUST be the same as the name returned by the CSI GetPluginName() call for - // that driver. - // Required. - Driver string `json:"driver" protobuf:"bytes,3,opt,name=driver"` - - // name of the VolumeSnapshotClass to which this snapshot belongs. - // +optional - VolumeSnapshotClassName *string `json:"volumeSnapshotClassName,omitempty" protobuf:"bytes,4,opt,name=volumeSnapshotClassName"` - - // source specifies from where a snapshot will be created. - // This field is immutable after creation. - // Required. - Source VolumeSnapshotContentSource `json:"source" protobuf:"bytes,5,opt,name=source"` -} - -// VolumeSnapshotContentSource represents the CSI source of a snapshot. -// Exactly one of its members must be set. -// Members in VolumeSnapshotContentSource are immutable. -// TODO(xiangqian): Add a webhook to ensure that VolumeSnapshotContentSource members -// will be immutable once specified. -type VolumeSnapshotContentSource struct { - // volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot - // should be dynamically taken from. - // This field is immutable. - // +optional - VolumeHandle *string `json:"volumeHandle,omitempty" protobuf:"bytes,1,opt,name=volumeHandle"` - - // snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on - // the underlying storage system. - // This field is immutable. - // +optional - SnapshotHandle *string `json:"snapshotHandle,omitempty" protobuf:"bytes,2,opt,name=snapshotHandle"` -} - -// VolumeSnapshotContentStatus is the status of a VolumeSnapshotContent object -type VolumeSnapshotContentStatus struct { - // snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. - // If not specified, it indicates that dynamic snapshot creation has either failed - // or it is still in progress. - // +optional - SnapshotHandle *string `json:"snapshotHandle,omitempty" protobuf:"bytes,1,opt,name=snapshotHandle"` - - // creationTime is the timestamp when the point-in-time snapshot is taken - // by the underlying storage system. - // In dynamic snapshot creation case, this field will be filled in with the - // "creation_time" value returned from CSI "CreateSnapshotRequest" gRPC call. - // For a pre-existing snapshot, this field will be filled with the "creation_time" - // value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. - // If not specified, it indicates the creation time is unknown. - // The format of this field is a Unix nanoseconds time encoded as an int64. - // On Unix, the command `date +%s%N` returns the current time in nanoseconds - // since 1970-01-01 00:00:00 UTC. - // +optional - CreationTime *int64 `json:"creationTime,omitempty" protobuf:"varint,2,opt,name=creationTime"` - - // restoreSize represents the complete size of the snapshot in bytes. - // In dynamic snapshot creation case, this field will be filled in with the - // "size_bytes" value returned from CSI "CreateSnapshotRequest" gRPC call. 
- // For a pre-existing snapshot, this field will be filled with the "size_bytes" - // value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. - // When restoring a volume from this snapshot, the size of the volume MUST NOT - // be smaller than the restoreSize if it is specified, otherwise the restoration will fail. - // If not specified, it indicates that the size is unknown. - // +kubebuilder:validation:Minimum=0 - // +optional - RestoreSize *int64 `json:"restoreSize,omitempty" protobuf:"bytes,3,opt,name=restoreSize"` - - // readyToUse indicates if a snapshot is ready to be used to restore a volume. - // In dynamic snapshot creation case, this field will be filled in with the - // "ready_to_use" value returned from CSI "CreateSnapshotRequest" gRPC call. - // For a pre-existing snapshot, this field will be filled with the "ready_to_use" - // value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, - // otherwise, this field will be set to "True". - // If not specified, it means the readiness of a snapshot is unknown. - // +optional. - ReadyToUse *bool `json:"readyToUse,omitempty" protobuf:"varint,4,opt,name=readyToUse"` - - // error is the latest observed error during snapshot creation, if any. - // +optional - Error *VolumeSnapshotError `json:"error,omitempty" protobuf:"bytes,5,opt,name=error,casttype=VolumeSnapshotError"` -} - -// DeletionPolicy describes a policy for end-of-life maintenance of volume snapshot contents -// +kubebuilder:validation:Enum=Delete;Retain -type DeletionPolicy string - -const ( - // volumeSnapshotContentDelete means the snapshot will be deleted from the - // underlying storage system on release from its volume snapshot. - VolumeSnapshotContentDelete DeletionPolicy = "Delete" - - // volumeSnapshotContentRetain means the snapshot will be left in its current - // state on release from its volume snapshot. - VolumeSnapshotContentRetain DeletionPolicy = "Retain" -) - -// VolumeSnapshotError describes an error encountered during snapshot creation. -type VolumeSnapshotError struct { - // time is the timestamp when the error was encountered. - // +optional - Time *metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - - // message is a string detailing the encountered error during snapshot - // creation if specified. - // NOTE: message may be logged, and it should not contain sensitive - // information. - // +optional - Message *string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` -} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 550ff57f8a..0000000000 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,424 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshot) DeepCopyInto(out *VolumeSnapshot) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(VolumeSnapshotStatus) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshot. -func (in *VolumeSnapshot) DeepCopy() *VolumeSnapshot { - if in == nil { - return nil - } - out := new(VolumeSnapshot) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeSnapshot) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotClass) DeepCopyInto(out *VolumeSnapshotClass) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotClass. -func (in *VolumeSnapshotClass) DeepCopy() *VolumeSnapshotClass { - if in == nil { - return nil - } - out := new(VolumeSnapshotClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeSnapshotClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotClassList) DeepCopyInto(out *VolumeSnapshotClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]VolumeSnapshotClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotClassList. -func (in *VolumeSnapshotClassList) DeepCopy() *VolumeSnapshotClassList { - if in == nil { - return nil - } - out := new(VolumeSnapshotClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeSnapshotClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VolumeSnapshotContent) DeepCopyInto(out *VolumeSnapshotContent) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(VolumeSnapshotContentStatus) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotContent. -func (in *VolumeSnapshotContent) DeepCopy() *VolumeSnapshotContent { - if in == nil { - return nil - } - out := new(VolumeSnapshotContent) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeSnapshotContent) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotContentList) DeepCopyInto(out *VolumeSnapshotContentList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]VolumeSnapshotContent, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotContentList. -func (in *VolumeSnapshotContentList) DeepCopy() *VolumeSnapshotContentList { - if in == nil { - return nil - } - out := new(VolumeSnapshotContentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeSnapshotContentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotContentSource) DeepCopyInto(out *VolumeSnapshotContentSource) { - *out = *in - if in.VolumeHandle != nil { - in, out := &in.VolumeHandle, &out.VolumeHandle - *out = new(string) - **out = **in - } - if in.SnapshotHandle != nil { - in, out := &in.SnapshotHandle, &out.SnapshotHandle - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotContentSource. -func (in *VolumeSnapshotContentSource) DeepCopy() *VolumeSnapshotContentSource { - if in == nil { - return nil - } - out := new(VolumeSnapshotContentSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotContentSpec) DeepCopyInto(out *VolumeSnapshotContentSpec) { - *out = *in - out.VolumeSnapshotRef = in.VolumeSnapshotRef - if in.VolumeSnapshotClassName != nil { - in, out := &in.VolumeSnapshotClassName, &out.VolumeSnapshotClassName - *out = new(string) - **out = **in - } - in.Source.DeepCopyInto(&out.Source) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotContentSpec. 
-func (in *VolumeSnapshotContentSpec) DeepCopy() *VolumeSnapshotContentSpec { - if in == nil { - return nil - } - out := new(VolumeSnapshotContentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotContentStatus) DeepCopyInto(out *VolumeSnapshotContentStatus) { - *out = *in - if in.SnapshotHandle != nil { - in, out := &in.SnapshotHandle, &out.SnapshotHandle - *out = new(string) - **out = **in - } - if in.CreationTime != nil { - in, out := &in.CreationTime, &out.CreationTime - *out = new(int64) - **out = **in - } - if in.RestoreSize != nil { - in, out := &in.RestoreSize, &out.RestoreSize - *out = new(int64) - **out = **in - } - if in.ReadyToUse != nil { - in, out := &in.ReadyToUse, &out.ReadyToUse - *out = new(bool) - **out = **in - } - if in.Error != nil { - in, out := &in.Error, &out.Error - *out = new(VolumeSnapshotError) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotContentStatus. -func (in *VolumeSnapshotContentStatus) DeepCopy() *VolumeSnapshotContentStatus { - if in == nil { - return nil - } - out := new(VolumeSnapshotContentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotError) DeepCopyInto(out *VolumeSnapshotError) { - *out = *in - if in.Time != nil { - in, out := &in.Time, &out.Time - *out = (*in).DeepCopy() - } - if in.Message != nil { - in, out := &in.Message, &out.Message - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotError. -func (in *VolumeSnapshotError) DeepCopy() *VolumeSnapshotError { - if in == nil { - return nil - } - out := new(VolumeSnapshotError) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotList) DeepCopyInto(out *VolumeSnapshotList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]VolumeSnapshot, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotList. -func (in *VolumeSnapshotList) DeepCopy() *VolumeSnapshotList { - if in == nil { - return nil - } - out := new(VolumeSnapshotList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeSnapshotList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VolumeSnapshotSource) DeepCopyInto(out *VolumeSnapshotSource) { - *out = *in - if in.PersistentVolumeClaimName != nil { - in, out := &in.PersistentVolumeClaimName, &out.PersistentVolumeClaimName - *out = new(string) - **out = **in - } - if in.VolumeSnapshotContentName != nil { - in, out := &in.VolumeSnapshotContentName, &out.VolumeSnapshotContentName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotSource. -func (in *VolumeSnapshotSource) DeepCopy() *VolumeSnapshotSource { - if in == nil { - return nil - } - out := new(VolumeSnapshotSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotSpec) DeepCopyInto(out *VolumeSnapshotSpec) { - *out = *in - in.Source.DeepCopyInto(&out.Source) - if in.VolumeSnapshotClassName != nil { - in, out := &in.VolumeSnapshotClassName, &out.VolumeSnapshotClassName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotSpec. -func (in *VolumeSnapshotSpec) DeepCopy() *VolumeSnapshotSpec { - if in == nil { - return nil - } - out := new(VolumeSnapshotSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSnapshotStatus) DeepCopyInto(out *VolumeSnapshotStatus) { - *out = *in - if in.BoundVolumeSnapshotContentName != nil { - in, out := &in.BoundVolumeSnapshotContentName, &out.BoundVolumeSnapshotContentName - *out = new(string) - **out = **in - } - if in.CreationTime != nil { - in, out := &in.CreationTime, &out.CreationTime - *out = (*in).DeepCopy() - } - if in.ReadyToUse != nil { - in, out := &in.ReadyToUse, &out.ReadyToUse - *out = new(bool) - **out = **in - } - if in.RestoreSize != nil { - in, out := &in.RestoreSize, &out.RestoreSize - x := (*in).DeepCopy() - *out = &x - } - if in.Error != nil { - in, out := &in.Error, &out.Error - *out = new(VolumeSnapshotError) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotStatus. -func (in *VolumeSnapshotStatus) DeepCopy() *VolumeSnapshotStatus { - if in == nil { - return nil - } - out := new(VolumeSnapshotStatus) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/doc.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/doc.go deleted file mode 100644 index 68f9a55b51..0000000000 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1beta1 diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/fake/doc.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/fake/doc.go deleted file mode 100644 index 0243e68ff4..0000000000 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/fake/fake_volumesnapshot.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/fake/fake_volumesnapshot.go deleted file mode 100644 index 532f4d08af..0000000000 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/fake/fake_volumesnapshot.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeVolumeSnapshots implements VolumeSnapshotInterface -type FakeVolumeSnapshots struct { - Fake *FakeSnapshotV1beta1 - ns string -} - -var volumesnapshotsResource = schema.GroupVersionResource{Group: "snapshot.storage.k8s.io", Version: "v1beta1", Resource: "volumesnapshots"} - -var volumesnapshotsKind = schema.GroupVersionKind{Group: "snapshot.storage.k8s.io", Version: "v1beta1", Kind: "VolumeSnapshot"} - -// Get takes name of the volumeSnapshot, and returns the corresponding volumeSnapshot object, and an error if there is any. 
-func (c *FakeVolumeSnapshots) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeSnapshot, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(volumesnapshotsResource, c.ns, name), &v1beta1.VolumeSnapshot{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeSnapshot), err -} - -// List takes label and field selectors, and returns the list of VolumeSnapshots that match those selectors. -func (c *FakeVolumeSnapshots) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeSnapshotList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(volumesnapshotsResource, volumesnapshotsKind, c.ns, opts), &v1beta1.VolumeSnapshotList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.VolumeSnapshotList{ListMeta: obj.(*v1beta1.VolumeSnapshotList).ListMeta} - for _, item := range obj.(*v1beta1.VolumeSnapshotList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested volumeSnapshots. -func (c *FakeVolumeSnapshots) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(volumesnapshotsResource, c.ns, opts)) - -} - -// Create takes the representation of a volumeSnapshot and creates it. Returns the server's representation of the volumeSnapshot, and an error, if there is any. -func (c *FakeVolumeSnapshots) Create(ctx context.Context, volumeSnapshot *v1beta1.VolumeSnapshot, opts v1.CreateOptions) (result *v1beta1.VolumeSnapshot, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(volumesnapshotsResource, c.ns, volumeSnapshot), &v1beta1.VolumeSnapshot{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeSnapshot), err -} - -// Update takes the representation of a volumeSnapshot and updates it. Returns the server's representation of the volumeSnapshot, and an error, if there is any. -func (c *FakeVolumeSnapshots) Update(ctx context.Context, volumeSnapshot *v1beta1.VolumeSnapshot, opts v1.UpdateOptions) (result *v1beta1.VolumeSnapshot, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(volumesnapshotsResource, c.ns, volumeSnapshot), &v1beta1.VolumeSnapshot{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeSnapshot), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeSnapshots) UpdateStatus(ctx context.Context, volumeSnapshot *v1beta1.VolumeSnapshot, opts v1.UpdateOptions) (*v1beta1.VolumeSnapshot, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(volumesnapshotsResource, "status", c.ns, volumeSnapshot), &v1beta1.VolumeSnapshot{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeSnapshot), err -} - -// Delete takes name of the volumeSnapshot and deletes it. Returns an error if one occurs. -func (c *FakeVolumeSnapshots) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(volumesnapshotsResource, c.ns, name), &v1beta1.VolumeSnapshot{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeVolumeSnapshots) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(volumesnapshotsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.VolumeSnapshotList{}) - return err -} - -// Patch applies the patch and returns the patched volumeSnapshot. -func (c *FakeVolumeSnapshots) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeSnapshot, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(volumesnapshotsResource, c.ns, name, pt, data, subresources...), &v1beta1.VolumeSnapshot{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeSnapshot), err -} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/fake/fake_volumesnapshot_client.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/fake/fake_volumesnapshot_client.go deleted file mode 100644 index dde6a3fb52..0000000000 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/fake/fake_volumesnapshot_client.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeSnapshotV1beta1 struct { - *testing.Fake -} - -func (c *FakeSnapshotV1beta1) VolumeSnapshots(namespace string) v1beta1.VolumeSnapshotInterface { - return &FakeVolumeSnapshots{c, namespace} -} - -func (c *FakeSnapshotV1beta1) VolumeSnapshotClasses() v1beta1.VolumeSnapshotClassInterface { - return &FakeVolumeSnapshotClasses{c} -} - -func (c *FakeSnapshotV1beta1) VolumeSnapshotContents() v1beta1.VolumeSnapshotContentInterface { - return &FakeVolumeSnapshotContents{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeSnapshotV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/fake/fake_volumesnapshotclass.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/fake/fake_volumesnapshotclass.go deleted file mode 100644 index 2decfe30a6..0000000000 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/fake/fake_volumesnapshotclass.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeVolumeSnapshotClasses implements VolumeSnapshotClassInterface -type FakeVolumeSnapshotClasses struct { - Fake *FakeSnapshotV1beta1 -} - -var volumesnapshotclassesResource = schema.GroupVersionResource{Group: "snapshot.storage.k8s.io", Version: "v1beta1", Resource: "volumesnapshotclasses"} - -var volumesnapshotclassesKind = schema.GroupVersionKind{Group: "snapshot.storage.k8s.io", Version: "v1beta1", Kind: "VolumeSnapshotClass"} - -// Get takes name of the volumeSnapshotClass, and returns the corresponding volumeSnapshotClass object, and an error if there is any. -func (c *FakeVolumeSnapshotClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeSnapshotClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(volumesnapshotclassesResource, name), &v1beta1.VolumeSnapshotClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeSnapshotClass), err -} - -// List takes label and field selectors, and returns the list of VolumeSnapshotClasses that match those selectors. -func (c *FakeVolumeSnapshotClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeSnapshotClassList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(volumesnapshotclassesResource, volumesnapshotclassesKind, opts), &v1beta1.VolumeSnapshotClassList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.VolumeSnapshotClassList{ListMeta: obj.(*v1beta1.VolumeSnapshotClassList).ListMeta} - for _, item := range obj.(*v1beta1.VolumeSnapshotClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested volumeSnapshotClasses. -func (c *FakeVolumeSnapshotClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(volumesnapshotclassesResource, opts)) -} - -// Create takes the representation of a volumeSnapshotClass and creates it. Returns the server's representation of the volumeSnapshotClass, and an error, if there is any. -func (c *FakeVolumeSnapshotClasses) Create(ctx context.Context, volumeSnapshotClass *v1beta1.VolumeSnapshotClass, opts v1.CreateOptions) (result *v1beta1.VolumeSnapshotClass, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(volumesnapshotclassesResource, volumeSnapshotClass), &v1beta1.VolumeSnapshotClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeSnapshotClass), err -} - -// Update takes the representation of a volumeSnapshotClass and updates it. Returns the server's representation of the volumeSnapshotClass, and an error, if there is any. -func (c *FakeVolumeSnapshotClasses) Update(ctx context.Context, volumeSnapshotClass *v1beta1.VolumeSnapshotClass, opts v1.UpdateOptions) (result *v1beta1.VolumeSnapshotClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(volumesnapshotclassesResource, volumeSnapshotClass), &v1beta1.VolumeSnapshotClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeSnapshotClass), err -} - -// Delete takes name of the volumeSnapshotClass and deletes it. Returns an error if one occurs. -func (c *FakeVolumeSnapshotClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(volumesnapshotclassesResource, name), &v1beta1.VolumeSnapshotClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeVolumeSnapshotClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(volumesnapshotclassesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.VolumeSnapshotClassList{}) - return err -} - -// Patch applies the patch and returns the patched volumeSnapshotClass. -func (c *FakeVolumeSnapshotClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeSnapshotClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumesnapshotclassesResource, name, pt, data, subresources...), &v1beta1.VolumeSnapshotClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeSnapshotClass), err -} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/fake/fake_volumesnapshotcontent.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/fake/fake_volumesnapshotcontent.go deleted file mode 100644 index 71fda210b0..0000000000 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/fake/fake_volumesnapshotcontent.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - - v1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeVolumeSnapshotContents implements VolumeSnapshotContentInterface -type FakeVolumeSnapshotContents struct { - Fake *FakeSnapshotV1beta1 -} - -var volumesnapshotcontentsResource = schema.GroupVersionResource{Group: "snapshot.storage.k8s.io", Version: "v1beta1", Resource: "volumesnapshotcontents"} - -var volumesnapshotcontentsKind = schema.GroupVersionKind{Group: "snapshot.storage.k8s.io", Version: "v1beta1", Kind: "VolumeSnapshotContent"} - -// Get takes name of the volumeSnapshotContent, and returns the corresponding volumeSnapshotContent object, and an error if there is any. -func (c *FakeVolumeSnapshotContents) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeSnapshotContent, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(volumesnapshotcontentsResource, name), &v1beta1.VolumeSnapshotContent{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeSnapshotContent), err -} - -// List takes label and field selectors, and returns the list of VolumeSnapshotContents that match those selectors. -func (c *FakeVolumeSnapshotContents) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeSnapshotContentList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(volumesnapshotcontentsResource, volumesnapshotcontentsKind, opts), &v1beta1.VolumeSnapshotContentList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.VolumeSnapshotContentList{ListMeta: obj.(*v1beta1.VolumeSnapshotContentList).ListMeta} - for _, item := range obj.(*v1beta1.VolumeSnapshotContentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested volumeSnapshotContents. -func (c *FakeVolumeSnapshotContents) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(volumesnapshotcontentsResource, opts)) -} - -// Create takes the representation of a volumeSnapshotContent and creates it. Returns the server's representation of the volumeSnapshotContent, and an error, if there is any. -func (c *FakeVolumeSnapshotContents) Create(ctx context.Context, volumeSnapshotContent *v1beta1.VolumeSnapshotContent, opts v1.CreateOptions) (result *v1beta1.VolumeSnapshotContent, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(volumesnapshotcontentsResource, volumeSnapshotContent), &v1beta1.VolumeSnapshotContent{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeSnapshotContent), err -} - -// Update takes the representation of a volumeSnapshotContent and updates it. Returns the server's representation of the volumeSnapshotContent, and an error, if there is any. -func (c *FakeVolumeSnapshotContents) Update(ctx context.Context, volumeSnapshotContent *v1beta1.VolumeSnapshotContent, opts v1.UpdateOptions) (result *v1beta1.VolumeSnapshotContent, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(volumesnapshotcontentsResource, volumeSnapshotContent), &v1beta1.VolumeSnapshotContent{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeSnapshotContent), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeSnapshotContents) UpdateStatus(ctx context.Context, volumeSnapshotContent *v1beta1.VolumeSnapshotContent, opts v1.UpdateOptions) (*v1beta1.VolumeSnapshotContent, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(volumesnapshotcontentsResource, "status", volumeSnapshotContent), &v1beta1.VolumeSnapshotContent{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeSnapshotContent), err -} - -// Delete takes name of the volumeSnapshotContent and deletes it. Returns an error if one occurs. -func (c *FakeVolumeSnapshotContents) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(volumesnapshotcontentsResource, name), &v1beta1.VolumeSnapshotContent{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeVolumeSnapshotContents) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(volumesnapshotcontentsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.VolumeSnapshotContentList{}) - return err -} - -// Patch applies the patch and returns the patched volumeSnapshotContent. -func (c *FakeVolumeSnapshotContents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeSnapshotContent, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumesnapshotcontentsResource, name, pt, data, subresources...), &v1beta1.VolumeSnapshotContent{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeSnapshotContent), err -} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/generated_expansion.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/generated_expansion.go deleted file mode 100644 index 181fe9947e..0000000000 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/generated_expansion.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1beta1 - -type VolumeSnapshotExpansion interface{} - -type VolumeSnapshotClassExpansion interface{} - -type VolumeSnapshotContentExpansion interface{} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/volumesnapshot.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/volumesnapshot.go deleted file mode 100644 index 37e6c5e2ea..0000000000 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/volumesnapshot.go +++ /dev/null @@ -1,195 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - "time" - - v1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1" - scheme "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// VolumeSnapshotsGetter has a method to return a VolumeSnapshotInterface. -// A group's client should implement this interface. -type VolumeSnapshotsGetter interface { - VolumeSnapshots(namespace string) VolumeSnapshotInterface -} - -// VolumeSnapshotInterface has methods to work with VolumeSnapshot resources. -type VolumeSnapshotInterface interface { - Create(ctx context.Context, volumeSnapshot *v1beta1.VolumeSnapshot, opts v1.CreateOptions) (*v1beta1.VolumeSnapshot, error) - Update(ctx context.Context, volumeSnapshot *v1beta1.VolumeSnapshot, opts v1.UpdateOptions) (*v1beta1.VolumeSnapshot, error) - UpdateStatus(ctx context.Context, volumeSnapshot *v1beta1.VolumeSnapshot, opts v1.UpdateOptions) (*v1beta1.VolumeSnapshot, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.VolumeSnapshot, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.VolumeSnapshotList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeSnapshot, err error) - VolumeSnapshotExpansion -} - -// volumeSnapshots implements VolumeSnapshotInterface -type volumeSnapshots struct { - client rest.Interface - ns string -} - -// newVolumeSnapshots returns a VolumeSnapshots -func newVolumeSnapshots(c *SnapshotV1beta1Client, namespace string) *volumeSnapshots { - return &volumeSnapshots{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the volumeSnapshot, and returns the corresponding volumeSnapshot object, and an error if there is any. 
-func (c *volumeSnapshots) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeSnapshot, err error) { - result = &v1beta1.VolumeSnapshot{} - err = c.client.Get(). - Namespace(c.ns). - Resource("volumesnapshots"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeSnapshots that match those selectors. -func (c *volumeSnapshots) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeSnapshotList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.VolumeSnapshotList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("volumesnapshots"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeSnapshots. -func (c *volumeSnapshots) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("volumesnapshots"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeSnapshot and creates it. Returns the server's representation of the volumeSnapshot, and an error, if there is any. -func (c *volumeSnapshots) Create(ctx context.Context, volumeSnapshot *v1beta1.VolumeSnapshot, opts v1.CreateOptions) (result *v1beta1.VolumeSnapshot, err error) { - result = &v1beta1.VolumeSnapshot{} - err = c.client.Post(). - Namespace(c.ns). - Resource("volumesnapshots"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeSnapshot). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeSnapshot and updates it. Returns the server's representation of the volumeSnapshot, and an error, if there is any. -func (c *volumeSnapshots) Update(ctx context.Context, volumeSnapshot *v1beta1.VolumeSnapshot, opts v1.UpdateOptions) (result *v1beta1.VolumeSnapshot, err error) { - result = &v1beta1.VolumeSnapshot{} - err = c.client.Put(). - Namespace(c.ns). - Resource("volumesnapshots"). - Name(volumeSnapshot.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeSnapshot). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *volumeSnapshots) UpdateStatus(ctx context.Context, volumeSnapshot *v1beta1.VolumeSnapshot, opts v1.UpdateOptions) (result *v1beta1.VolumeSnapshot, err error) { - result = &v1beta1.VolumeSnapshot{} - err = c.client.Put(). - Namespace(c.ns). - Resource("volumesnapshots"). - Name(volumeSnapshot.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeSnapshot). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeSnapshot and deletes it. Returns an error if one occurs. -func (c *volumeSnapshots) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("volumesnapshots"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *volumeSnapshots) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("volumesnapshots"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeSnapshot. -func (c *volumeSnapshots) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeSnapshot, err error) { - result = &v1beta1.VolumeSnapshot{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("volumesnapshots"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/volumesnapshot_client.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/volumesnapshot_client.go deleted file mode 100644 index 347e1042a7..0000000000 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/volumesnapshot_client.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1" - "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/scheme" - rest "k8s.io/client-go/rest" -) - -type SnapshotV1beta1Interface interface { - RESTClient() rest.Interface - VolumeSnapshotsGetter - VolumeSnapshotClassesGetter - VolumeSnapshotContentsGetter -} - -// SnapshotV1beta1Client is used to interact with features provided by the snapshot.storage.k8s.io group. -type SnapshotV1beta1Client struct { - restClient rest.Interface -} - -func (c *SnapshotV1beta1Client) VolumeSnapshots(namespace string) VolumeSnapshotInterface { - return newVolumeSnapshots(c, namespace) -} - -func (c *SnapshotV1beta1Client) VolumeSnapshotClasses() VolumeSnapshotClassInterface { - return newVolumeSnapshotClasses(c) -} - -func (c *SnapshotV1beta1Client) VolumeSnapshotContents() VolumeSnapshotContentInterface { - return newVolumeSnapshotContents(c) -} - -// NewForConfig creates a new SnapshotV1beta1Client for the given config. 
-func NewForConfig(c *rest.Config) (*SnapshotV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &SnapshotV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new SnapshotV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *SnapshotV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new SnapshotV1beta1Client for the given RESTClient. -func New(c rest.Interface) *SnapshotV1beta1Client { - return &SnapshotV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *SnapshotV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/volumesnapshotclass.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/volumesnapshotclass.go deleted file mode 100644 index 056b970516..0000000000 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/volumesnapshotclass.go +++ /dev/null @@ -1,168 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - "time" - - v1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1" - scheme "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// VolumeSnapshotClassesGetter has a method to return a VolumeSnapshotClassInterface. -// A group's client should implement this interface. -type VolumeSnapshotClassesGetter interface { - VolumeSnapshotClasses() VolumeSnapshotClassInterface -} - -// VolumeSnapshotClassInterface has methods to work with VolumeSnapshotClass resources. 
-type VolumeSnapshotClassInterface interface { - Create(ctx context.Context, volumeSnapshotClass *v1beta1.VolumeSnapshotClass, opts v1.CreateOptions) (*v1beta1.VolumeSnapshotClass, error) - Update(ctx context.Context, volumeSnapshotClass *v1beta1.VolumeSnapshotClass, opts v1.UpdateOptions) (*v1beta1.VolumeSnapshotClass, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.VolumeSnapshotClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.VolumeSnapshotClassList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeSnapshotClass, err error) - VolumeSnapshotClassExpansion -} - -// volumeSnapshotClasses implements VolumeSnapshotClassInterface -type volumeSnapshotClasses struct { - client rest.Interface -} - -// newVolumeSnapshotClasses returns a VolumeSnapshotClasses -func newVolumeSnapshotClasses(c *SnapshotV1beta1Client) *volumeSnapshotClasses { - return &volumeSnapshotClasses{ - client: c.RESTClient(), - } -} - -// Get takes name of the volumeSnapshotClass, and returns the corresponding volumeSnapshotClass object, and an error if there is any. -func (c *volumeSnapshotClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeSnapshotClass, err error) { - result = &v1beta1.VolumeSnapshotClass{} - err = c.client.Get(). - Resource("volumesnapshotclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeSnapshotClasses that match those selectors. -func (c *volumeSnapshotClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeSnapshotClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.VolumeSnapshotClassList{} - err = c.client.Get(). - Resource("volumesnapshotclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeSnapshotClasses. -func (c *volumeSnapshotClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumesnapshotclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeSnapshotClass and creates it. Returns the server's representation of the volumeSnapshotClass, and an error, if there is any. -func (c *volumeSnapshotClasses) Create(ctx context.Context, volumeSnapshotClass *v1beta1.VolumeSnapshotClass, opts v1.CreateOptions) (result *v1beta1.VolumeSnapshotClass, err error) { - result = &v1beta1.VolumeSnapshotClass{} - err = c.client.Post(). - Resource("volumesnapshotclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeSnapshotClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeSnapshotClass and updates it. 
Returns the server's representation of the volumeSnapshotClass, and an error, if there is any. -func (c *volumeSnapshotClasses) Update(ctx context.Context, volumeSnapshotClass *v1beta1.VolumeSnapshotClass, opts v1.UpdateOptions) (result *v1beta1.VolumeSnapshotClass, err error) { - result = &v1beta1.VolumeSnapshotClass{} - err = c.client.Put(). - Resource("volumesnapshotclasses"). - Name(volumeSnapshotClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeSnapshotClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeSnapshotClass and deletes it. Returns an error if one occurs. -func (c *volumeSnapshotClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumesnapshotclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeSnapshotClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumesnapshotclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeSnapshotClass. -func (c *volumeSnapshotClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeSnapshotClass, err error) { - result = &v1beta1.VolumeSnapshotClass{} - err = c.client.Patch(pt). - Resource("volumesnapshotclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/volumesnapshotcontent.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/volumesnapshotcontent.go deleted file mode 100644 index eab9ce2a58..0000000000 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/volumesnapshotcontent.go +++ /dev/null @@ -1,184 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - "time" - - v1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1" - scheme "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// VolumeSnapshotContentsGetter has a method to return a VolumeSnapshotContentInterface. 
-// A group's client should implement this interface. -type VolumeSnapshotContentsGetter interface { - VolumeSnapshotContents() VolumeSnapshotContentInterface -} - -// VolumeSnapshotContentInterface has methods to work with VolumeSnapshotContent resources. -type VolumeSnapshotContentInterface interface { - Create(ctx context.Context, volumeSnapshotContent *v1beta1.VolumeSnapshotContent, opts v1.CreateOptions) (*v1beta1.VolumeSnapshotContent, error) - Update(ctx context.Context, volumeSnapshotContent *v1beta1.VolumeSnapshotContent, opts v1.UpdateOptions) (*v1beta1.VolumeSnapshotContent, error) - UpdateStatus(ctx context.Context, volumeSnapshotContent *v1beta1.VolumeSnapshotContent, opts v1.UpdateOptions) (*v1beta1.VolumeSnapshotContent, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.VolumeSnapshotContent, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.VolumeSnapshotContentList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeSnapshotContent, err error) - VolumeSnapshotContentExpansion -} - -// volumeSnapshotContents implements VolumeSnapshotContentInterface -type volumeSnapshotContents struct { - client rest.Interface -} - -// newVolumeSnapshotContents returns a VolumeSnapshotContents -func newVolumeSnapshotContents(c *SnapshotV1beta1Client) *volumeSnapshotContents { - return &volumeSnapshotContents{ - client: c.RESTClient(), - } -} - -// Get takes name of the volumeSnapshotContent, and returns the corresponding volumeSnapshotContent object, and an error if there is any. -func (c *volumeSnapshotContents) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeSnapshotContent, err error) { - result = &v1beta1.VolumeSnapshotContent{} - err = c.client.Get(). - Resource("volumesnapshotcontents"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeSnapshotContents that match those selectors. -func (c *volumeSnapshotContents) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeSnapshotContentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.VolumeSnapshotContentList{} - err = c.client.Get(). - Resource("volumesnapshotcontents"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeSnapshotContents. -func (c *volumeSnapshotContents) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumesnapshotcontents"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeSnapshotContent and creates it. Returns the server's representation of the volumeSnapshotContent, and an error, if there is any. 
-func (c *volumeSnapshotContents) Create(ctx context.Context, volumeSnapshotContent *v1beta1.VolumeSnapshotContent, opts v1.CreateOptions) (result *v1beta1.VolumeSnapshotContent, err error) { - result = &v1beta1.VolumeSnapshotContent{} - err = c.client.Post(). - Resource("volumesnapshotcontents"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeSnapshotContent). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeSnapshotContent and updates it. Returns the server's representation of the volumeSnapshotContent, and an error, if there is any. -func (c *volumeSnapshotContents) Update(ctx context.Context, volumeSnapshotContent *v1beta1.VolumeSnapshotContent, opts v1.UpdateOptions) (result *v1beta1.VolumeSnapshotContent, err error) { - result = &v1beta1.VolumeSnapshotContent{} - err = c.client.Put(). - Resource("volumesnapshotcontents"). - Name(volumeSnapshotContent.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeSnapshotContent). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *volumeSnapshotContents) UpdateStatus(ctx context.Context, volumeSnapshotContent *v1beta1.VolumeSnapshotContent, opts v1.UpdateOptions) (result *v1beta1.VolumeSnapshotContent, err error) { - result = &v1beta1.VolumeSnapshotContent{} - err = c.client.Put(). - Resource("volumesnapshotcontents"). - Name(volumeSnapshotContent.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeSnapshotContent). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeSnapshotContent and deletes it. Returns an error if one occurs. -func (c *volumeSnapshotContents) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumesnapshotcontents"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeSnapshotContents) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumesnapshotcontents"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeSnapshotContent. -func (c *volumeSnapshotContents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeSnapshotContent, err error) { - result = &v1beta1.VolumeSnapshotContent{} - err = c.client.Patch(pt). - Resource("volumesnapshotcontents"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/LICENSE b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/LICENSE similarity index 100% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/LICENSE rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/LICENSE diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1/doc.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1/doc.go similarity index 100% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1/doc.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1/doc.go diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1/register.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1/register.go similarity index 100% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1/register.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1/register.go diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1/types.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1/types.go similarity index 93% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1/types.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1/types.go index b9745df215..51ed543b0d 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1/types.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1/types.go @@ -29,7 +29,7 @@ import ( // VolumeSnapshot is a user's request for either creating a point-in-time // snapshot of a persistent volume, or binding to a pre-existing snapshot. // +kubebuilder:object:root=true -// +kubebuilder:resource:scope=Namespaced +// +kubebuilder:resource:scope=Namespaced,shortName=vs // +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="ReadyToUse",type=boolean,JSONPath=`.status.readyToUse`,description="Indicates if the snapshot is ready to be used to restore a volume." // +kubebuilder:printcolumn:name="SourcePVC",type=string,JSONPath=`.spec.source.persistentVolumeClaimName`,description="If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created." @@ -42,7 +42,7 @@ import ( type VolumeSnapshot struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -123,11 +123,11 @@ type VolumeSnapshotSource struct { // VolumeSnapshotStatus and VolumeSnapshotContentStatus. Fields in VolumeSnapshotStatus // are updated based on fields in VolumeSnapshotContentStatus. They are eventual // consistency. These fields are duplicate in both objects due to the following reasons: -// - Fields in VolumeSnapshotContentStatus can be used for filtering when importing a -// volumesnapshot. 
-// - VolumsnapshotStatus is used by end users because they cannot see VolumeSnapshotContent. -// - CSI snapshotter sidecar is light weight as it only watches VolumeSnapshotContent -// object, not VolumeSnapshot object. +// - Fields in VolumeSnapshotContentStatus can be used for filtering when importing a +// volumesnapshot. +// - VolumsnapshotStatus is used by end users because they cannot see VolumeSnapshotContent. +// - CSI snapshotter sidecar is light weight as it only watches VolumeSnapshotContent +// object, not VolumeSnapshot object. type VolumeSnapshotStatus struct { // boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent // object to which this VolumeSnapshot object intends to bind to. @@ -179,7 +179,7 @@ type VolumeSnapshotStatus struct { // This field could be helpful to upper level controllers(i.e., application controller) // to decide whether they should continue on waiting for the snapshot to be created // based on the type of error reported. - // The snapshot controller will keep retrying when an error occurrs during the + // The snapshot controller will keep retrying when an error occurs during the // snapshot creation. Upon success, this error field will be cleared. // +optional Error *VolumeSnapshotError `json:"error,omitempty" protobuf:"bytes,5,opt,name=error,casttype=VolumeSnapshotError"` @@ -194,14 +194,14 @@ type VolumeSnapshotStatus struct { // name in a VolumeSnapshot object. // VolumeSnapshotClasses are non-namespaced // +kubebuilder:object:root=true -// +kubebuilder:resource:scope=Cluster +// +kubebuilder:resource:scope=Cluster,shortName=vsclass;vsclasses // +kubebuilder:printcolumn:name="Driver",type=string,JSONPath=`.driver` // +kubebuilder:printcolumn:name="DeletionPolicy",type=string,JSONPath=`.deletionPolicy`,description="Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted." // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` type VolumeSnapshotClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -230,7 +230,7 @@ type VolumeSnapshotClass struct { type VolumeSnapshotClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -245,7 +245,7 @@ type VolumeSnapshotClassList struct { // VolumeSnapshotContent represents the actual "on-disk" snapshot object in the // underlying storage system // +kubebuilder:object:root=true -// +kubebuilder:resource:scope=Cluster +// +kubebuilder:resource:scope=Cluster,shortName=vsc;vscs // +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="ReadyToUse",type=boolean,JSONPath=`.status.readyToUse`,description="Indicates if the snapshot is ready to be used to restore a volume." 
// +kubebuilder:printcolumn:name="RestoreSize",type=integer,JSONPath=`.status.restoreSize`,description="Represents the complete size of the snapshot in bytes" @@ -257,7 +257,7 @@ type VolumeSnapshotClassList struct { type VolumeSnapshotContent struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -328,13 +328,19 @@ type VolumeSnapshotContentSpec struct { // This field is immutable after creation. // Required. Source VolumeSnapshotContentSource `json:"source" protobuf:"bytes,5,opt,name=source"` + + // SourceVolumeMode is the mode of the volume whose snapshot is taken. + // Can be either “Filesystem” or “Block”. + // If not specified, it indicates the source volume's mode is unknown. + // This field is immutable. + // This field is an alpha field. + // +optional + SourceVolumeMode *core_v1.PersistentVolumeMode `json:"sourceVolumeMode" protobuf:"bytes,6,opt,name=sourceVolumeMode"` } // VolumeSnapshotContentSource represents the CSI source of a snapshot. // Exactly one of its members must be set. // Members in VolumeSnapshotContentSource are immutable. -// TODO(xiangqian): Add a webhook to ensure that VolumeSnapshotContentSource members -// will be immutable once specified. type VolumeSnapshotContentSource struct { // volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot // should be dynamically taken from. @@ -355,11 +361,11 @@ type VolumeSnapshotContentSource struct { // VolumeSnapshotStatus and VolumeSnapshotContentStatus. Fields in VolumeSnapshotStatus // are updated based on fields in VolumeSnapshotContentStatus. They are eventual // consistency. These fields are duplicate in both objects due to the following reasons: -// - Fields in VolumeSnapshotContentStatus can be used for filtering when importing a -// volumesnapshot. -// - VolumsnapshotStatus is used by end users because they cannot see VolumeSnapshotContent. -// - CSI snapshotter sidecar is light weight as it only watches VolumeSnapshotContent -// object, not VolumeSnapshot object. +// - Fields in VolumeSnapshotContentStatus can be used for filtering when importing a +// volumesnapshot. +// - VolumsnapshotStatus is used by end users because they cannot see VolumeSnapshotContent. +// - CSI snapshotter sidecar is light weight as it only watches VolumeSnapshotContent +// object, not VolumeSnapshot object. type VolumeSnapshotContentStatus struct { // snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. 
// If not specified, it indicates that dynamic snapshot creation has either failed diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1/zz_generated.deepcopy.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1/zz_generated.deepcopy.go similarity index 98% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1/zz_generated.deepcopy.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1/zz_generated.deepcopy.go index febab0cd40..901b2b080c 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1/zz_generated.deepcopy.go @@ -1,7 +1,8 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,6 +22,7 @@ limitations under the License. package v1 import ( + corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -223,6 +225,11 @@ func (in *VolumeSnapshotContentSpec) DeepCopyInto(out *VolumeSnapshotContentSpec **out = **in } in.Source.DeepCopyInto(&out.Source) + if in.SourceVolumeMode != nil { + in, out := &in.SourceVolumeMode, &out.SourceVolumeMode + *out = new(corev1.PersistentVolumeMode) + **out = **in + } return } diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/clientset.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/clientset.go similarity index 69% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/clientset.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/clientset.go index 2e01b2327b..77d96ce98c 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/clientset.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,9 +20,9 @@ package versioned import ( "fmt" + "net/http" - snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1" - snapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1" + snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" @@ -30,7 +30,6 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface - SnapshotV1beta1() snapshotv1beta1.SnapshotV1beta1Interface SnapshotV1() snapshotv1.SnapshotV1Interface } @@ -38,13 +37,7 @@ type Interface interface { // version included in a Clientset. 
type Clientset struct { *discovery.DiscoveryClient - snapshotV1beta1 *snapshotv1beta1.SnapshotV1beta1Client - snapshotV1 *snapshotv1.SnapshotV1Client -} - -// SnapshotV1beta1 retrieves the SnapshotV1beta1Client -func (c *Clientset) SnapshotV1beta1() snapshotv1beta1.SnapshotV1beta1Interface { - return c.snapshotV1beta1 + snapshotV1 *snapshotv1.SnapshotV1Client } // SnapshotV1 retrieves the SnapshotV1Client @@ -63,26 +56,45 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { // NewForConfig creates a new Clientset for the given config. // If config's RateLimiter is not set and QPS and Burst are acceptable, // NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { if configShallowCopy.Burst <= 0 { return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") } configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) } + var cs Clientset var err error - cs.snapshotV1beta1, err = snapshotv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.snapshotV1, err = snapshotv1.NewForConfig(&configShallowCopy) + cs.snapshotV1, err = snapshotv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -92,18 +104,16 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // NewForConfigOrDie creates a new Clientset for the given config and // panics if there is an error in the config. func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.snapshotV1beta1 = snapshotv1beta1.NewForConfigOrDie(c) - cs.snapshotV1 = snapshotv1.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs } // New creates a new Clientset for the given RESTClient. 
func New(c rest.Interface) *Clientset { var cs Clientset - cs.snapshotV1beta1 = snapshotv1beta1.New(c) cs.snapshotV1 = snapshotv1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/doc.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/doc.go similarity index 94% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/doc.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/doc.go index 6ee812fc57..ab7539cb9f 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/doc.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/fake/clientset_generated.go similarity index 79% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/fake/clientset_generated.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/fake/clientset_generated.go index bf0ed24396..b3ab82a6e8 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/fake/clientset_generated.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,11 +19,9 @@ limitations under the License. 
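The clientset.go hunk above routes `NewForConfig` through the new `NewForConfigAndClient` so that all typed clients built from one `rest.Config` can share a single HTTP client and transport. A rough usage sketch, assuming a kubeconfig-based config; the helper name `newSnapshotClientset` is hypothetical.

```go
package example

import (
	snapclientset "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// newSnapshotClientset builds the snapshot clientset from an explicit,
// shareable *http.Client so other clients created from the same rest.Config
// can reuse the same transport and connection pool.
func newSnapshotClientset(kubeconfig string) (*snapclientset.Clientset, error) {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, err
	}
	httpClient, err := rest.HTTPClientFor(cfg)
	if err != nil {
		return nil, err
	}
	return snapclientset.NewForConfigAndClient(cfg, httpClient)
}
```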
package fake import ( - clientset "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" - snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1" - fakesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/fake" - snapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1" - fakesnapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1/fake" + clientset "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned" + snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1" + fakesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/fake" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" @@ -76,12 +74,10 @@ func (c *Clientset) Tracker() testing.ObjectTracker { return c.tracker } -var _ clientset.Interface = &Clientset{} - -// SnapshotV1beta1 retrieves the SnapshotV1beta1Client -func (c *Clientset) SnapshotV1beta1() snapshotv1beta1.SnapshotV1beta1Interface { - return &fakesnapshotv1beta1.FakeSnapshotV1beta1{Fake: &c.Fake} -} +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) // SnapshotV1 retrieves the SnapshotV1Client func (c *Clientset) SnapshotV1() snapshotv1.SnapshotV1Interface { diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/fake/doc.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/fake/doc.go similarity index 94% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/fake/doc.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/fake/doc.go index d6baf01acb..7d98eabcc8 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/fake/doc.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/fake/register.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/fake/register.go similarity index 72% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/fake/register.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/fake/register.go index eae2547754..5ae20f2237 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/fake/register.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,8 +19,7 @@ limitations under the License. 
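The fake clientset above now exposes only the v1 group (and additionally asserts `testing.FakeClient`). A short test sketch, assuming the standard client-gen `NewSimpleClientset` constructor provided by this fake package; the test name and object values are illustrative.

```go
package example_test

import (
	"context"
	"testing"

	snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
	snapfake "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestListSnapshotClasses(t *testing.T) {
	// Seed the fake clientset with one pre-existing VolumeSnapshotClass.
	class := &snapshotv1.VolumeSnapshotClass{
		ObjectMeta:     metav1.ObjectMeta{Name: "example-vsclass"},
		Driver:         "csi.vsphere.vmware.com",
		DeletionPolicy: snapshotv1.VolumeSnapshotContentDelete,
	}
	cs := snapfake.NewSimpleClientset(class)

	got, err := cs.SnapshotV1().VolumeSnapshotClasses().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		t.Fatalf("listing snapshot classes: %v", err)
	}
	if len(got.Items) != 1 {
		t.Fatalf("expected 1 class, got %d", len(got.Items))
	}
}
```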
package fake import ( - snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - snapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1" + snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -32,21 +31,20 @@ var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ - snapshotv1beta1.AddToScheme, snapshotv1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/scheme/doc.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/scheme/doc.go similarity index 94% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/scheme/doc.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/scheme/doc.go index 7d06c9402d..288d3794dc 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/scheme/doc.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/scheme/register.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/scheme/register.go similarity index 73% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/scheme/register.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/scheme/register.go index 46e6628fdb..df04449fce 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/scheme/register.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,8 +19,7 @@ limitations under the License. 
package scheme import ( - snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - snapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1" + snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -32,21 +31,20 @@ var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ - snapshotv1beta1.AddToScheme, snapshotv1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/doc.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/doc.go similarity index 94% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/doc.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/doc.go index 1917a62947..01fa5fd655 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/doc.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/fake/doc.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/fake/doc.go similarity index 94% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/fake/doc.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/fake/doc.go index 0243e68ff4..dd9e9e4c8f 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/fake/doc.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshot.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshot.go similarity index 96% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshot.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshot.go index 286104b031..f6cdb4669d 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshot.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshot.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ package fake import ( "context" - volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -117,7 +117,7 @@ func (c *FakeVolumeSnapshots) UpdateStatus(ctx context.Context, volumeSnapshot * // Delete takes name of the volumeSnapshot and deletes it. Returns an error if one occurs. func (c *FakeVolumeSnapshots) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(volumesnapshotsResource, c.ns, name), &volumesnapshotv1.VolumeSnapshot{}) + Invokes(testing.NewDeleteActionWithOptions(volumesnapshotsResource, c.ns, name, opts), &volumesnapshotv1.VolumeSnapshot{}) return err } diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshot_client.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshot_client.go similarity index 92% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshot_client.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshot_client.go index 31fd4be64b..866dce2f41 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshot_client.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshot_client.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ limitations under the License. 
package fake import ( - v1 "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1" + v1 "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshotclass.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshotclass.go similarity index 96% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshotclass.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshotclass.go index eed85bd85e..e471b6b24d 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshotclass.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshotclass.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ package fake import ( "context" - volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -99,7 +99,7 @@ func (c *FakeVolumeSnapshotClasses) Update(ctx context.Context, volumeSnapshotCl // Delete takes name of the volumeSnapshotClass and deletes it. Returns an error if one occurs. func (c *FakeVolumeSnapshotClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(volumesnapshotclassesResource, name), &volumesnapshotv1.VolumeSnapshotClass{}) + Invokes(testing.NewRootDeleteActionWithOptions(volumesnapshotclassesResource, name, opts), &volumesnapshotv1.VolumeSnapshotClass{}) return err } diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshotcontent.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshotcontent.go similarity index 96% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshotcontent.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshotcontent.go index 176c760a28..32b9f191dc 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshotcontent.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/fake/fake_volumesnapshotcontent.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ package fake import ( "context" - volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -110,7 +110,7 @@ func (c *FakeVolumeSnapshotContents) UpdateStatus(ctx context.Context, volumeSna // Delete takes name of the volumeSnapshotContent and deletes it. Returns an error if one occurs. func (c *FakeVolumeSnapshotContents) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(volumesnapshotcontentsResource, name), &volumesnapshotv1.VolumeSnapshotContent{}) + Invokes(testing.NewRootDeleteActionWithOptions(volumesnapshotcontentsResource, name, opts), &volumesnapshotv1.VolumeSnapshotContent{}) return err } diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/generated_expansion.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/generated_expansion.go similarity index 94% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/generated_expansion.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/generated_expansion.go index 88df72cf7d..cae7c1cace 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/generated_expansion.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/volumesnapshot.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/volumesnapshot.go similarity index 98% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/volumesnapshot.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/volumesnapshot.go index 36a02388c8..6160690f0e 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/volumesnapshot.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/volumesnapshot.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -22,8 +22,8 @@ import ( "context" "time" - v1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - scheme "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/scheme" + v1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + scheme "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/volumesnapshot_client.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/volumesnapshot_client.go similarity index 74% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/volumesnapshot_client.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/volumesnapshot_client.go index 996f1e16bd..938dbdb75b 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/volumesnapshot_client.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/volumesnapshot_client.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,8 +19,10 @@ limitations under the License. package v1 import ( - v1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/scheme" + "net/http" + + v1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) @@ -49,12 +51,28 @@ func (c *SnapshotV1Client) VolumeSnapshotContents() VolumeSnapshotContentInterfa } // NewForConfig creates a new SnapshotV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*SnapshotV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new SnapshotV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*SnapshotV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/volumesnapshotclass.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/volumesnapshotclass.go similarity index 97% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/volumesnapshotclass.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/volumesnapshotclass.go index 7463fb40ea..71ef507f33 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/volumesnapshotclass.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/volumesnapshotclass.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,8 +22,8 @@ import ( "context" "time" - v1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - scheme "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/scheme" + v1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + scheme "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/volumesnapshotcontent.go b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/volumesnapshotcontent.go similarity index 98% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/volumesnapshotcontent.go rename to vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/volumesnapshotcontent.go index 3e0504cb01..7f634e68c2 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1/volumesnapshotcontent.go +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1/volumesnapshotcontent.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -22,8 +22,8 @@ import ( "context" "time" - v1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - scheme "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/scheme" + v1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + scheme "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/vendor/github.com/vmware/govmomi/.golangci.yml b/vendor/github.com/vmware/govmomi/.golangci.yml index d6392fb948..25242dcaf5 100644 --- a/vendor/github.com/vmware/govmomi/.golangci.yml +++ b/vendor/github.com/vmware/govmomi/.golangci.yml @@ -14,5 +14,6 @@ linters-settings: run: timeout: 6m skip-dirs: + - vim25/json - vim25/xml - cns/types diff --git a/vendor/github.com/vmware/govmomi/.goreleaser.yml b/vendor/github.com/vmware/govmomi/.goreleaser.yml index 76da5f3a35..5da5cd64ef 100644 --- a/vendor/github.com/vmware/govmomi/.goreleaser.yml +++ b/vendor/github.com/vmware/govmomi/.goreleaser.yml @@ -80,7 +80,7 @@ brews: - name: govc ids: - govcbuild - tap: + repository: owner: govmomi name: homebrew-tap # TODO: create token in specified tap repo, add as secret to govmomi repo and reference in release workflow @@ -91,7 +91,7 @@ brews: name: Alfred the Narwhal email: cna-alfred@vmware.com folder: Formula - homepage: "https://github.com/vmware/govmomi/blob/master/govc/README.md" + homepage: "https://github.com/vmware/govmomi/blob/main/govc/README.md" description: "govc is a vSphere CLI built on top of govmomi." test: | system "#{bin}/govc version" @@ -100,7 +100,7 @@ brews: - name: vcsim ids: - vcsimbuild - tap: + repository: owner: govmomi name: homebrew-tap # TODO: create token in specified tap repo, add as secret to govmomi repo and reference in release workflow @@ -111,7 +111,7 @@ brews: name: Alfred the Narwhal email: cna-alfred@vmware.com folder: Formula - homepage: "https://github.com/vmware/govmomi/blob/master/vcsim/README.md" + homepage: "https://github.com/vmware/govmomi/blob/main/vcsim/README.md" description: "vcsim is a vSphere API simulator built on top of govmomi." 
test: | system "#{bin}/vcsim -h" diff --git a/vendor/github.com/vmware/govmomi/CHANGELOG.md b/vendor/github.com/vmware/govmomi/CHANGELOG.md index 974881f989..f0e5da70d4 100644 --- a/vendor/github.com/vmware/govmomi/CHANGELOG.md +++ b/vendor/github.com/vmware/govmomi/CHANGELOG.md @@ -1,4 +1,112 @@ + +## [Release v0.30.0](https://github.com/vmware/govmomi/compare/v0.29.0...v0.30.0) + +> Release Date: 2022-12-12 + +### 🐞 Fix + +- [1ad33d48] Heal the broken Namespace API +- [22c48147] Update $mktemp to support macOS +- [05b0b08c] DialTLSContext / Go 1.18+ CertificateVerify support + +### 💫 API Changes + +- [58f4112b] Update types to vSphere 8.0 GA +- [ba206c5b] add Content Library security compliance support +- [4c24f821] Add SRIOV device names +- [642156dd] Adds vSphere 7.0u1-u3 support to namespace-management (Tanzu) + +### 💫 `govc` (CLI) + +- [60a18c56] about.cert was not respecting -k +- [15d1181d] bash completion improvements +- [0dbf717b] Add sso.lpp.info and sso.lpp.update commands +- [fe87cff9] host.info: use writer instead of os.stdout +- [a7196e41] host.info: use writer instead of os.stdout +- [3d6de9da] fix host.esxcli runtime error occurred when no arguments specified +- [8c7ba5ef] Add feature in sso.group.ls to list groups using FindGroupsInGroup method +- [dc3e1d79] Add feature sso.group.lsgroups using FindGroupsInGroup method +- [bf991e6e] add event key for json and plain text output +- [2017e846] Support creating content libraries with security policies + +### 💫 `vcsim` (Simulator) + +- [86f9d42a] Update test keys to be RSA 2048 +- [cedf695b] Fix duplicated name check in CloneVM_Task +- [8f4da558] add QueryNetworkHint support for LLDP and CDP details +- [1cab3254] Fix RetrieveProperties path validation to avoid panic +- [7f42a1d2] use node id for ServiceContent.InstanceUuid +- [03319493] Fix snapshot tasks to update rootSnapshot +- [b6ebcb6b] Fix disk capacity validation in ConfigureDevices +- [61032a23] Fix StorageIOAllocationInfo of VirtualDisk +- [cbfe0c93] support disconnect/reconnect host +- [b44828a4] Fix datastore freespace changed by ReconfigVM_Task + +### 📃 Documentation + +- [813a5d88] update `README.md` + +### 🧹 Chore + +- [eabc29ba] Update version.go for v0.30.0 + +### ⚠️ BREAKING + +### 📖 Commits + +- [eabc29ba] chore: Update version.go for v0.30.0 +- [1c919824] Update CONTRIBUTORS for release +- [1ad33d48] fix: Heal the broken Namespace API +- [22c48147] fix: Update $mktemp to support macOS +- [05b0b08c] fix: DialTLSContext / Go 1.18+ CertificateVerify support +- [86f9d42a] vcsim: Update test keys to be RSA 2048 +- [60a18c56] govc: about.cert was not respecting -k +- [58f4112b] api: Update types to vSphere 8.0 GA +- [15d1181d] govc: bash completion improvements +- [c018f078] perms on template files +- [813a5d88] docs: update `README.md` +- [0dbf717b] govc: Add sso.lpp.info and sso.lpp.update commands +- [fe87cff9] govc: host.info: use writer instead of os.stdout +- [a7196e41] govc: host.info: use writer instead of os.stdout +- [ba206c5b] api: add Content Library security compliance support +- [cedf695b] vcsim: Fix duplicated name check in CloneVM_Task +- [8f4da558] vcsim: add QueryNetworkHint support for LLDP and CDP details +- [3b2816ac] Add optional recommRequired PlaceVmsXCluster req arguments +- [f975908a] build(deps): bump andstor/file-existence-action from 1 to 2 +- [1373b80f] build(deps): bump chuhlomin/render-template from 1.5 to 1.6 +- [dc55a27e] build(deps): bump nokogiri from 1.13.6 to 1.13.9 in /gen +- [63980ff2] Fix: use latestPages in task 
HistoryCollector +- [3d6de9da] govc: fix host.esxcli runtime error occurred when no arguments specified +- [1e9eed94] Update list of projects using govmomi +- [7f4d115c] fixup! api: Add SRIOV device names +- [8f1dc575] Add API cnsreconfigpolicy bindings and static check fixes to cns/client_test.go +- [bf68e8f0] fixup! api: Add SRIOV device names +- [4c24f821] api: Add SRIOV device names +- [c1bb56db] Updated USAGE.md +- [aca677ad] build(deps): bump actions/stale from 5 to 6 +- [1cab3254] vcsim: Fix RetrieveProperties path validation to avoid panic +- [65a6f6bc] Address review comments +- [99d12605] correct new lines in group.ls +- [4d9f6e01] correct new lines in USAGE.md +- [e5bee862] move FindUsersInGroup to users.ls +- [8c7ba5ef] govc: Add feature in sso.group.ls to list groups using FindGroupsInGroup method +- [ee332ae7] Add new query selection parameters to be used for QueryAllVolume/QueryVolumeAsync API +- [dc3e1d79] govc: Add feature sso.group.lsgroups using FindGroupsInGroup method +- [201ae28f] Add common stub for hgfs for non-linux env +- [7f42a1d2] vcsim: use node id for ServiceContent.InstanceUuid +- [76e99b00] Boilerplate check requires a date range in the license. +- [bf991e6e] govc: add event key for json and plain text output +- [4a29caee] add OpenBSD build constraint +- [81bc76bc] toolbbox: add hgfs OpenBSD stub +- [03319493] vcsim: Fix snapshot tasks to update rootSnapshot +- [b6ebcb6b] vcsim: Fix disk capacity validation in ConfigureDevices +- [2017e846] govc: Support creating content libraries with security policies +- [642156dd] api: Adds vSphere 7.0u1-u3 support to namespace-management (Tanzu) +- [61032a23] vcsim: Fix StorageIOAllocationInfo of VirtualDisk +- [cbfe0c93] vcsim: support disconnect/reconnect host +- [b44828a4] vcsim: Fix datastore freespace changed by ReconfigVM_Task + ## [Release v0.29.0](https://github.com/vmware/govmomi/compare/v0.28.0...v0.29.0) diff --git a/vendor/github.com/vmware/govmomi/CONTRIBUTING.md b/vendor/github.com/vmware/govmomi/CONTRIBUTING.md index c6d69e5467..038e5321b7 100644 --- a/vendor/github.com/vmware/govmomi/CONTRIBUTING.md +++ b/vendor/github.com/vmware/govmomi/CONTRIBUTING.md @@ -41,7 +41,7 @@ and **supported prefixes**, e.g. `govc: `. ### Example 1 - Fix a Bug in `govmomi` ```bash -git checkout -b issue- vmware/master +git checkout -b issue- main git add git commit -m "fix: ..." -m "Closes: #" git push $USER issue- @@ -50,7 +50,7 @@ git push $USER issue- ### Example 2 - Add a new (non-breaking) API to `govmomi` ```bash -git checkout -b issue- vmware/master +git checkout -b issue- main git add git commit -m "Add API ..." -m "Closes: #" git push $USER issue- @@ -59,7 +59,7 @@ git push $USER issue- ### Example 3 - Add a Feature to `govc` ```bash -git checkout -b issue- vmware/master +git checkout -b issue- main git add git commit -m "govc: Add feature ..." -m "Closes: #" git push $USER issue- @@ -70,7 +70,7 @@ To register the new `govc` command package, add a blank `_` import to `govmomi/g ### Example 4 - Fix a Bug in `vcsim` ```bash -git checkout -b issue- vmware/master +git checkout -b issue- main git add git commit -m "vcsim: Fix ..." -m "Closes: #" git push $USER issue- @@ -87,7 +87,7 @@ Thus these details should be stated at the body of the commit message. Multi-line strings are supported. 
```bash -git checkout -b issue- vmware/master +git checkout -b issue- main git add cat << EOF | git commit -F - Add ctx to funcXYZ @@ -103,13 +103,13 @@ git push $USER issue- ### Stay in sync with Upstream -When your branch gets out of sync with the vmware/master branch, use the +When your branch gets out of sync with the main branch, use the following to update (rebase): ```bash git checkout issue- git fetch -a -git rebase vmware/master +git rebase main git push --force-with-lease $USER issue- ``` @@ -139,7 +139,7 @@ Once the review is complete, squash and push your final commit(s): ```bash # squash all commits into one # --autosquash will automatically detect and merge fixup commits -git rebase -i --autosquash vmware/master +git rebase -i --autosquash main git push --force-with-lease $USER issue- ``` diff --git a/vendor/github.com/vmware/govmomi/CONTRIBUTORS b/vendor/github.com/vmware/govmomi/CONTRIBUTORS index ef8c56de8b..406173b84c 100644 --- a/vendor/github.com/vmware/govmomi/CONTRIBUTORS +++ b/vendor/github.com/vmware/govmomi/CONTRIBUTORS @@ -9,6 +9,7 @@ Adam Chalkley Adam Fowler Adam Shannon Akanksha Panse +akutz Al Biheiri Alessandro Cortiana Alex @@ -52,7 +53,6 @@ Brian Rak brian57860 Bruce Downs Bryan Venteicher -Cédric Blomart Cheng Cheng Chethan Venkatesh Choudhury Sarada Prasanna Nanda @@ -61,6 +61,7 @@ Christian Höltje Clint Greenwood cpiment CuiHaozhi +Cédric Blomart Dan Ilan Dan Norris Daniel Frederick Crisman @@ -104,6 +105,7 @@ Gavrie Philipson George Hicken Gerrit Renker gthombare +Hakan Halil HakanSunay Hasan Mahmood Haydon Ryan @@ -123,9 +125,9 @@ Jeremy Canady jeremy-clerc Jiatong Wang jingyizPensando -João Pereira Jonas Ausevicius Jorge Sevilla +João Pereira Julien PILLON Justin J. Novack kayrus @@ -140,6 +142,7 @@ Louie Jiang Luther Monson Madanagopal Arunachalam makelarisjr <8687447+makelarisjr@users.noreply.github.com> +Manuel Grandeit maplain Marc Carmier Marcus Tan @@ -164,6 +167,7 @@ Mike Schinkel Mincho Tonev mingwei Nicolas Lamirault +nikhaild <84156354+nikhaild@users.noreply.github.com> Nikhil Kathare Nikhil R Deshpande Nikolas Grottendieck @@ -226,6 +230,7 @@ Tjeu Kayim <15987676+TjeuKayim@users.noreply.github.com> Toomas Pelberg Trevor Dawe tshihad +Ueli Banholzer Uwe Bessle Vadim Egorov Vikram Krishnamurthy @@ -245,6 +250,7 @@ Yi Jiang yiyingy ykakarap Yogesh Sobale <6104071+ysobale@users.noreply.github.com> +Your Name Yue Yin Yun Zhou Yuya Kusakabe diff --git a/vendor/github.com/vmware/govmomi/Makefile b/vendor/github.com/vmware/govmomi/Makefile index 1a89322a32..0f31649af8 100644 --- a/vendor/github.com/vmware/govmomi/Makefile +++ b/vendor/github.com/vmware/govmomi/Makefile @@ -58,7 +58,7 @@ lint-go: $(GOLANGCI_LINT) ## Lint codebase $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_FLAGS) .PHONY: lint-go-full -lint-go-full: GOLANGCI_LINT_FLAGS = --fast=false +lint-go-full: GOLANGCI_LINT_FLAGS = --fast=false --max-same-issues=200 lint-go-full: lint-go ## Run slower linters to detect possible issues .PHONY: fix @@ -107,6 +107,10 @@ doc: install doc: ## Generates govc USAGE.md ./govc/usage.sh > ./govc/USAGE.md +.PHONY: generate-types +generate-types: ## Generate the types + $(MAKE) -C ./gen/ $@ + ## -------------------------------------- ## Tests @@ -132,7 +136,7 @@ endif .PHONY: go-test go-test: ## Runs go unit tests with race detector enabled - GORACE=$(GORACE) $(GO) test \ + GORACE=$(GORACE) CGO_ENABLED=1 $(GO) test \ -count $(TEST_COUNT) \ -race \ -timeout $(TEST_TIMEOUT) \ @@ -143,16 +147,7 @@ go-test: ## Runs go unit tests with race detector enabled 
govc-test: install govc-test: ## Runs govc bats tests ./govc/test/images/update.sh - (cd govc/test && ./vendor/github.com/sstephenson/bats/libexec/bats -t .) - -.PHONY: govc-test-sso -govc-test-sso: install - ./govc/test/images/update.sh - (cd govc/test && SSO_BATS=1 ./vendor/github.com/sstephenson/bats/libexec/bats -t sso.bats) - -.PHONY: govc-test-sso-assert-cert -govc-test-sso-assert-cert: - SSO_BATS_ASSERT_CERT=1 $(MAKE) govc-test-sso + (cd govc/test && ./vendor/github.com/bats-core/bats-core/bin/bats -t .) .PHONY: test test: go-test govc-test ## Runs go-test and govc-test diff --git a/vendor/github.com/vmware/govmomi/README.md b/vendor/github.com/vmware/govmomi/README.md index 60a18d6e12..73975150c8 100644 --- a/vendor/github.com/vmware/govmomi/README.md +++ b/vendor/github.com/vmware/govmomi/README.md @@ -65,6 +65,7 @@ Refer to the [CHANGELOG][govmomi-changelog] for version to version changes. * [Kubernetes vSphere Cloud Provider][project-k8s-cloud-provider] * [Kubernetes Cluster API][project-k8s-cluster-api] * [OPS][project-nanovms-ops] +* [OpenTelemetry Collector Contrib][opentelemetry-collector-contrib] * [Packer Plugin for VMware vSphere][project-hashicorp-packer-plugin-vsphere] * [Rancher][project-rancher] * [Terraform Provider for VMware vSphere][project-hashicorp-terraform-provider-vsphere] @@ -102,6 +103,7 @@ Follows pyvmomi and rbvmomi: language prefix + the vSphere acronym "VM Object Ma [go-reference]: https://pkg.go.dev/github.com/vmware/govmomi [go-report-card]: https://goreportcard.com/report/github.com/vmware/govmomi [go-version]: https://github.com/vmware/govmomi +[opentelemetry-collector-contrib]: https://github.com/open-telemetry/opentelemetry-collector-contrib [project-docker-linuxKit]: https://github.com/linuxkit/linuxkit/tree/master/src/cmd/linuxkit [project-elastic-agent]: https://github.com/elastic/integrations/tree/main/packages/vsphere [project-gru]: https://github.com/dnaeon/gru diff --git a/vendor/github.com/vmware/govmomi/RELEASE.md b/vendor/github.com/vmware/govmomi/RELEASE.md index 1a12c0640b..3c0965a831 100644 --- a/vendor/github.com/vmware/govmomi/RELEASE.md +++ b/vendor/github.com/vmware/govmomi/RELEASE.md @@ -18,7 +18,7 @@ uses [`goreleaser`](http://goreleaser.com/) and automatically creates/pushes: - Docker images for `vmware/govc` and `vmware/vcsim` to Docker Hub - Source code -Starting with release tag `v0.29.0`, releases are not tagged on the `master` +Starting with release tag `v0.29.0`, releases are not tagged on the `main` branch anymore but a dedicated release branch, for example `release-0.29`. This process has already been followed for patch releases and back-ports. @@ -37,15 +37,15 @@ which can be done through the Github UI or `git` CLI. This guide describes the CLI process. -### Verify `master` branch is up to date with the remote +### Verify `main` branch is up to date with the remote ```console -git checkout master +git checkout main git fetch -avp -git diff master origin/master +git diff main origin/main # if your local and remote branches diverge run -git pull origin/master +git pull origin/main ``` > **Warning** @@ -57,7 +57,7 @@ git pull origin/master ### Create a release branch For new releases, create a release branch from the most recent commit in -`master`, e.g. `release-0.30`. +`main`, e.g. `release-0.30`. ```console export RELEASE_BRANCH=release-0.30 @@ -106,7 +106,7 @@ navigate to `Actions -> Workflows -> Release`. Click `Run Workflow` which opens a dropdown list. -Select the new/updated branch, e.g. `release-0.30`, i.e. 
**not** the `master` +Select the new/updated branch, e.g. `release-0.30`, i.e. **not** the `main` branch. Specify a semantic `tag` to associate with the release, e.g. `v0.30.0`. @@ -124,7 +124,7 @@ Click `Run Workflow` to kick off the workflow. After successful completion and if the newly created `tag` is the **latest** (semantic version sorted) tag in the repository, a PR is automatically opened -against the `master` branch to update the `CHANGELOG`. Please review and merge +against the `main` branch to update the `CHANGELOG`. Please review and merge accordingly. ## Creating a release before Version `v0.29.0` @@ -133,15 +133,15 @@ The release process before `v0.29.0` differs since it's based on manually creating and pushing tags. Here, on every new tag matching `v*` pushed to the repository a Github Action Release Workflow is executed. -### Verify `master` branch is up to date with the remote +### Verify `main` branch is up to date with the remote ```console -git checkout master +git checkout main git fetch -avp -git diff master origin/master +git diff main origin/main # if your local and remote branches diverge run -git pull origin/master +git pull origin/main ``` > **Warning** diff --git a/vendor/github.com/vmware/govmomi/USAGE.md b/vendor/github.com/vmware/govmomi/USAGE.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/github.com/vmware/govmomi/client.go b/vendor/github.com/vmware/govmomi/client.go index ad49fe6bf7..5becedd489 100644 --- a/vendor/github.com/vmware/govmomi/client.go +++ b/vendor/github.com/vmware/govmomi/client.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2014-2016 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -19,7 +19,7 @@ This package is the root package of the govmomi library. The library is structured as follows: -Package vim25 +# Package vim25 The minimal usable functionality is available through the vim25 package. It contains subpackages that contain generated types, managed objects, and all @@ -30,25 +30,25 @@ The vim25 package itself contains a client structure that is passed around throughout the entire library. It abstracts a session and its immutable state. See the vim25 package for more information. -Package session +# Package session The session package contains an abstraction for the session manager that allows a user to login and logout. It also provides access to the current session (i.e. to determine if the user is in fact logged in) -Package object +# Package object The object package contains wrappers for a selection of managed objects. The constructors of these objects all take a *vim25.Client, which they pass along to derived objects, if applicable. -Package govc +# Package govc The govc package contains the govc CLI. The code in this tree is not intended to be used as a library. Any functionality that govc contains that _could_ be used as a library function but isn't, _should_ live in a root level package. -Other packages +# Other packages Other packages, such as "event", "guest", or "license", provide wrappers for the respective subsystems. 
They are typically not needed in normal workflows so diff --git a/vendor/github.com/vmware/govmomi/cns/client.go b/vendor/github.com/vmware/govmomi/cns/client.go index 037ac2e929..a526ff46f9 100644 --- a/vendor/github.com/vmware/govmomi/cns/client.go +++ b/vendor/github.com/vmware/govmomi/cns/client.go @@ -44,6 +44,10 @@ var ( Type: "CnsVolumeManager", Value: "cns-volume-manager", } + CnsDebugManagerInstance = vimtypes.ManagedObjectReference{ + Type: "CnsDebugManager", + Value: "cns-debug-manager", + } ) type Client struct { @@ -282,3 +286,20 @@ func (c *Client) ReconfigVolumePolicy(ctx context.Context, PolicyReconfigSpecs [ } return object.NewTask(c.vim25Client, res.Returnval), nil } + +// SyncDatastore calls the CnsSyncDatastore API +// Note: To be used only by VMware's internal support tools. +// This API triggers a manual sync of internal CNS and FCD DBs which otherwise happens periodically; +// with fullSync it forces synchronization of the complete tables. +func (c *Client) SyncDatastore(ctx context.Context, dsURL string, fullSync bool) (*object.Task, error) { + req := cnstypes.CnsSyncDatastore{ + This: CnsDebugManagerInstance, + DatastoreUrl: dsURL, + FullSync: &fullSync, + } + res, err := methods.CnsSyncDatastore(ctx, c, &req) + if err != nil { + return nil, err + } + return object.NewTask(c.vim25Client, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/cns/methods/methods.go b/vendor/github.com/vmware/govmomi/cns/methods/methods.go index 43f41f3aa5..da9318cac0 100644 --- a/vendor/github.com/vmware/govmomi/cns/methods/methods.go +++ b/vendor/github.com/vmware/govmomi/cns/methods/methods.go @@ -346,3 +346,24 @@ func CnsReconfigVolumePolicy(ctx context.Context, r soap.RoundTripper, req *type } return resBody.Res, nil } + +type CnsSyncDatastoreBody struct { + Req *types.CnsSyncDatastore `xml:"urn:vsan CnsSyncDatastore,omitempty"` + Res *types.CnsSyncDatastoreResponse `xml:"urn:vsan CnsSyncDatastoreResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CnsSyncDatastoreBody) Fault() *soap.Fault { return b.Fault_ } + +// Note: To be used only by VMware's internal support tools.
+func CnsSyncDatastore(ctx context.Context, r soap.RoundTripper, req *types.CnsSyncDatastore) (*types.CnsSyncDatastoreResponse, error) { + var reqBody, resBody CnsSyncDatastoreBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} diff --git a/vendor/github.com/vmware/govmomi/cns/types/types.go b/vendor/github.com/vmware/govmomi/cns/types/types.go index 452b9ed387..d493e99c59 100644 --- a/vendor/github.com/vmware/govmomi/cns/types/types.go +++ b/vendor/github.com/vmware/govmomi/cns/types/types.go @@ -909,3 +909,23 @@ type CnsVolumePolicyReconfigSpec struct { func init() { types.Add("vsan:CnsVolumePolicyReconfigSpec", reflect.TypeOf((*CnsVolumePolicyReconfigSpec)(nil)).Elem()) } + +type CnsSyncDatastore CnsSyncDatastoreRequestType + +func init() { + types.Add("vsan:CnsSyncDatastore", reflect.TypeOf((*CnsSyncDatastore)(nil)).Elem()) +} + +type CnsSyncDatastoreRequestType struct { + This types.ManagedObjectReference `xml:"_this"` + DatastoreUrl string `xml:"datastoreUrl,omitempty"` + FullSync *bool `xml:"fullSync"` +} + +func init() { + types.Add("vsan:CnsSyncDatastoreRequestType", reflect.TypeOf((*CnsSyncDatastoreRequestType)(nil)).Elem()) +} + +type CnsSyncDatastoreResponse struct { + Returnval types.ManagedObjectReference `xml:"returnval"` +} diff --git a/vendor/github.com/vmware/govmomi/find/doc.go b/vendor/github.com/vmware/govmomi/find/doc.go index 0c8acee016..d22e883534 100644 --- a/vendor/github.com/vmware/govmomi/find/doc.go +++ b/vendor/github.com/vmware/govmomi/find/doc.go @@ -32,6 +32,6 @@ otherwise "find" mode is used. The exception is to use a "..." wildcard with a path to find all objects recursively underneath any root object. For example: VirtualMachineList("/DC1/...") -See also: https://github.com/vmware/govmomi/blob/master/govc/README.md#usage +See also: https://github.com/vmware/govmomi/blob/main/govc/README.md#usage */ package find diff --git a/vendor/github.com/vmware/govmomi/internal/helpers.go b/vendor/github.com/vmware/govmomi/internal/helpers.go index b3eafeadfd..41e533fd75 100644 --- a/vendor/github.com/vmware/govmomi/internal/helpers.go +++ b/vendor/github.com/vmware/govmomi/internal/helpers.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2020 VMware, Inc. All Rights Reserved. +Copyright (c) 2020-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,13 +17,25 @@ limitations under the License. package internal import ( + "context" + "fmt" "net" + "net/http" + "net/url" + "os" "path" + "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" ) +const ( + vCenterHostGatewaySocket = "/var/run/envoy-hgw/hgw-pipe" + vCenterHostGatewaySocketEnv = "VCENTER_ENVOY_HOST_GATEWAY" +) + // InventoryPath composed of entities by Name func InventoryPath(entities []mo.ManagedEntity) string { val := "/" @@ -61,3 +73,69 @@ func HostSystemManagementIPs(config []types.VirtualNicManagerNetConfig) []net.IP return ips } + +// UsingEnvoySidecar determines if the given *vim25.Client is using vCenter's +// local Envoy sidecar (as opposed to using the HTTPS port.) +// Returns a boolean indicating whether to use the sidecar or not. 
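For context, a minimal usage sketch of the Client.SyncDatastore debug helper vendored above. It assumes an already-authenticated *vim25.Client; the wrapper function `syncDatastore` is hypothetical and not part of the vendored change.

```go
// Minimal sketch, assuming an authenticated *vim25.Client; syncDatastore is a
// hypothetical wrapper, not vendored code.
package main

import (
	"context"

	"github.com/vmware/govmomi/cns"
	"github.com/vmware/govmomi/vim25"
)

func syncDatastore(ctx context.Context, vc *vim25.Client, dsURL string) error {
	c, err := cns.NewClient(ctx, vc)
	if err != nil {
		return err
	}
	// fullSync=true forces synchronization of the complete CNS/FCD tables.
	task, err := c.SyncDatastore(ctx, dsURL, true)
	if err != nil {
		return err
	}
	// Block until the server-side sync task completes.
	return task.Wait(ctx)
}
```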
+func UsingEnvoySidecar(c *vim25.Client) bool { + envoySidecarPort := os.Getenv("GOVMOMI_ENVOY_SIDECAR_PORT") + if envoySidecarPort == "" { + envoySidecarPort = "1080" + } + envoySidecarHost := os.Getenv("GOVMOMI_ENVOY_SIDECAR_HOST") + if envoySidecarHost == "" { + envoySidecarHost = "localhost" + } + return c.URL().Hostname() == envoySidecarHost && c.URL().Scheme == "http" && c.URL().Port() == envoySidecarPort +} + +// ClientWithEnvoyHostGateway clones the provided soap.Client and returns a new +// one that uses a Unix socket to leverage vCenter's local Envoy host +// gateway. +// This should be used to construct clients that talk to ESX. +// This method returns a new *vim25.Client and does not modify the original input. +// This client disables HTTP keep alives and is intended for a single round +// trip. (eg. guest file transfer, datastore file transfer) +func ClientWithEnvoyHostGateway(vc *vim25.Client) *vim25.Client { + // Override the vim client with a new one that wraps a Unix socket transport. + // Using HTTP here so secure means nothing. + sc := soap.NewClient(vc.URL(), true) + // Clone the underlying HTTP transport, only replacing the dialer logic. + transport := sc.DefaultTransport().Clone() + hostGatewaySocketPath := os.Getenv(vCenterHostGatewaySocketEnv) + if hostGatewaySocketPath == "" { + hostGatewaySocketPath = vCenterHostGatewaySocket + } + transport.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", hostGatewaySocketPath) + } + // We use this client for a single request, so we don't require keepalives. + transport.DisableKeepAlives = true + sc.Client = http.Client{ + Transport: transport, + } + newVC := &vim25.Client{ + Client: sc, + } + return newVC +} + +// HostGatewayTransferURL rewrites the provided URL to be suitable for use +// with the Envoy host gateway on vCenter. +// It returns a copy of the provided URL with the host, scheme rewritten as needed. +// Receivers of such URLs must typically also use ClientWithEnvoyHostGateway to +// use the appropriate http.Transport to be able to make use of the host +// gateway. +// nil input yields an uninitialized struct. +func HostGatewayTransferURL(u *url.URL, hostMoref types.ManagedObjectReference) *url.URL { + if u == nil { + return &url.URL{} + } + // Make a copy of the provided URL. 
+ turl := *u + turl.Host = "localhost" + turl.Scheme = "http" + oldPath := turl.Path + turl.Path = fmt.Sprintf("/hgw/%s%s", hostMoref.Value, oldPath) + return &turl +} diff --git a/vendor/github.com/vmware/govmomi/internal/version/version.go b/vendor/github.com/vmware/govmomi/internal/version/version.go index dc29e12bf3..6f2ef41516 100644 --- a/vendor/github.com/vmware/govmomi/internal/version/version.go +++ b/vendor/github.com/vmware/govmomi/internal/version/version.go @@ -21,5 +21,5 @@ const ( ClientName = "govmomi" // ClientVersion is the version of this SDK - ClientVersion = "0.30.6" + ClientVersion = "0.32.0" ) diff --git a/vendor/github.com/vmware/govmomi/nfc/lease.go b/vendor/github.com/vmware/govmomi/nfc/lease.go index 4575680336..eb3ef9fcaf 100644 --- a/vendor/github.com/vmware/govmomi/nfc/lease.go +++ b/vendor/github.com/vmware/govmomi/nfc/lease.go @@ -71,17 +71,17 @@ func (l *Lease) Complete(ctx context.Context) error { } // GetManifest wraps methods.GetManifest -func (l *Lease) GetManifest(ctx context.Context) error { +func (l *Lease) GetManifest(ctx context.Context) ([]types.HttpNfcLeaseManifestEntry, error) { req := types.HttpNfcLeaseGetManifest{ This: l.Reference(), } - _, err := methods.HttpNfcLeaseGetManifest(ctx, l.c, &req) + res, err := methods.HttpNfcLeaseGetManifest(ctx, l.c, &req) if err != nil { - return err + return nil, err } - return nil + return res.Returnval, nil } // Progress wraps methods.Progress diff --git a/vendor/github.com/vmware/govmomi/object/compute_resource.go b/vendor/github.com/vmware/govmomi/object/compute_resource.go index 7645fddaf3..4a2db74117 100644 --- a/vendor/github.com/vmware/govmomi/object/compute_resource.go +++ b/vendor/github.com/vmware/govmomi/object/compute_resource.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2015 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
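The nfc Lease.GetManifest wrapper above now returns the lease's manifest entries instead of discarding them. A hedged sketch of an updated caller follows; the `printManifest` helper is hypothetical, only the GetManifest signature comes from the vendored change.

```go
// Hypothetical caller, shown only to illustrate the new GetManifest signature.
package main

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/nfc"
)

func printManifest(ctx context.Context, lease *nfc.Lease) error {
	entries, err := lease.GetManifest(ctx)
	if err != nil {
		return err
	}
	for _, e := range entries {
		// Each entry reports the per-device key, checksum, and size.
		fmt.Printf("device %s: sha1=%s size=%d bytes\n", e.Key, e.Sha1, e.Size)
	}
	return nil
}
```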
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -18,6 +18,7 @@ package object import ( "context" + "fmt" "path" "github.com/vmware/govmomi/property" @@ -84,6 +85,21 @@ func (c ComputeResource) Datastores(ctx context.Context) ([]*Datastore, error) { return dss, nil } +func (c ComputeResource) EnvironmentBrowser(ctx context.Context) (*EnvironmentBrowser, error) { + var cr mo.ComputeResource + + err := c.Properties(ctx, c.Reference(), []string{"environmentBrowser"}, &cr) + if err != nil { + return nil, err + } + + if cr.EnvironmentBrowser == nil { + return nil, fmt.Errorf("%s: nil environmentBrowser", c.Reference()) + } + + return NewEnvironmentBrowser(c.c, *cr.EnvironmentBrowser), nil +} + func (c ComputeResource) ResourcePool(ctx context.Context) (*ResourcePool, error) { var cr mo.ComputeResource diff --git a/vendor/github.com/vmware/govmomi/object/datastore.go b/vendor/github.com/vmware/govmomi/object/datastore.go index 65264ae152..b3b7f0bb89 100644 --- a/vendor/github.com/vmware/govmomi/object/datastore.go +++ b/vendor/github.com/vmware/govmomi/object/datastore.go @@ -27,6 +27,7 @@ import ( "path" "strings" + "github.com/vmware/govmomi/internal" "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/session" "github.com/vmware/govmomi/vim25" @@ -83,8 +84,14 @@ func (d Datastore) Path(path string) string { func (d Datastore) NewURL(path string) *url.URL { u := d.c.URL() + scheme := u.Scheme + // In rare cases where vCenter and ESX are accessed using different schemes. + if overrideScheme := os.Getenv("GOVMOMI_DATASTORE_ACCESS_SCHEME"); overrideScheme != "" { + scheme = overrideScheme + } + return &url.URL{ - Scheme: u.Scheme, + Scheme: scheme, Host: u.Host, Path: fmt.Sprintf("/folder/%s", path), RawQuery: url.Values{ @@ -223,8 +230,18 @@ func (d Datastore) ServiceTicket(ctx context.Context, path string, method string delete(q, "dcPath") u.RawQuery = q.Encode() + // Now that we have a host selected, take a copy of the URL. + transferURL := *u + + if internal.UsingEnvoySidecar(d.Client()) { + // Rewrite the host URL to go through the Envoy sidecar on VC. + // Receiver must use a custom dialer. + u = internal.HostGatewayTransferURL(u, host.Reference()) + } + spec := types.SessionManagerHttpServiceRequestSpec{ - Url: u.String(), + // Use the original URL (without rewrites) for the session ticket. + Url: transferURL.String(), // See SessionManagerHttpServiceRequestSpecMethod enum Method: fmt.Sprintf("http%s%s", method[0:1], strings.ToLower(method[1:])), } @@ -261,7 +278,10 @@ func (d Datastore) uploadTicket(ctx context.Context, path string, param *soap.Up return nil, nil, err } - p.Ticket = ticket + if ticket != nil { + p.Ticket = ticket + p.Close = true // disable Keep-Alive connection to ESX + } return u, &p, nil } @@ -277,7 +297,10 @@ func (d Datastore) downloadTicket(ctx context.Context, path string, param *soap.
return nil, nil, err } - p.Ticket = ticket + if ticket != nil { + p.Ticket = ticket + p.Close = true // disable Keep-Alive connection to ESX + } return u, &p, nil } @@ -297,7 +320,13 @@ func (d Datastore) UploadFile(ctx context.Context, file string, path string, par if err != nil { return err } - return d.Client().UploadFile(ctx, file, u, p) + vc := d.Client() + if internal.UsingEnvoySidecar(vc) { + // Override the vim client with a new one that wraps a Unix socket transport. + // Using HTTP here so secure means nothing. + vc = internal.ClientWithEnvoyHostGateway(vc) + } + return vc.UploadFile(ctx, file, u, p) } // Download via soap.Download with an http service ticket @@ -315,7 +344,13 @@ func (d Datastore) DownloadFile(ctx context.Context, path string, file string, p if err != nil { return err } - return d.Client().DownloadFile(ctx, file, u, p) + vc := d.Client() + if internal.UsingEnvoySidecar(vc) { + // Override the vim client with a new one that wraps a Unix socket transport. + // Using HTTP here so secure means nothing. + vc = internal.ClientWithEnvoyHostGateway(vc) + } + return vc.DownloadFile(ctx, file, u, p) } // AttachedHosts returns hosts that have this Datastore attached, accessible and writable. diff --git a/vendor/github.com/vmware/govmomi/object/environment_browser.go b/vendor/github.com/vmware/govmomi/object/environment_browser.go new file mode 100644 index 0000000000..54ab4cb8d5 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/environment_browser.go @@ -0,0 +1,98 @@ +/* +Copyright (c) 2023-2023 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type EnvironmentBrowser struct { + Common +} + +func NewEnvironmentBrowser(c *vim25.Client, ref types.ManagedObjectReference) *EnvironmentBrowser { + return &EnvironmentBrowser{ + Common: NewCommon(c, ref), + } +} + +func (b EnvironmentBrowser) QueryConfigTarget(ctx context.Context, host *HostSystem) (*types.ConfigTarget, error) { + req := types.QueryConfigTarget{ + This: b.Reference(), + } + + if host != nil { + ref := host.Reference() + req.Host = &ref + } + + res, err := methods.QueryConfigTarget(ctx, b.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (b EnvironmentBrowser) QueryTargetCapabilities(ctx context.Context, host *HostSystem) (*types.HostCapability, error) { + req := types.QueryTargetCapabilities{ + This: b.Reference(), + } + + if host != nil { + ref := host.Reference() + req.Host = &ref + } + + res, err := methods.QueryTargetCapabilities(ctx, b.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (b EnvironmentBrowser) QueryConfigOption(ctx context.Context, spec *types.EnvironmentBrowserConfigOptionQuerySpec) (*types.VirtualMachineConfigOption, error) { + req := types.QueryConfigOptionEx{ + This: b.Reference(), + Spec: spec, + } + + res, err := methods.QueryConfigOptionEx(ctx, b.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (b EnvironmentBrowser) QueryConfigOptionDescriptor(ctx context.Context) ([]types.VirtualMachineConfigOptionDescriptor, error) { + req := types.QueryConfigOptionDescriptor{ + This: b.Reference(), + } + + res, err := methods.QueryConfigOptionDescriptor(ctx, b.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/host_certificate_info.go b/vendor/github.com/vmware/govmomi/object/host_certificate_info.go index bb5ee9c916..fd9b370eba 100644 --- a/vendor/github.com/vmware/govmomi/object/host_certificate_info.go +++ b/vendor/github.com/vmware/govmomi/object/host_certificate_info.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2016 VMware, Inc. All Rights Reserved. +Copyright (c) 2016-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
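The new object.EnvironmentBrowser wrapper above can be reached from a ComputeResource (and, further below, from a VirtualMachine). As a rough sketch of intended usage, listing the virtual hardware config options a cluster supports might look like the following; the `listConfigOptions` helper is hypothetical, only the wrapper methods come from the vendored change.

```go
// Rough usage sketch; listConfigOptions is hypothetical, not vendored code.
package main

import (
	"context"

	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25/types"
)

func listConfigOptions(ctx context.Context, cr *object.ComputeResource) ([]types.VirtualMachineConfigOptionDescriptor, error) {
	// EnvironmentBrowser fetches the environmentBrowser property and wraps it.
	envBrowser, err := cr.EnvironmentBrowser(ctx)
	if err != nil {
		return nil, err
	}
	return envBrowser.QueryConfigOptionDescriptor(ctx)
}
```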
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -36,10 +36,10 @@ import ( type HostCertificateInfo struct { types.HostCertificateManagerCertificateInfo - ThumbprintSHA1 string - ThumbprintSHA256 string + ThumbprintSHA1 string `json:"thumbprintSHA1"` + ThumbprintSHA256 string `json:"thumbprintSHA256"` - Err error + Err error `json:"err"` Certificate *x509.Certificate `json:"-"` subjectName *pkix.Name diff --git a/vendor/github.com/vmware/govmomi/object/virtual_device_list.go b/vendor/github.com/vmware/govmomi/object/virtual_device_list.go index 3765506532..92797dcdab 100644 --- a/vendor/github.com/vmware/govmomi/object/virtual_device_list.go +++ b/vendor/github.com/vmware/govmomi/object/virtual_device_list.go @@ -404,9 +404,13 @@ func (l VirtualDeviceList) PickController(kind types.BaseVirtualController) type } // newUnitNumber returns the unit number to use for attaching a new device to the given controller. -func (l VirtualDeviceList) newUnitNumber(c types.BaseVirtualController) int32 { +func (l VirtualDeviceList) newUnitNumber(c types.BaseVirtualController, offset int) int32 { units := make([]bool, 30) + for i := 0; i < offset; i++ { + units[i] = true + } + switch sc := c.(type) { case types.BaseVirtualSCSIController: // The SCSI controller sits on its own bus @@ -455,7 +459,14 @@ func (l VirtualDeviceList) AssignController(device types.BaseVirtualDevice, c ty d := device.GetVirtualDevice() d.ControllerKey = c.GetVirtualController().Key d.UnitNumber = new(int32) - *d.UnitNumber = l.newUnitNumber(c) + + offset := 0 + switch device.(type) { + case types.BaseVirtualEthernetCard: + offset = 7 + } + *d.UnitNumber = l.newUnitNumber(c, offset) + if d.Key == 0 { d.Key = l.newRandomKey() } diff --git a/vendor/github.com/vmware/govmomi/object/virtual_machine.go b/vendor/github.com/vmware/govmomi/object/virtual_machine.go index eeffc19fd3..4665fcb744 100644 --- a/vendor/github.com/vmware/govmomi/object/virtual_machine.go +++ b/vendor/github.com/vmware/govmomi/object/virtual_machine.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2015-2021 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -172,6 +172,15 @@ func (v VirtualMachine) ShutdownGuest(ctx context.Context) error { return err } +func (v VirtualMachine) StandbyGuest(ctx context.Context) error { + req := types.StandbyGuest{ + This: v.Reference(), + } + + _, err := methods.StandbyGuest(ctx, v.c, &req) + return err +} + func (v VirtualMachine) RebootGuest(ctx context.Context) error { req := types.RebootGuest{ This: v.Reference(), @@ -429,6 +438,17 @@ func (v VirtualMachine) Device(ctx context.Context) (VirtualDeviceList, error) { return VirtualDeviceList(o.Config.Hardware.Device), nil } +func (v VirtualMachine) EnvironmentBrowser(ctx context.Context) (*EnvironmentBrowser, error) { + var vm mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{"environmentBrowser"}, &vm) + if err != nil { + return nil, err + } + + return NewEnvironmentBrowser(v.c, vm.EnvironmentBrowser), nil +} + func (v VirtualMachine) HostSystem(ctx context.Context) (*HostSystem, error) { var o mo.VirtualMachine @@ -909,27 +929,6 @@ func (v VirtualMachine) Unregister(ctx context.Context) error { return err } -// QueryEnvironmentBrowser is a helper to get the environmentBrowser property. -func (v VirtualMachine) QueryConfigTarget(ctx context.Context) (*types.ConfigTarget, error) { - var vm mo.VirtualMachine - - err := v.Properties(ctx, v.Reference(), []string{"environmentBrowser"}, &vm) - if err != nil { - return nil, err - } - - req := types.QueryConfigTarget{ - This: vm.EnvironmentBrowser, - } - - res, err := methods.QueryConfigTarget(ctx, v.Client(), &req) - if err != nil { - return nil, err - } - - return res.Returnval, nil -} - func (v VirtualMachine) MountToolsInstaller(ctx context.Context) error { req := types.MountToolsInstaller{ This: v.Reference(), diff --git a/vendor/github.com/vmware/govmomi/pbm/methods/methods.go b/vendor/github.com/vmware/govmomi/pbm/methods/methods.go index fa7f2b200f..032c15c54c 100644 --- a/vendor/github.com/vmware/govmomi/pbm/methods/methods.go +++ b/vendor/github.com/vmware/govmomi/pbm/methods/methods.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2022 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vmware/govmomi/pbm/types/enum.go b/vendor/github.com/vmware/govmomi/pbm/types/enum.go index 66751bb1f4..be05cfd2a0 100644 --- a/vendor/github.com/vmware/govmomi/pbm/types/enum.go +++ b/vendor/github.com/vmware/govmomi/pbm/types/enum.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2022 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -25,8 +25,11 @@ import ( type PbmAssociateAndApplyPolicyStatusPolicyStatus string const ( + // Policy applied successfully. 
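Since VirtualMachine.QueryConfigTarget is removed above in favor of the EnvironmentBrowser accessor, a caller migrating to the new API might look like the sketch below. The `configTarget` helper is hypothetical; passing a nil host relies on the environment browser using its own (the VM's) context.

```go
// Hypothetical migration example for the removed VirtualMachine.QueryConfigTarget.
package main

import (
	"context"

	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25/types"
)

func configTarget(ctx context.Context, vm *object.VirtualMachine) (*types.ConfigTarget, error) {
	envBrowser, err := vm.EnvironmentBrowser(ctx)
	if err != nil {
		return nil, err
	}
	// nil host: query against the environment browser's default context.
	return envBrowser.QueryConfigTarget(ctx, nil)
}
```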
PbmAssociateAndApplyPolicyStatusPolicyStatusSuccess = PbmAssociateAndApplyPolicyStatusPolicyStatus("success") - PbmAssociateAndApplyPolicyStatusPolicyStatusFailed = PbmAssociateAndApplyPolicyStatusPolicyStatus("failed") + // Policy cannot be applied + PbmAssociateAndApplyPolicyStatusPolicyStatusFailed = PbmAssociateAndApplyPolicyStatusPolicyStatus("failed") + // Policy cannot be applied PbmAssociateAndApplyPolicyStatusPolicyStatusInvalid = PbmAssociateAndApplyPolicyStatusPolicyStatus("invalid") ) @@ -34,28 +37,90 @@ func init() { types.Add("pbm:PbmAssociateAndApplyPolicyStatusPolicyStatus", reflect.TypeOf((*PbmAssociateAndApplyPolicyStatusPolicyStatus)(nil)).Elem()) } +// The `PbmBuiltinGenericType_enum` enumerated type defines the list +// of builtin generic datatypes. +// +// See +// `PbmCapabilityGenericTypeInfo*.*PbmCapabilityGenericTypeInfo.genericTypeName`. +// +// A generic datatype indicates how to interpret a collection of values +// of a specific datatype (`PbmCapabilityTypeInfo.typeName`). type PbmBuiltinGenericType string const ( + // Indicates a full or partial range of values (`PbmCapabilityRange`). + // + // A full range specifies both min and max values. + // A partial range specifies one or the other, min or max. PbmBuiltinGenericTypeVMW_RANGE = PbmBuiltinGenericType("VMW_RANGE") - PbmBuiltinGenericTypeVMW_SET = PbmBuiltinGenericType("VMW_SET") + // Indicates a single value or a discrete set of values + // (`PbmCapabilityDiscreteSet`). + PbmBuiltinGenericTypeVMW_SET = PbmBuiltinGenericType("VMW_SET") ) func init() { types.Add("pbm:PbmBuiltinGenericType", reflect.TypeOf((*PbmBuiltinGenericType)(nil)).Elem()) } +// The `PbmBuiltinType_enum` enumerated type defines datatypes +// for storage profiles. +// +// Property metadata +// (`PbmCapabilityPropertyMetadata`) uses the builtin types +// to define data types for storage capabilities and requirements. +// It may also specify the semantics that are applied to a collection +// of builtin type values. See `PbmCapabilityTypeInfo`. +// These semantics are specified as a generic builtin type. +// See `PbmCapabilityGenericTypeInfo`. +// The type information determines how capability constraints are interpreted +// `PbmCapabilityPropertyInstance.value`). type PbmBuiltinType string const ( - PbmBuiltinTypeXSD_LONG = PbmBuiltinType("XSD_LONG") - PbmBuiltinTypeXSD_SHORT = PbmBuiltinType("XSD_SHORT") - PbmBuiltinTypeXSD_INTEGER = PbmBuiltinType("XSD_INTEGER") - PbmBuiltinTypeXSD_INT = PbmBuiltinType("XSD_INT") - PbmBuiltinTypeXSD_STRING = PbmBuiltinType("XSD_STRING") - PbmBuiltinTypeXSD_BOOLEAN = PbmBuiltinType("XSD_BOOLEAN") - PbmBuiltinTypeXSD_DOUBLE = PbmBuiltinType("XSD_DOUBLE") + // Unsigned long value. + // + // This datatype supports the following constraint values. + // - Single value + // - Full or partial range of values (`PbmCapabilityRange`) + // - Discrete set of values (`PbmCapabilityDiscreteSet`) + PbmBuiltinTypeXSD_LONG = PbmBuiltinType("XSD_LONG") + // Datatype not supported. + PbmBuiltinTypeXSD_SHORT = PbmBuiltinType("XSD_SHORT") + // Datatype not supported. + // + // Use XSD\_INT instead. + PbmBuiltinTypeXSD_INTEGER = PbmBuiltinType("XSD_INTEGER") + // Integer value. + // + // This datatype supports the following constraint values. + // - Single value + // - Full or partial range of values (`PbmCapabilityRange`) + // - Discrete set of values (`PbmCapabilityDiscreteSet`) + PbmBuiltinTypeXSD_INT = PbmBuiltinType("XSD_INT") + // String value. 
+ // + // This datatype supports a single value + // or a discrete set of values (`PbmCapabilityDiscreteSet`). + PbmBuiltinTypeXSD_STRING = PbmBuiltinType("XSD_STRING") + // Boolean value. + PbmBuiltinTypeXSD_BOOLEAN = PbmBuiltinType("XSD_BOOLEAN") + // Double precision floating point value. + // + // This datatype supports the following + // constraint values. + // - Single value + // - Full or partial range of values (`PbmCapabilityRange`) + // - Discrete set of values (`PbmCapabilityDiscreteSet`) + PbmBuiltinTypeXSD_DOUBLE = PbmBuiltinType("XSD_DOUBLE") + // Date and time value. PbmBuiltinTypeXSD_DATETIME = PbmBuiltinType("XSD_DATETIME") + // Timespan value (`PbmCapabilityTimeSpan`). + // + // This datatype supports + // the following constraint values. + // - Single value + // - Full or partial range of values (`PbmCapabilityRange`) + // - Discrete set of values (`PbmCapabilityDiscreteSet`) PbmBuiltinTypeVMW_TIMESPAN = PbmBuiltinType("VMW_TIMESPAN") PbmBuiltinTypeVMW_POLICY = PbmBuiltinType("VMW_POLICY") ) @@ -64,6 +129,10 @@ func init() { types.Add("pbm:PbmBuiltinType", reflect.TypeOf((*PbmBuiltinType)(nil)).Elem()) } +// List of operators that are supported for constructing policy. +// +// Currently only tag based properties can use this operator. +// Other operators can be added as required. type PbmCapabilityOperator string const ( @@ -74,52 +143,96 @@ func init() { types.Add("pbm:PbmCapabilityOperator", reflect.TypeOf((*PbmCapabilityOperator)(nil)).Elem()) } +// The `PbmCapabilityTimeUnitType_enum` enumeration type +// defines the supported list of time units for profiles that specify +// time span capabilities and constraints. +// +// See `PbmCapabilityTimeSpan`. type PbmCapabilityTimeUnitType string const ( + // Constraints and capabilities expressed in units of seconds. PbmCapabilityTimeUnitTypeSECONDS = PbmCapabilityTimeUnitType("SECONDS") + // Constraints and capabilities expressed in units of minutes. PbmCapabilityTimeUnitTypeMINUTES = PbmCapabilityTimeUnitType("MINUTES") - PbmCapabilityTimeUnitTypeHOURS = PbmCapabilityTimeUnitType("HOURS") - PbmCapabilityTimeUnitTypeDAYS = PbmCapabilityTimeUnitType("DAYS") - PbmCapabilityTimeUnitTypeWEEKS = PbmCapabilityTimeUnitType("WEEKS") - PbmCapabilityTimeUnitTypeMONTHS = PbmCapabilityTimeUnitType("MONTHS") - PbmCapabilityTimeUnitTypeYEARS = PbmCapabilityTimeUnitType("YEARS") + // Constraints and capabilities expressed in units of hours. + PbmCapabilityTimeUnitTypeHOURS = PbmCapabilityTimeUnitType("HOURS") + // Constraints and capabilities expressed in units of days. + PbmCapabilityTimeUnitTypeDAYS = PbmCapabilityTimeUnitType("DAYS") + // Constraints and capabilities expressed in units of weeks. + PbmCapabilityTimeUnitTypeWEEKS = PbmCapabilityTimeUnitType("WEEKS") + // Constraints and capabilities expressed in units of months. + PbmCapabilityTimeUnitTypeMONTHS = PbmCapabilityTimeUnitType("MONTHS") + // Constraints and capabilities expressed in units of years. + PbmCapabilityTimeUnitTypeYEARS = PbmCapabilityTimeUnitType("YEARS") ) func init() { types.Add("pbm:PbmCapabilityTimeUnitType", reflect.TypeOf((*PbmCapabilityTimeUnitType)(nil)).Elem()) } +// The `PbmComplianceResultComplianceTaskStatus_enum` +// enumeration type defines the set of task status for compliance +// operations. +// +// See `PbmComplianceResult` and +// `PbmRollupComplianceResult`. type PbmComplianceResultComplianceTaskStatus string const ( + // Compliance calculation is in progress. 
PbmComplianceResultComplianceTaskStatusInProgress = PbmComplianceResultComplianceTaskStatus("inProgress") - PbmComplianceResultComplianceTaskStatusSuccess = PbmComplianceResultComplianceTaskStatus("success") - PbmComplianceResultComplianceTaskStatusFailed = PbmComplianceResultComplianceTaskStatus("failed") + // Compliance calculation has succeeded. + PbmComplianceResultComplianceTaskStatusSuccess = PbmComplianceResultComplianceTaskStatus("success") + // Compliance calculation failed due to some exception. + PbmComplianceResultComplianceTaskStatusFailed = PbmComplianceResultComplianceTaskStatus("failed") ) func init() { types.Add("pbm:PbmComplianceResultComplianceTaskStatus", reflect.TypeOf((*PbmComplianceResultComplianceTaskStatus)(nil)).Elem()) } +// The `PbmComplianceStatus_enum` +// enumeration type defines the set of status values +// for compliance operations. +// +// See `PbmComplianceResult` and +// `PbmRollupComplianceResult`. type PbmComplianceStatus string const ( - PbmComplianceStatusCompliant = PbmComplianceStatus("compliant") - PbmComplianceStatusNonCompliant = PbmComplianceStatus("nonCompliant") - PbmComplianceStatusUnknown = PbmComplianceStatus("unknown") + // Entity is in compliance. + PbmComplianceStatusCompliant = PbmComplianceStatus("compliant") + // Entity is out of compliance. + PbmComplianceStatusNonCompliant = PbmComplianceStatus("nonCompliant") + // Compliance status of the entity is not known. + PbmComplianceStatusUnknown = PbmComplianceStatus("unknown") + // Compliance computation is not applicable for this entity, + // because it does not have any storage requirements that + // apply to the object-based datastore on which this entity is placed. PbmComplianceStatusNotApplicable = PbmComplianceStatus("notApplicable") - PbmComplianceStatusOutOfDate = PbmComplianceStatus("outOfDate") + // This is the same as `PbmComplianceResult.mismatch` + // variable. + // + // Compliance status becomes out-of-date when the profile + // associated with the entity is edited and not applied. The compliance + // status will remain in out-of-date compliance status until the latest + // policy is applied to the entity. + PbmComplianceStatusOutOfDate = PbmComplianceStatus("outOfDate") ) func init() { types.Add("pbm:PbmComplianceStatus", reflect.TypeOf((*PbmComplianceStatus)(nil)).Elem()) } +// This enum corresponds to the keystores used by +// sps. type PbmDebugManagerKeystoreName string const ( - PbmDebugManagerKeystoreNameSMS = PbmDebugManagerKeystoreName("SMS") + // Refers to SMS keystore + PbmDebugManagerKeystoreNameSMS = PbmDebugManagerKeystoreName("SMS") + // Refers to TRUSTED\_ROOTS keystore. PbmDebugManagerKeystoreNameTRUSTED_ROOTS = PbmDebugManagerKeystoreName("TRUSTED_ROOTS") ) @@ -127,12 +240,30 @@ func init() { types.Add("pbm:PbmDebugManagerKeystoreName", reflect.TypeOf((*PbmDebugManagerKeystoreName)(nil)).Elem()) } +// The enumeration type defines the set of health status values for an entity +// that is part of entity health operation. type PbmHealthStatusForEntity string const ( - PbmHealthStatusForEntityRed = PbmHealthStatusForEntity("red") - PbmHealthStatusForEntityYellow = PbmHealthStatusForEntity("yellow") - PbmHealthStatusForEntityGreen = PbmHealthStatusForEntity("green") + // For file share: 'red' if the file server for this file share is in error + // state or any of its backing vSAN objects are degraded. + // + // For FCD: 'red' if the datastore on which the FCD resides is not + // accessible from any of the hosts it is mounted. 
+ PbmHealthStatusForEntityRed = PbmHealthStatusForEntity("red") + // For file share: 'yellow' if some backing objects are repairing, i.e. + // + // warning state. + // For FCD: 'yellow' if the datastore on which the entity resides is + // accessible only from some of the hosts it is mounted but not all. + PbmHealthStatusForEntityYellow = PbmHealthStatusForEntity("yellow") + // For file share: 'green' if the file server for this file share is + // running properly and all its backing vSAN objects are healthy. + // + // For FCD: 'green' if the datastore on which the entity resides + // is accessible from all the hosts it is mounted. + PbmHealthStatusForEntityGreen = PbmHealthStatusForEntity("green") + // If the health status of a file share is unknown, not valid for FCD. PbmHealthStatusForEntityUnknown = PbmHealthStatusForEntity("unknown") ) @@ -140,6 +271,11 @@ func init() { types.Add("pbm:PbmHealthStatusForEntity", reflect.TypeOf((*PbmHealthStatusForEntity)(nil)).Elem()) } +// Recognized types of an IO Filter. +// +// String constant used in `IofilterInfo#filterType`. +// These should match(upper case) the IO Filter classes as defined by IO Filter framework. +// See https://opengrok.eng.vmware.com/source/xref/vmcore-main.perforce.1666/bora/scons/apps/esx/iofilterApps.sc#33 type PbmIofilterInfoFilterType string const ( @@ -156,6 +292,7 @@ func init() { types.Add("pbm:PbmIofilterInfoFilterType", reflect.TypeOf((*PbmIofilterInfoFilterType)(nil)).Elem()) } +// Denotes the line of service of a schema. type PbmLineOfServiceInfoLineOfServiceEnum string const ( @@ -174,28 +311,43 @@ func init() { types.Add("pbm:PbmLineOfServiceInfoLineOfServiceEnum", reflect.TypeOf((*PbmLineOfServiceInfoLineOfServiceEnum)(nil)).Elem()) } +// This enum corresponds to the different packages whose logging +// is configured independently by sps service. type PbmLoggingConfigurationComponent string const ( - PbmLoggingConfigurationComponentPbm = PbmLoggingConfigurationComponent("pbm") - PbmLoggingConfigurationComponentVslm = PbmLoggingConfigurationComponent("vslm") - PbmLoggingConfigurationComponentSms = PbmLoggingConfigurationComponent("sms") - PbmLoggingConfigurationComponentSpbm = PbmLoggingConfigurationComponent("spbm") - PbmLoggingConfigurationComponentSps = PbmLoggingConfigurationComponent("sps") - PbmLoggingConfigurationComponentHttpclient_header = PbmLoggingConfigurationComponent("httpclient_header") + // Modifies logging level of com.vmware.pbm package. + PbmLoggingConfigurationComponentPbm = PbmLoggingConfigurationComponent("pbm") + // Modifies logging level of com.vmware.vslm package. + PbmLoggingConfigurationComponentVslm = PbmLoggingConfigurationComponent("vslm") + // Modifies logging level of com.vmware.vim.sms package. + PbmLoggingConfigurationComponentSms = PbmLoggingConfigurationComponent("sms") + // Modifies logging level of com.vmware.spbm package. + PbmLoggingConfigurationComponentSpbm = PbmLoggingConfigurationComponent("spbm") + // Modifies logging level of com.vmware.sps package. + PbmLoggingConfigurationComponentSps = PbmLoggingConfigurationComponent("sps") + // Modifies logging level of httpclient wire header. + PbmLoggingConfigurationComponentHttpclient_header = PbmLoggingConfigurationComponent("httpclient_header") + // Modifies logging level of httpclient wire content. 
PbmLoggingConfigurationComponentHttpclient_content = PbmLoggingConfigurationComponent("httpclient_content") - PbmLoggingConfigurationComponentVmomi = PbmLoggingConfigurationComponent("vmomi") + // Modifies logging level of com.vmware.vim.vmomi package. + PbmLoggingConfigurationComponentVmomi = PbmLoggingConfigurationComponent("vmomi") ) func init() { types.Add("pbm:PbmLoggingConfigurationComponent", reflect.TypeOf((*PbmLoggingConfigurationComponent)(nil)).Elem()) } +// This enum corresponds to the different log levels supported +// by sps service. type PbmLoggingConfigurationLogLevel string const ( - PbmLoggingConfigurationLogLevelINFO = PbmLoggingConfigurationLogLevel("INFO") + // Refers to INFO level logging + PbmLoggingConfigurationLogLevelINFO = PbmLoggingConfigurationLogLevel("INFO") + // Refers to DEBUG level logging. PbmLoggingConfigurationLogLevelDEBUG = PbmLoggingConfigurationLogLevel("DEBUG") + // Refers to TRACE level logging. PbmLoggingConfigurationLogLevelTRACE = PbmLoggingConfigurationLogLevel("TRACE") ) @@ -203,42 +355,76 @@ func init() { types.Add("pbm:PbmLoggingConfigurationLogLevel", reflect.TypeOf((*PbmLoggingConfigurationLogLevel)(nil)).Elem()) } +// The `PbmObjectType_enum` enumerated type +// defines vSphere Server object types that are known +// to the Storage Policy Server. +// +// See `PbmServerObjectRef*.*PbmServerObjectRef.objectType`. type PbmObjectType string const ( - PbmObjectTypeVirtualMachine = PbmObjectType("virtualMachine") + // Indicates a virtual machine, not including the disks, identified by the virtual machine + // identifier _virtual-machine-mor_. + PbmObjectTypeVirtualMachine = PbmObjectType("virtualMachine") + // Indicates the virtual machine and all its disks, identified by the virtual machine + // identifier _virtual-machine-mor_. PbmObjectTypeVirtualMachineAndDisks = PbmObjectType("virtualMachineAndDisks") - PbmObjectTypeVirtualDiskId = PbmObjectType("virtualDiskId") - PbmObjectTypeVirtualDiskUUID = PbmObjectType("virtualDiskUUID") - PbmObjectTypeDatastore = PbmObjectType("datastore") - PbmObjectTypeVsanObjectId = PbmObjectType("vsanObjectId") - PbmObjectTypeFileShareId = PbmObjectType("fileShareId") - PbmObjectTypeUnknown = PbmObjectType("unknown") + // Indicates a virtual disk, identified by disk key + // (_virtual-machine-mor_:_disk-key_). + PbmObjectTypeVirtualDiskId = PbmObjectType("virtualDiskId") + // Indicates a virtual disk, identified by UUID - for First Class Storage Object support. + PbmObjectTypeVirtualDiskUUID = PbmObjectType("virtualDiskUUID") + // Indicates a datastore. + PbmObjectTypeDatastore = PbmObjectType("datastore") + // Indicates a VSAN object + PbmObjectTypeVsanObjectId = PbmObjectType("vsanObjectId") + // Indicates a file service + PbmObjectTypeFileShareId = PbmObjectType("fileShareId") + // Unknown object type. + PbmObjectTypeUnknown = PbmObjectType("unknown") ) func init() { types.Add("pbm:PbmObjectType", reflect.TypeOf((*PbmObjectType)(nil)).Elem()) } +// The `PbmOperation_enum` enumerated type +// defines the provisioning operation being performed on the entity like FCD, virtual machine. type PbmOperation string const ( - PbmOperationCREATE = PbmOperation("CREATE") - PbmOperationREGISTER = PbmOperation("REGISTER") + // Indicates create operation of an entity. + PbmOperationCREATE = PbmOperation("CREATE") + // Indicates register operation of an entity. + PbmOperationREGISTER = PbmOperation("REGISTER") + // Indicates reconfigure operation of an entity. 
PbmOperationRECONFIGURE = PbmOperation("RECONFIGURE") - PbmOperationMIGRATE = PbmOperation("MIGRATE") - PbmOperationCLONE = PbmOperation("CLONE") + // Indicates migrate operation of an entity. + PbmOperationMIGRATE = PbmOperation("MIGRATE") + // Indicates clone operation of an entity. + PbmOperationCLONE = PbmOperation("CLONE") ) func init() { types.Add("pbm:PbmOperation", reflect.TypeOf((*PbmOperation)(nil)).Elem()) } +// Volume allocation type constants. type PbmPolicyAssociationVolumeAllocationType string const ( - PbmPolicyAssociationVolumeAllocationTypeFullyInitialized = PbmPolicyAssociationVolumeAllocationType("FullyInitialized") - PbmPolicyAssociationVolumeAllocationTypeReserveSpace = PbmPolicyAssociationVolumeAllocationType("ReserveSpace") + // Space required is fully allocated and initialized. + // + // It is wiped clean of any previous content on the + // physical media. Gives faster runtime IO performance. + PbmPolicyAssociationVolumeAllocationTypeFullyInitialized = PbmPolicyAssociationVolumeAllocationType("FullyInitialized") + // Space required is fully allocated. + // + // It may contain + // stale data on the physical media. + PbmPolicyAssociationVolumeAllocationTypeReserveSpace = PbmPolicyAssociationVolumeAllocationType("ReserveSpace") + // Space required is allocated and zeroed on demand + // as the space is used. PbmPolicyAssociationVolumeAllocationTypeConserveSpaceWhenPossible = PbmPolicyAssociationVolumeAllocationType("ConserveSpaceWhenPossible") ) @@ -246,11 +432,30 @@ func init() { types.Add("pbm:PbmPolicyAssociationVolumeAllocationType", reflect.TypeOf((*PbmPolicyAssociationVolumeAllocationType)(nil)).Elem()) } +// The `PbmProfileCategoryEnum_enum` +// enumerated type defines the profile categories for a capability-based +// storage profile. +// +// See +// `PbmCapabilityProfile`. type PbmProfileCategoryEnum string const ( - PbmProfileCategoryEnumREQUIREMENT = PbmProfileCategoryEnum("REQUIREMENT") - PbmProfileCategoryEnumRESOURCE = PbmProfileCategoryEnum("RESOURCE") + // Indicates a storage requirement. + // + // Requirements are based on + // storage capabilities. + PbmProfileCategoryEnumREQUIREMENT = PbmProfileCategoryEnum("REQUIREMENT") + // Indicates a storage capability. + // + // Storage capabilities + // are defined by storage providers. + PbmProfileCategoryEnumRESOURCE = PbmProfileCategoryEnum("RESOURCE") + // Indicates a data service policy that can be embedded into + // another storage policy. + // + // Policies of this type can't be assigned to + // Virtual Machines or Virtual Disks. PbmProfileCategoryEnumDATA_SERVICE_POLICY = PbmProfileCategoryEnum("DATA_SERVICE_POLICY") ) @@ -258,9 +463,14 @@ func init() { types.Add("pbm:PbmProfileCategoryEnum", reflect.TypeOf((*PbmProfileCategoryEnum)(nil)).Elem()) } +// The `PbmProfileResourceTypeEnum_enum` enumerated type defines the set of resource +// types that are supported for profile management. +// +// See `PbmProfileResourceType`. type PbmProfileResourceTypeEnum string const ( + // Indicates resources that support storage profiles. PbmProfileResourceTypeEnumSTORAGE = PbmProfileResourceTypeEnum("STORAGE") ) @@ -268,12 +478,21 @@ func init() { types.Add("pbm:PbmProfileResourceTypeEnum", reflect.TypeOf((*PbmProfileResourceTypeEnum)(nil)).Elem()) } +// System pre-created profile types. 
type PbmSystemCreatedProfileType string const ( - PbmSystemCreatedProfileTypeVsanDefaultProfile = PbmSystemCreatedProfileType("VsanDefaultProfile") - PbmSystemCreatedProfileTypeVVolDefaultProfile = PbmSystemCreatedProfileType("VVolDefaultProfile") - PbmSystemCreatedProfileTypePmemDefaultProfile = PbmSystemCreatedProfileType("PmemDefaultProfile") + // Indicates the system pre-created editable VSAN default profile. + PbmSystemCreatedProfileTypeVsanDefaultProfile = PbmSystemCreatedProfileType("VsanDefaultProfile") + // Indicates the system pre-created non-editable default profile + // for VVOL datastores. + PbmSystemCreatedProfileTypeVVolDefaultProfile = PbmSystemCreatedProfileType("VVolDefaultProfile") + // Indicates the system pre-created non-editable default profile + // for PMem datastores + PbmSystemCreatedProfileTypePmemDefaultProfile = PbmSystemCreatedProfileType("PmemDefaultProfile") + // Indicates the system pre-created non-editable VMC default profile. + PbmSystemCreatedProfileTypeVmcManagementProfile = PbmSystemCreatedProfileType("VmcManagementProfile") + // Indicates the system pre-created non-editable VSANMAX default profile. PbmSystemCreatedProfileTypeVsanMaxDefaultProfile = PbmSystemCreatedProfileType("VsanMaxDefaultProfile") ) @@ -281,25 +500,39 @@ func init() { types.Add("pbm:PbmSystemCreatedProfileType", reflect.TypeOf((*PbmSystemCreatedProfileType)(nil)).Elem()) } +// The `PbmVmOperation_enum` enumerated type +// defines the provisioning operation being performed on the virtual machine. type PbmVmOperation string const ( - PbmVmOperationCREATE = PbmVmOperation("CREATE") + // Indicates create operation of a virtual machine. + PbmVmOperationCREATE = PbmVmOperation("CREATE") + // Indicates reconfigure operation of a virtual machine. PbmVmOperationRECONFIGURE = PbmVmOperation("RECONFIGURE") - PbmVmOperationMIGRATE = PbmVmOperation("MIGRATE") - PbmVmOperationCLONE = PbmVmOperation("CLONE") + // Indicates migrate operation of a virtual machine. + PbmVmOperationMIGRATE = PbmVmOperation("MIGRATE") + // Indicates clone operation of a virtual machine. + PbmVmOperationCLONE = PbmVmOperation("CLONE") ) func init() { types.Add("pbm:PbmVmOperation", reflect.TypeOf((*PbmVmOperation)(nil)).Elem()) } +// The `PbmVvolType_enum` enumeration type +// defines VVOL types. +// +// VvolType's are referenced to specify which objectType +// to fetch for default capability. type PbmVvolType string const ( + // meta-data volume PbmVvolTypeConfig = PbmVvolType("Config") - PbmVvolTypeData = PbmVvolType("Data") - PbmVvolTypeSwap = PbmVvolType("Swap") + // vmdk volume + PbmVvolTypeData = PbmVvolType("Data") + // swap volume + PbmVvolTypeSwap = PbmVvolType("Swap") ) func init() { diff --git a/vendor/github.com/vmware/govmomi/pbm/types/if.go b/vendor/github.com/vmware/govmomi/pbm/types/if.go index a740a25dab..4008dffff7 100644 --- a/vendor/github.com/vmware/govmomi/pbm/types/if.go +++ b/vendor/github.com/vmware/govmomi/pbm/types/if.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2022 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
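For readers skimming the newly documented `pbm` types in this change, the following sketch (not part of the vendored diff) shows how the capability types fit together when assembling a REQUIREMENT-category `PbmCapabilityProfileCreateSpec`. It is a minimal, illustrative example under stated assumptions: the `VSAN` namespace, the `hostFailuresToTolerate` capability id, and the values used are placeholders, and the final hand-off to a connected govmomi `pbm.Client` (e.g. its `CreateProfile` call) is only mentioned in a comment rather than asserted as the canonical flow.

```go
// Illustrative sketch only; not part of the vendored change.
package main

import (
	"fmt"

	pbmtypes "github.com/vmware/govmomi/pbm/types"
)

// buildSpec assembles a REQUIREMENT-category profile spec.
// "VSAN" / "hostFailuresToTolerate" are assumed example values; real ids come
// from the provider's capability metadata (PbmCapabilityMetadata).
func buildSpec() pbmtypes.PbmCapabilityProfileCreateSpec {
	// A property instance holds the value required for one capability property.
	prop := pbmtypes.PbmCapabilityPropertyInstance{
		Id:    "hostFailuresToTolerate",
		Value: int32(1),
	}

	// A capability instance groups constraints on that capability's properties.
	capability := pbmtypes.PbmCapabilityInstance{
		Id: pbmtypes.PbmCapabilityMetadataUniqueId{
			Namespace: "VSAN",
			Id:        "hostFailuresToTolerate",
		},
		Constraint: []pbmtypes.PbmCapabilityConstraintInstance{
			{PropertyInstance: []pbmtypes.PbmCapabilityPropertyInstance{prop}},
		},
	}

	// Subprofiles correspond to rule sets: constraints inside a subprofile are
	// ANDed, while the subprofiles themselves are ORed.
	constraints := pbmtypes.PbmCapabilitySubProfileConstraints{
		SubProfiles: []pbmtypes.PbmCapabilitySubProfile{
			{Name: "Rule-Set 1", Capability: []pbmtypes.PbmCapabilityInstance{capability}},
		},
	}

	return pbmtypes.PbmCapabilityProfileCreateSpec{
		Name:        "example-requirement-profile",
		Description: "FTT=1 requirement (illustrative)",
		Category:    string(pbmtypes.PbmProfileCategoryEnumREQUIREMENT),
		ResourceType: pbmtypes.PbmProfileResourceType{
			ResourceType: string(pbmtypes.PbmProfileResourceTypeEnumSTORAGE),
		},
		Constraints: &constraints,
	}
}

func main() {
	spec := buildSpec()
	fmt.Printf("%+v\n", spec)
	// With a connected govmomi pbm.Client, this spec would typically be passed
	// to the profile manager (for example via the client's CreateProfile call).
}
```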
diff --git a/vendor/github.com/vmware/govmomi/pbm/types/types.go b/vendor/github.com/vmware/govmomi/pbm/types/types.go index 1687df447d..4c6f72caec 100644 --- a/vendor/github.com/vmware/govmomi/pbm/types/types.go +++ b/vendor/github.com/vmware/govmomi/pbm/types/types.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2022 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,246 +23,304 @@ import ( "github.com/vmware/govmomi/vim25/types" ) +// A boxed array of `PbmCapabilityConstraintInstance`. To be used in `Any` placeholders. type ArrayOfPbmCapabilityConstraintInstance struct { - PbmCapabilityConstraintInstance []PbmCapabilityConstraintInstance `xml:"PbmCapabilityConstraintInstance,omitempty"` + PbmCapabilityConstraintInstance []PbmCapabilityConstraintInstance `xml:"PbmCapabilityConstraintInstance,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmCapabilityConstraintInstance", reflect.TypeOf((*ArrayOfPbmCapabilityConstraintInstance)(nil)).Elem()) } +// A boxed array of `PbmCapabilityInstance`. To be used in `Any` placeholders. type ArrayOfPbmCapabilityInstance struct { - PbmCapabilityInstance []PbmCapabilityInstance `xml:"PbmCapabilityInstance,omitempty"` + PbmCapabilityInstance []PbmCapabilityInstance `xml:"PbmCapabilityInstance,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmCapabilityInstance", reflect.TypeOf((*ArrayOfPbmCapabilityInstance)(nil)).Elem()) } +// A boxed array of `PbmCapabilityMetadata`. To be used in `Any` placeholders. type ArrayOfPbmCapabilityMetadata struct { - PbmCapabilityMetadata []PbmCapabilityMetadata `xml:"PbmCapabilityMetadata,omitempty"` + PbmCapabilityMetadata []PbmCapabilityMetadata `xml:"PbmCapabilityMetadata,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmCapabilityMetadata", reflect.TypeOf((*ArrayOfPbmCapabilityMetadata)(nil)).Elem()) } +// A boxed array of `PbmCapabilityMetadataPerCategory`. To be used in `Any` placeholders. type ArrayOfPbmCapabilityMetadataPerCategory struct { - PbmCapabilityMetadataPerCategory []PbmCapabilityMetadataPerCategory `xml:"PbmCapabilityMetadataPerCategory,omitempty"` + PbmCapabilityMetadataPerCategory []PbmCapabilityMetadataPerCategory `xml:"PbmCapabilityMetadataPerCategory,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmCapabilityMetadataPerCategory", reflect.TypeOf((*ArrayOfPbmCapabilityMetadataPerCategory)(nil)).Elem()) } +// A boxed array of `PbmCapabilityPropertyInstance`. To be used in `Any` placeholders. type ArrayOfPbmCapabilityPropertyInstance struct { - PbmCapabilityPropertyInstance []PbmCapabilityPropertyInstance `xml:"PbmCapabilityPropertyInstance,omitempty"` + PbmCapabilityPropertyInstance []PbmCapabilityPropertyInstance `xml:"PbmCapabilityPropertyInstance,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmCapabilityPropertyInstance", reflect.TypeOf((*ArrayOfPbmCapabilityPropertyInstance)(nil)).Elem()) } +// A boxed array of `PbmCapabilityPropertyMetadata`. To be used in `Any` placeholders. 
type ArrayOfPbmCapabilityPropertyMetadata struct { - PbmCapabilityPropertyMetadata []PbmCapabilityPropertyMetadata `xml:"PbmCapabilityPropertyMetadata,omitempty"` + PbmCapabilityPropertyMetadata []PbmCapabilityPropertyMetadata `xml:"PbmCapabilityPropertyMetadata,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmCapabilityPropertyMetadata", reflect.TypeOf((*ArrayOfPbmCapabilityPropertyMetadata)(nil)).Elem()) } +// A boxed array of `PbmCapabilitySchema`. To be used in `Any` placeholders. type ArrayOfPbmCapabilitySchema struct { - PbmCapabilitySchema []PbmCapabilitySchema `xml:"PbmCapabilitySchema,omitempty"` + PbmCapabilitySchema []PbmCapabilitySchema `xml:"PbmCapabilitySchema,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmCapabilitySchema", reflect.TypeOf((*ArrayOfPbmCapabilitySchema)(nil)).Elem()) } +// A boxed array of `PbmCapabilitySubProfile`. To be used in `Any` placeholders. type ArrayOfPbmCapabilitySubProfile struct { - PbmCapabilitySubProfile []PbmCapabilitySubProfile `xml:"PbmCapabilitySubProfile,omitempty"` + PbmCapabilitySubProfile []PbmCapabilitySubProfile `xml:"PbmCapabilitySubProfile,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmCapabilitySubProfile", reflect.TypeOf((*ArrayOfPbmCapabilitySubProfile)(nil)).Elem()) } +// A boxed array of `PbmCapabilityVendorNamespaceInfo`. To be used in `Any` placeholders. type ArrayOfPbmCapabilityVendorNamespaceInfo struct { - PbmCapabilityVendorNamespaceInfo []PbmCapabilityVendorNamespaceInfo `xml:"PbmCapabilityVendorNamespaceInfo,omitempty"` + PbmCapabilityVendorNamespaceInfo []PbmCapabilityVendorNamespaceInfo `xml:"PbmCapabilityVendorNamespaceInfo,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmCapabilityVendorNamespaceInfo", reflect.TypeOf((*ArrayOfPbmCapabilityVendorNamespaceInfo)(nil)).Elem()) } +// A boxed array of `PbmCapabilityVendorResourceTypeInfo`. To be used in `Any` placeholders. type ArrayOfPbmCapabilityVendorResourceTypeInfo struct { - PbmCapabilityVendorResourceTypeInfo []PbmCapabilityVendorResourceTypeInfo `xml:"PbmCapabilityVendorResourceTypeInfo,omitempty"` + PbmCapabilityVendorResourceTypeInfo []PbmCapabilityVendorResourceTypeInfo `xml:"PbmCapabilityVendorResourceTypeInfo,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmCapabilityVendorResourceTypeInfo", reflect.TypeOf((*ArrayOfPbmCapabilityVendorResourceTypeInfo)(nil)).Elem()) } +// A boxed array of `PbmCompliancePolicyStatus`. To be used in `Any` placeholders. type ArrayOfPbmCompliancePolicyStatus struct { - PbmCompliancePolicyStatus []PbmCompliancePolicyStatus `xml:"PbmCompliancePolicyStatus,omitempty"` + PbmCompliancePolicyStatus []PbmCompliancePolicyStatus `xml:"PbmCompliancePolicyStatus,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmCompliancePolicyStatus", reflect.TypeOf((*ArrayOfPbmCompliancePolicyStatus)(nil)).Elem()) } +// A boxed array of `PbmComplianceResult`. To be used in `Any` placeholders. type ArrayOfPbmComplianceResult struct { - PbmComplianceResult []PbmComplianceResult `xml:"PbmComplianceResult,omitempty"` + PbmComplianceResult []PbmComplianceResult `xml:"PbmComplianceResult,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmComplianceResult", reflect.TypeOf((*ArrayOfPbmComplianceResult)(nil)).Elem()) } +// A boxed array of `PbmDatastoreSpaceStatistics`. To be used in `Any` placeholders. 
type ArrayOfPbmDatastoreSpaceStatistics struct { - PbmDatastoreSpaceStatistics []PbmDatastoreSpaceStatistics `xml:"PbmDatastoreSpaceStatistics,omitempty"` + PbmDatastoreSpaceStatistics []PbmDatastoreSpaceStatistics `xml:"PbmDatastoreSpaceStatistics,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmDatastoreSpaceStatistics", reflect.TypeOf((*ArrayOfPbmDatastoreSpaceStatistics)(nil)).Elem()) } +// A boxed array of `PbmDefaultProfileInfo`. To be used in `Any` placeholders. type ArrayOfPbmDefaultProfileInfo struct { - PbmDefaultProfileInfo []PbmDefaultProfileInfo `xml:"PbmDefaultProfileInfo,omitempty"` + PbmDefaultProfileInfo []PbmDefaultProfileInfo `xml:"PbmDefaultProfileInfo,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmDefaultProfileInfo", reflect.TypeOf((*ArrayOfPbmDefaultProfileInfo)(nil)).Elem()) } +// A boxed array of `PbmFaultNoPermissionEntityPrivileges`. To be used in `Any` placeholders. +type ArrayOfPbmFaultNoPermissionEntityPrivileges struct { + PbmFaultNoPermissionEntityPrivileges []PbmFaultNoPermissionEntityPrivileges `xml:"PbmFaultNoPermissionEntityPrivileges,omitempty" json:"_value"` +} + +func init() { + types.Add("pbm:ArrayOfPbmFaultNoPermissionEntityPrivileges", reflect.TypeOf((*ArrayOfPbmFaultNoPermissionEntityPrivileges)(nil)).Elem()) +} + +// A boxed array of `PbmLoggingConfiguration`. To be used in `Any` placeholders. +type ArrayOfPbmLoggingConfiguration struct { + PbmLoggingConfiguration []PbmLoggingConfiguration `xml:"PbmLoggingConfiguration,omitempty" json:"_value"` +} + +func init() { + types.Add("pbm:ArrayOfPbmLoggingConfiguration", reflect.TypeOf((*ArrayOfPbmLoggingConfiguration)(nil)).Elem()) +} + +// A boxed array of `PbmPlacementCompatibilityResult`. To be used in `Any` placeholders. type ArrayOfPbmPlacementCompatibilityResult struct { - PbmPlacementCompatibilityResult []PbmPlacementCompatibilityResult `xml:"PbmPlacementCompatibilityResult,omitempty"` + PbmPlacementCompatibilityResult []PbmPlacementCompatibilityResult `xml:"PbmPlacementCompatibilityResult,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmPlacementCompatibilityResult", reflect.TypeOf((*ArrayOfPbmPlacementCompatibilityResult)(nil)).Elem()) } +// A boxed array of `PbmPlacementHub`. To be used in `Any` placeholders. type ArrayOfPbmPlacementHub struct { - PbmPlacementHub []PbmPlacementHub `xml:"PbmPlacementHub,omitempty"` + PbmPlacementHub []PbmPlacementHub `xml:"PbmPlacementHub,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmPlacementHub", reflect.TypeOf((*ArrayOfPbmPlacementHub)(nil)).Elem()) } +// A boxed array of `PbmPlacementMatchingResources`. To be used in `Any` placeholders. type ArrayOfPbmPlacementMatchingResources struct { - PbmPlacementMatchingResources []BasePbmPlacementMatchingResources `xml:"PbmPlacementMatchingResources,omitempty,typeattr"` + PbmPlacementMatchingResources []BasePbmPlacementMatchingResources `xml:"PbmPlacementMatchingResources,omitempty,typeattr" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmPlacementMatchingResources", reflect.TypeOf((*ArrayOfPbmPlacementMatchingResources)(nil)).Elem()) } +// A boxed array of `PbmPlacementRequirement`. To be used in `Any` placeholders. 
type ArrayOfPbmPlacementRequirement struct { - PbmPlacementRequirement []BasePbmPlacementRequirement `xml:"PbmPlacementRequirement,omitempty,typeattr"` + PbmPlacementRequirement []BasePbmPlacementRequirement `xml:"PbmPlacementRequirement,omitempty,typeattr" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmPlacementRequirement", reflect.TypeOf((*ArrayOfPbmPlacementRequirement)(nil)).Elem()) } +// A boxed array of `PbmPlacementResourceUtilization`. To be used in `Any` placeholders. type ArrayOfPbmPlacementResourceUtilization struct { - PbmPlacementResourceUtilization []PbmPlacementResourceUtilization `xml:"PbmPlacementResourceUtilization,omitempty"` + PbmPlacementResourceUtilization []PbmPlacementResourceUtilization `xml:"PbmPlacementResourceUtilization,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmPlacementResourceUtilization", reflect.TypeOf((*ArrayOfPbmPlacementResourceUtilization)(nil)).Elem()) } +// A boxed array of `PbmProfile`. To be used in `Any` placeholders. type ArrayOfPbmProfile struct { - PbmProfile []BasePbmProfile `xml:"PbmProfile,omitempty,typeattr"` + PbmProfile []BasePbmProfile `xml:"PbmProfile,omitempty,typeattr" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmProfile", reflect.TypeOf((*ArrayOfPbmProfile)(nil)).Elem()) } +// A boxed array of `PbmProfileId`. To be used in `Any` placeholders. type ArrayOfPbmProfileId struct { - PbmProfileId []PbmProfileId `xml:"PbmProfileId,omitempty"` + PbmProfileId []PbmProfileId `xml:"PbmProfileId,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmProfileId", reflect.TypeOf((*ArrayOfPbmProfileId)(nil)).Elem()) } +// A boxed array of `PbmProfileOperationOutcome`. To be used in `Any` placeholders. type ArrayOfPbmProfileOperationOutcome struct { - PbmProfileOperationOutcome []PbmProfileOperationOutcome `xml:"PbmProfileOperationOutcome,omitempty"` + PbmProfileOperationOutcome []PbmProfileOperationOutcome `xml:"PbmProfileOperationOutcome,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmProfileOperationOutcome", reflect.TypeOf((*ArrayOfPbmProfileOperationOutcome)(nil)).Elem()) } +// A boxed array of `PbmProfileResourceType`. To be used in `Any` placeholders. type ArrayOfPbmProfileResourceType struct { - PbmProfileResourceType []PbmProfileResourceType `xml:"PbmProfileResourceType,omitempty"` + PbmProfileResourceType []PbmProfileResourceType `xml:"PbmProfileResourceType,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmProfileResourceType", reflect.TypeOf((*ArrayOfPbmProfileResourceType)(nil)).Elem()) } +// A boxed array of `PbmProfileType`. To be used in `Any` placeholders. type ArrayOfPbmProfileType struct { - PbmProfileType []PbmProfileType `xml:"PbmProfileType,omitempty"` + PbmProfileType []PbmProfileType `xml:"PbmProfileType,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmProfileType", reflect.TypeOf((*ArrayOfPbmProfileType)(nil)).Elem()) } +// A boxed array of `PbmQueryProfileResult`. To be used in `Any` placeholders. type ArrayOfPbmQueryProfileResult struct { - PbmQueryProfileResult []PbmQueryProfileResult `xml:"PbmQueryProfileResult,omitempty"` + PbmQueryProfileResult []PbmQueryProfileResult `xml:"PbmQueryProfileResult,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmQueryProfileResult", reflect.TypeOf((*ArrayOfPbmQueryProfileResult)(nil)).Elem()) } +// A boxed array of `PbmQueryReplicationGroupResult`. To be used in `Any` placeholders. 
type ArrayOfPbmQueryReplicationGroupResult struct { - PbmQueryReplicationGroupResult []PbmQueryReplicationGroupResult `xml:"PbmQueryReplicationGroupResult,omitempty"` + PbmQueryReplicationGroupResult []PbmQueryReplicationGroupResult `xml:"PbmQueryReplicationGroupResult,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmQueryReplicationGroupResult", reflect.TypeOf((*ArrayOfPbmQueryReplicationGroupResult)(nil)).Elem()) } +// A boxed array of `PbmRollupComplianceResult`. To be used in `Any` placeholders. type ArrayOfPbmRollupComplianceResult struct { - PbmRollupComplianceResult []PbmRollupComplianceResult `xml:"PbmRollupComplianceResult,omitempty"` + PbmRollupComplianceResult []PbmRollupComplianceResult `xml:"PbmRollupComplianceResult,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmRollupComplianceResult", reflect.TypeOf((*ArrayOfPbmRollupComplianceResult)(nil)).Elem()) } +// A boxed array of `PbmServerObjectRef`. To be used in `Any` placeholders. type ArrayOfPbmServerObjectRef struct { - PbmServerObjectRef []PbmServerObjectRef `xml:"PbmServerObjectRef,omitempty"` + PbmServerObjectRef []PbmServerObjectRef `xml:"PbmServerObjectRef,omitempty" json:"_value"` } func init() { types.Add("pbm:ArrayOfPbmServerObjectRef", reflect.TypeOf((*ArrayOfPbmServerObjectRef)(nil)).Elem()) } +// The `PbmAboutInfo` data object stores identifying data +// about the Storage Policy Server. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmAboutInfo struct { types.DynamicData - Name string `xml:"name"` - Version string `xml:"version"` - InstanceUuid string `xml:"instanceUuid"` + // Name of the server. + Name string `xml:"name" json:"name"` + // Version number. + Version string `xml:"version" json:"version"` + // Globally unique identifier associated with this server instance. + InstanceUuid string `xml:"instanceUuid" json:"instanceUuid"` } func init() { types.Add("pbm:PbmAboutInfo", reflect.TypeOf((*PbmAboutInfo)(nil)).Elem()) } +// An AlreadyExists fault is thrown when an attempt is made to add an element to +// a collection, if the element's key, name, or identifier already exists in +// that collection. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmAlreadyExists struct { PbmFault - Name string `xml:"name,omitempty"` + Name string `xml:"name,omitempty" json:"name,omitempty"` } func init() { @@ -281,10 +339,13 @@ func init() { types.Add("pbm:PbmAssignDefaultRequirementProfile", reflect.TypeOf((*PbmAssignDefaultRequirementProfile)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmAssignDefaultRequirementProfile`. type PbmAssignDefaultRequirementProfileRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - Profile PbmProfileId `xml:"profile"` - Datastores []PbmPlacementHub `xml:"datastores"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // The profile that needs to be made default profile. + Profile PbmProfileId `xml:"profile" json:"profile"` + // The datastores for which the profile needs to be made as default profile. + Datastores []PbmPlacementHub `xml:"datastores" json:"datastores"` } func init() { @@ -294,16 +355,26 @@ func init() { type PbmAssignDefaultRequirementProfileResponse struct { } +// Constraints on the properties for a single occurrence of a capability. +// +// All properties must satisfy their respective constraints to be compliant. +// +// This structure may be used only with operations rendered under `/pbm`. 
type PbmCapabilityConstraintInstance struct { types.DynamicData - PropertyInstance []PbmCapabilityPropertyInstance `xml:"propertyInstance"` + // Property instance array for this constraint + PropertyInstance []PbmCapabilityPropertyInstance `xml:"propertyInstance" json:"propertyInstance"` } func init() { types.Add("pbm:PbmCapabilityConstraintInstance", reflect.TypeOf((*PbmCapabilityConstraintInstance)(nil)).Elem()) } +// The `PbmCapabilityConstraints` data object is the base +// object for capability subprofile constraints. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityConstraints struct { types.DynamicData } @@ -312,69 +383,161 @@ func init() { types.Add("pbm:PbmCapabilityConstraints", reflect.TypeOf((*PbmCapabilityConstraints)(nil)).Elem()) } +// A property value with description. +// +// It can be repeated under DiscreteSet. +// E.g., set of tags, each with description and tag name. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityDescription struct { types.DynamicData - Description PbmExtendedElementDescription `xml:"description"` - Value types.AnyType `xml:"value,typeattr"` + // Description of the property value + Description PbmExtendedElementDescription `xml:"description" json:"description"` + // Values for the set. + // + // must be one of the supported datatypes as + // defined in `PbmBuiltinType_enum` + // Must only contain unique values to comply with the Set semantics + Value types.AnyType `xml:"value,typeattr" json:"value"` } func init() { types.Add("pbm:PbmCapabilityDescription", reflect.TypeOf((*PbmCapabilityDescription)(nil)).Elem()) } +// The `PbmCapabilityDiscreteSet` data object defines a set of values +// for storage profile property instances (`PbmCapabilityPropertyInstance`). +// +// Use the discrete set type to define a set of values of a supported builtin type +// (`PbmBuiltinType_enum`), for example a set of integers +// (XSD\_INT) or a set of unsigned long values (XSD\_LONG). +// See `PbmBuiltinGenericType_enum*.*VMW_SET`. +// +// A discrete set of values is declared as an array of xsd:anyType values. +// - When you define a property instance for a storage profile requirement +// and pass an array of values to the Server, you must set the array elements +// to values of the appropriate datatype. +// - When you read a discrete set from a property instance for a storage profile +// capability, you must cast the xsd:anyType array element values +// to the appropriate datatype. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityDiscreteSet struct { types.DynamicData - Values []types.AnyType `xml:"values,typeattr"` + // Array of values for the set. + // + // The values must be one of the supported datatypes + // as defined in `PbmBuiltinType_enum` or `PbmBuiltinGenericType_enum`. + Values []types.AnyType `xml:"values,typeattr" json:"values"` } func init() { types.Add("pbm:PbmCapabilityDiscreteSet", reflect.TypeOf((*PbmCapabilityDiscreteSet)(nil)).Elem()) } +// Generic type definition for capabilities. +// +// Indicates how a collection of values of a specific datatype +// (`PbmCapabilityTypeInfo.typeName`) +// will be interpreted. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityGenericTypeInfo struct { PbmCapabilityTypeInfo - GenericTypeName string `xml:"genericTypeName"` + // Name of the generic type. 
+ // + // Must correspond to one of the values defined in + // `PbmBuiltinGenericType_enum`. + GenericTypeName string `xml:"genericTypeName" json:"genericTypeName"` } func init() { types.Add("pbm:PbmCapabilityGenericTypeInfo", reflect.TypeOf((*PbmCapabilityGenericTypeInfo)(nil)).Elem()) } +// The `PbmCapabilityInstance` data object defines a storage capability instance. +// +// Metadata for the capability is described in `PbmCapabilityMetadata`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityInstance struct { types.DynamicData - Id PbmCapabilityMetadataUniqueId `xml:"id"` - Constraint []PbmCapabilityConstraintInstance `xml:"constraint"` + // Identifier for the capability. + // + // The identifier value corresponds to + // `PbmCapabilityMetadata*.*PbmCapabilityMetadata.id`. + Id PbmCapabilityMetadataUniqueId `xml:"id" json:"id"` + // Constraints on the properties that comprise this capability. + // + // Each entry represents a constraint on one or more of the properties that + // constitute this capability. A datum must meet one of the + // constraints to be compliant. + Constraint []PbmCapabilityConstraintInstance `xml:"constraint" json:"constraint"` } func init() { types.Add("pbm:PbmCapabilityInstance", reflect.TypeOf((*PbmCapabilityInstance)(nil)).Elem()) } +// Metadata for a single unique setting defined by a provider. +// +// A simple setting is a setting with one property. +// A complex setting contains more than one property. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityMetadata struct { types.DynamicData - Id PbmCapabilityMetadataUniqueId `xml:"id"` - Summary PbmExtendedElementDescription `xml:"summary"` - Mandatory *bool `xml:"mandatory"` - Hint *bool `xml:"hint"` - KeyId string `xml:"keyId,omitempty"` - AllowMultipleConstraints *bool `xml:"allowMultipleConstraints"` - PropertyMetadata []PbmCapabilityPropertyMetadata `xml:"propertyMetadata"` + // Unique identifier for the capability. + Id PbmCapabilityMetadataUniqueId `xml:"id" json:"id"` + // Capability name and description + Summary PbmExtendedElementDescription `xml:"summary" json:"summary"` + // Indicates whether incorporating given capability is mandatory during creation of + // profile. + Mandatory *bool `xml:"mandatory" json:"mandatory,omitempty"` + // The flag hint dictates the interpretation of constraints specified for this capability + // in a storage policy profile. + // + // If hint is false, then constraints will affect placement. + // If hint is true, constraints will not affect placement, + // but will still be passed to provisioning operations if the provider understands the + // relevant namespace. Optional property, false if not set. + Hint *bool `xml:"hint" json:"hint,omitempty"` + // Property Id of the key property, if this capability represents a key + // value pair. + // + // Value is empty string if not set. + KeyId string `xml:"keyId,omitempty" json:"keyId,omitempty"` + // Flag to indicate if multiple constraints are allowed in the capability + // instance. + // + // False if not set. + AllowMultipleConstraints *bool `xml:"allowMultipleConstraints" json:"allowMultipleConstraints,omitempty"` + // Metadata for the properties that comprise this capability. 
+ PropertyMetadata []PbmCapabilityPropertyMetadata `xml:"propertyMetadata" json:"propertyMetadata"` } func init() { types.Add("pbm:PbmCapabilityMetadata", reflect.TypeOf((*PbmCapabilityMetadata)(nil)).Elem()) } +// The `PbmCapabilityMetadataPerCategory` +// data object defines capability metadata for a profile subcategory. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityMetadataPerCategory struct { types.DynamicData - SubCategory string `xml:"subCategory"` - CapabilityMetadata []PbmCapabilityMetadata `xml:"capabilityMetadata"` + // Profile subcategory to which the capability metadata belongs. + // + // The subcategory is specified by the storage provider. + SubCategory string `xml:"subCategory" json:"subCategory"` + // Capability metadata for this category + CapabilityMetadata []PbmCapabilityMetadata `xml:"capabilityMetadata" json:"capabilityMetadata"` } func init() { @@ -384,60 +547,162 @@ func init() { type PbmCapabilityMetadataUniqueId struct { types.DynamicData - Namespace string `xml:"namespace"` - Id string `xml:"id"` + // Namespace to which this capability belongs. + // + // Must be the same as + // { @link CapabilityObjectSchema#namespace } defined for this + // capability + Namespace string `xml:"namespace" json:"namespace"` + // unique identifier for this capability within given namespace + Id string `xml:"id" json:"id"` } func init() { types.Add("pbm:PbmCapabilityMetadataUniqueId", reflect.TypeOf((*PbmCapabilityMetadataUniqueId)(nil)).Elem()) } +// Name space information for the capability metadata schema. +// +// NOTE: Name spaces are required to be globally unique across resource types. +// A same vendor can register multiple name spaces for same resource type or +// for different resource type, but the schema namespace URL must be unique +// for each of these cases. +// A CapabilityMetadata object is uniquely identified based on the namespace +// it belongs to and it's unique identifier within that namespace. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityNamespaceInfo struct { types.DynamicData - Version string `xml:"version"` - Namespace string `xml:"namespace"` - Info *PbmExtendedElementDescription `xml:"info,omitempty"` + // Schema version + Version string `xml:"version" json:"version"` + // Schema namespace. + Namespace string `xml:"namespace" json:"namespace"` + Info *PbmExtendedElementDescription `xml:"info,omitempty" json:"info,omitempty"` } func init() { types.Add("pbm:PbmCapabilityNamespaceInfo", reflect.TypeOf((*PbmCapabilityNamespaceInfo)(nil)).Elem()) } +// The `PbmCapabilityProfile` data object defines +// capability-based profiles. +// +// A capability-based profile is derived +// from tag-based storage capabilities or from vSAN storage capabilities. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityProfile struct { PbmProfile - ProfileCategory string `xml:"profileCategory"` - ResourceType PbmProfileResourceType `xml:"resourceType"` - Constraints BasePbmCapabilityConstraints `xml:"constraints,typeattr"` - GenerationId int64 `xml:"generationId,omitempty"` - IsDefault bool `xml:"isDefault"` - SystemCreatedProfileType string `xml:"systemCreatedProfileType,omitempty"` - LineOfService string `xml:"lineOfService,omitempty"` + // Indicates whether the profile is requirement + // profile, a resource profile or a data service profile. 
+ // + // The profileCategory + // is a string value that corresponds to one of the + // `PbmProfileCategoryEnum_enum` values. + // - REQUIREMENT profile - Defines the storage constraints applied + // to virtual machine placement. Requirements are defined by + // the user and can be associated with virtual machines and virtual + // disks. During provisioning, you can use a requirements profile + // for compliance and placement checking to support + // selection and configuration of resources. + // - RESOURCE profile - Specifies system-defined storage capabilities. + // You cannot modify a resource profile. You cannot associate a resource + // profile with vSphere entities, use it during provisioning, or target + // entities for resource selection or configuration. + // This type of profile gives the user visibility into the capabilities + // supported by the storage provider. + // + // DATA\_SERVICE\_POLICY - Indicates a data service policy that can + // be embedded into another storage policy. Policies of this type can't + // be assigned to Virtual Machines or Virtual Disks. This policy cannot + // be used for compliance checking. + ProfileCategory string `xml:"profileCategory" json:"profileCategory"` + // Type of the target resource to which the capability information applies. + // + // A fixed enum that defines resource types for which capabilities can be defined + // see `PbmProfileResourceType`, `PbmProfileResourceTypeEnum_enum` + ResourceType PbmProfileResourceType `xml:"resourceType" json:"resourceType"` + // Subprofiles that describe storage requirements or storage provider capabilities, + // depending on the profile category (REQUIREMENT or RESOURCE). + Constraints BasePbmCapabilityConstraints `xml:"constraints,typeattr" json:"constraints"` + // Generation ID is used to communicate the current version of the profile to VASA + // providers. + // + // It is only applicable to REQUIREMENT profile types. Every time a + // requirement profile is edited, the Server will increment the generationId. You + // do not need to set the generationID. When an object is created (or + // reconfigured), the Server will send the requirement profile content, profile ID and + // the generationID to VASA provider. + GenerationId int64 `xml:"generationId,omitempty" json:"generationId,omitempty"` + // Deprecated since it is not supported. + // + // Not supported in this release. + IsDefault bool `xml:"isDefault" json:"isDefault"` + // Indicates the type of system pre-created default profile. + // + // This will be set only for system pre-created default profiles. And + // this is not set for RESOURCE profiles. + SystemCreatedProfileType string `xml:"systemCreatedProfileType,omitempty" json:"systemCreatedProfileType,omitempty"` + // This property is set only for data service policy. + // + // Indicates the line of service + // `PbmLineOfServiceInfoLineOfServiceEnum_enum` of the data service policy. + LineOfService string `xml:"lineOfService,omitempty" json:"lineOfService,omitempty"` } func init() { types.Add("pbm:PbmCapabilityProfile", reflect.TypeOf((*PbmCapabilityProfile)(nil)).Elem()) } +// The `PbmCapabilityProfileCreateSpec` describes storage requirements. +// +// Use this data object to create a `PbmCapabilityProfile`. +// +// This structure may be used only with operations rendered under `/pbm`. 
type PbmCapabilityProfileCreateSpec struct { types.DynamicData - Name string `xml:"name"` - Description string `xml:"description,omitempty"` - Category string `xml:"category,omitempty"` - ResourceType PbmProfileResourceType `xml:"resourceType"` - Constraints BasePbmCapabilityConstraints `xml:"constraints,typeattr"` + // Name of the capability based profile to be created. + // + // The maximum length of the name is 80 characters. + Name string `xml:"name" json:"name"` + // Text description associated with the profile. + Description string `xml:"description,omitempty" json:"description,omitempty"` + // Category specifies the type of policy to be created. + // + // This can be REQUIREMENT from + // `PbmProfileCategoryEnum_enum` + // or null when creating a storage policy. And it can be DATA\_SERVICE\_POLICY from + // `PbmProfileCategoryEnum_enum` + // when creating a data service policy. RESOURCE from `PbmProfileCategoryEnum_enum` + // is not allowed as resource profile is created by the system. + Category string `xml:"category,omitempty" json:"category,omitempty"` + // Deprecated as of vSphere API 6.5. + // + // Specifies the type of resource to which the profile applies. + // + // The only legal value is STORAGE - deprecated. + ResourceType PbmProfileResourceType `xml:"resourceType" json:"resourceType"` + // Set of subprofiles that define the storage requirements. + // + // A subprofile corresponds to a rule set in the vSphere Web Client. + Constraints BasePbmCapabilityConstraints `xml:"constraints,typeattr" json:"constraints"` } func init() { types.Add("pbm:PbmCapabilityProfileCreateSpec", reflect.TypeOf((*PbmCapabilityProfileCreateSpec)(nil)).Elem()) } +// Fault used when a datastore doesnt match the capability profile property instance in requirements profile. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityProfilePropertyMismatchFault struct { PbmPropertyMismatchFault - ResourcePropertyInstance PbmCapabilityPropertyInstance `xml:"resourcePropertyInstance"` + // The property instance in the resource profile that does not match. + ResourcePropertyInstance PbmCapabilityPropertyInstance `xml:"resourcePropertyInstance" json:"resourcePropertyInstance"` } func init() { @@ -450,118 +715,347 @@ func init() { types.Add("pbm:PbmCapabilityProfilePropertyMismatchFaultFault", reflect.TypeOf((*PbmCapabilityProfilePropertyMismatchFaultFault)(nil)).Elem()) } +// The `PbmCapabilityProfileUpdateSpec` data object +// contains data that you use to update a storage profile. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityProfileUpdateSpec struct { types.DynamicData - Name string `xml:"name,omitempty"` - Description string `xml:"description,omitempty"` - Constraints BasePbmCapabilityConstraints `xml:"constraints,omitempty,typeattr"` + // Specifies a new profile name. + Name string `xml:"name,omitempty" json:"name,omitempty"` + // Specifies a new profile description. + Description string `xml:"description,omitempty" json:"description,omitempty"` + // Specifies one or more subprofiles. + // + // A subprofile defines one or more + // storage requirements. + Constraints BasePbmCapabilityConstraints `xml:"constraints,omitempty,typeattr" json:"constraints,omitempty"` } func init() { types.Add("pbm:PbmCapabilityProfileUpdateSpec", reflect.TypeOf((*PbmCapabilityProfileUpdateSpec)(nil)).Elem()) } +// The `PbmCapabilityPropertyInstance` data object describes a virtual machine +// storage requirement. 
+// +// A storage requirement is based on the storage capability +// described in the `PbmCapabilityPropertyMetadata` and in the +// datastore profile property instance. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityPropertyInstance struct { types.DynamicData - Id string `xml:"id"` - Operator string `xml:"operator,omitempty"` - Value types.AnyType `xml:"value,typeattr"` + // Requirement property identifier. + // + // This identifier corresponds to the + // storage capability metadata identifier + // (`PbmCapabilityPropertyMetadata*.*PbmCapabilityPropertyMetadata.id`). + Id string `xml:"id" json:"id"` + // Operator for the values. + // + // Currently only support NOT operator for + // tag namespace + // See operator definition in (`PbmCapabilityOperator_enum`). + Operator string `xml:"operator,omitempty" json:"operator,omitempty"` + // Property value. + // + // You must specify the value. + // A property value is one value or a collection of values. + // - A single property value is expressed as a scalar value. + // - A collection of values is expressed as a `PbmCapabilityDiscreteSet` + // or a `PbmCapabilityRange` of values. + // + // The datatype of each value must be one of the + // `PbmBuiltinType_enum` datatypes. + // If the property consists of a collection of values, + // the interpretation of those values is determined by the + // `PbmCapabilityGenericTypeInfo`. + // + // Type information for a property instance is described in the property metadata + // (`PbmCapabilityPropertyMetadata*.*PbmCapabilityPropertyMetadata.type`). + Value types.AnyType `xml:"value,typeattr" json:"value"` } func init() { types.Add("pbm:PbmCapabilityPropertyInstance", reflect.TypeOf((*PbmCapabilityPropertyInstance)(nil)).Elem()) } +// The `PbmCapabilityPropertyMetadata` data object describes storage capability. +// +// An instance of property metadata may apply to many property instances +// (`PbmCapabilityPropertyInstance`). +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityPropertyMetadata struct { types.DynamicData - Id string `xml:"id"` - Summary PbmExtendedElementDescription `xml:"summary"` - Mandatory bool `xml:"mandatory"` - Type BasePbmCapabilityTypeInfo `xml:"type,omitempty,typeattr"` - DefaultValue types.AnyType `xml:"defaultValue,omitempty,typeattr"` - AllowedValue types.AnyType `xml:"allowedValue,omitempty,typeattr"` - RequirementsTypeHint string `xml:"requirementsTypeHint,omitempty"` + // Property identifier. + // + // Should be unique within the definition of the + // capability. Property instances refer to this identifier + // (`PbmCapabilityPropertyInstance*.*PbmCapabilityPropertyInstance.id`). + Id string `xml:"id" json:"id"` + // Property name and description. + // - The summary.label property + // (`PbmExtendedElementDescription.label`) + // contains property 'name' in server locale. + // - The summary.summary property + // (`PbmExtendedElementDescription.summary`) + // contains property 'description' in server locale. + // - The summary.messageCatalogKeyPrefix property + // (`PbmExtendedElementDescription.messageCatalogKeyPrefix`) + // contains unique prefix for this property within given message catalog. + // Prefix format: <capability\_unique\_identifier.<property\_id + // capability\_unique\_identifier -- string representation of + // `PbmCapabilityMetadataUniqueId` which globally identifies given + // capability metadata definition uniquely. 
+ // property\_id -- 'id' of this property `PbmCapabilityPropertyMetadata.id` + // Eg www.emc.com.storage.Recovery.Recovery\_site + // www.emc.com.storage.Recovery.RPO + // www.emc.com.storage.Recovery.RTO + Summary PbmExtendedElementDescription `xml:"summary" json:"summary"` + // Indicates whether incorporating given capability is mandatory during creation of + // profile. + Mandatory bool `xml:"mandatory" json:"mandatory"` + // Type information for the capability. + // + // The type of a property value + // (`PbmCapabilityPropertyInstance*.*PbmCapabilityPropertyInstance.value`) + // is specified as a builtin datatype and may also specify the interpretation of a + // collection of values of that datatype. + // - `PbmCapabilityPropertyMetadata.type*.*PbmCapabilityTypeInfo.typeName` + // specifies the `PbmBuiltinType_enum`. + // - `PbmCapabilityPropertyMetadata.type*.*PbmCapabilityGenericTypeInfo.genericTypeName` + // indicates how a collection of values of the specified datatype will be interpreted + // (`PbmBuiltinGenericType_enum`). + Type BasePbmCapabilityTypeInfo `xml:"type,omitempty,typeattr" json:"type,omitempty"` + // Default value, if any, that the property will assume when not + // constrained by requirements. + // + // This object must be of the + // `PbmCapabilityPropertyMetadata.type` + // defined for the property. + DefaultValue types.AnyType `xml:"defaultValue,omitempty,typeattr" json:"defaultValue,omitempty"` + // All legal values that the property may take on, across all + // implementations of the property. + // + // This definition of legal values is not + // determined by any particular resource configuration; rather it is + // inherent to the definition of the property. If undefined, then any value + // of the correct type is legal. This object must be a generic container for + // the `PbmCapabilityPropertyMetadata.type` + // defined for the property; + // see `PbmBuiltinGenericType_enum` + // for the supported generic container types. + AllowedValue types.AnyType `xml:"allowedValue,omitempty,typeattr" json:"allowedValue,omitempty"` + // A hint for data-driven systems that assist in authoring requirements + // constraints. + // + // Acceptable values defined by + // `PbmBuiltinGenericType_enum`. + // A property will typically only have constraints of a given type in + // requirement profiles, even if it is likely to use constraints of + // different types across capability profiles. This value, if specified, + // specifies the expected kind of constraint used in requirement profiles. + // Considerations for using this information: + // - This is only a hint; any properly formed constraint + // (see `PbmCapabilityPropertyInstance.value`) + // is still valid for a requirement profile. + // - If VMW\_SET is hinted, then a single value matching the property metadata type is + // also an expected form of constraint, as the latter is an allowed convenience + // for expressing a single-member set. + // - If this hint is not specified, then the authoring system may default to a form of + // constraint determined by its own criteria. + RequirementsTypeHint string `xml:"requirementsTypeHint,omitempty" json:"requirementsTypeHint,omitempty"` } func init() { types.Add("pbm:PbmCapabilityPropertyMetadata", reflect.TypeOf((*PbmCapabilityPropertyMetadata)(nil)).Elem()) } +// The `PbmCapabilityRange` data object defines a range of values for storage property +// instances (`PbmCapabilityPropertyInstance`). 
+// +// Use the range type to define a range of values of a supported builtin type, +// for example range<int>, range<long>, or range<timespan>. +// You can specify a partial range by omitting one of the properties, min or max. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityRange struct { types.DynamicData - Min types.AnyType `xml:"min,typeattr"` - Max types.AnyType `xml:"max,typeattr"` + // Minimum value of range. + // + // Must be one of the supported + // datatypes as defined in `PbmBuiltinType_enum`. + // Must be the same datatype as min. + Min types.AnyType `xml:"min,typeattr" json:"min"` + // Maximum value of range. + // + // Must be one of the supported + // datatypes as defined in `PbmBuiltinType_enum`. + // Must be the same datatype as max. + Max types.AnyType `xml:"max,typeattr" json:"max"` } func init() { types.Add("pbm:PbmCapabilityRange", reflect.TypeOf((*PbmCapabilityRange)(nil)).Elem()) } +// Capability Schema information +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilitySchema struct { types.DynamicData - VendorInfo PbmCapabilitySchemaVendorInfo `xml:"vendorInfo"` - NamespaceInfo PbmCapabilityNamespaceInfo `xml:"namespaceInfo"` - LineOfService BasePbmLineOfServiceInfo `xml:"lineOfService,omitempty,typeattr"` - CapabilityMetadataPerCategory []PbmCapabilityMetadataPerCategory `xml:"capabilityMetadataPerCategory"` + VendorInfo PbmCapabilitySchemaVendorInfo `xml:"vendorInfo" json:"vendorInfo"` + NamespaceInfo PbmCapabilityNamespaceInfo `xml:"namespaceInfo" json:"namespaceInfo"` + // Service type for the schema. + // + // Do not use Category as each service needs to have its own schema version. + // + // If omitted, this schema specifies persistence capabilities. + LineOfService BasePbmLineOfServiceInfo `xml:"lineOfService,omitempty,typeattr" json:"lineOfService,omitempty"` + // Capability metadata organized by category + CapabilityMetadataPerCategory []PbmCapabilityMetadataPerCategory `xml:"capabilityMetadataPerCategory" json:"capabilityMetadataPerCategory"` } func init() { types.Add("pbm:PbmCapabilitySchema", reflect.TypeOf((*PbmCapabilitySchema)(nil)).Elem()) } +// Information about vendor/owner of the capability metadata schema +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilitySchemaVendorInfo struct { types.DynamicData - VendorUuid string `xml:"vendorUuid"` - Info PbmExtendedElementDescription `xml:"info"` + // Unique identifier for the vendor who owns the given capability + // schema definition + VendorUuid string `xml:"vendorUuid" json:"vendorUuid"` + // Captures name and description information about the vendor/owner of + // the schema. + // - The summary.label property + // (`PbmExtendedElementDescription.label`) + // contains vendor name information in server locale. + // - The summary.summary property + // (`PbmExtendedElementDescription.summary`) + // contains vendor description string in server locale. + // - The summary.messageCatalogKeyPrefix property + // (`PbmExtendedElementDescription.messageCatalogKeyPrefix`) + // contains unique prefix for the vendor information within given message + // catalog. + Info PbmExtendedElementDescription `xml:"info" json:"info"` } func init() { types.Add("pbm:PbmCapabilitySchemaVendorInfo", reflect.TypeOf((*PbmCapabilitySchemaVendorInfo)(nil)).Elem()) } +// A `PbmCapabilitySubProfile` +// is a section within a profile that aggregates one or more capability +// instances. 
+// +// Capability instances define storage constraints. +// +// All constraints within a subprofile are ANDed by default. +// When you perform compliance checking on a virtual machine or virtual +// disk, all of the constraints must be satisfied by the storage capabilities. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilitySubProfile struct { types.DynamicData - Name string `xml:"name"` - Capability []PbmCapabilityInstance `xml:"capability"` - ForceProvision *bool `xml:"forceProvision"` + // Subprofile name. + Name string `xml:"name" json:"name"` + // List of capability instances. + Capability []PbmCapabilityInstance `xml:"capability" json:"capability"` + // Indicates whether the source policy profile allows creating a virtual machine + // or virtual disk that may be non-compliant. + ForceProvision *bool `xml:"forceProvision" json:"forceProvision,omitempty"` } func init() { types.Add("pbm:PbmCapabilitySubProfile", reflect.TypeOf((*PbmCapabilitySubProfile)(nil)).Elem()) } +// The `PbmCapabilitySubProfileConstraints` data object defines a group +// of storage subprofiles. +// +// Subprofile usage depends on the type of profile +// (`PbmCapabilityProfile*.*PbmCapabilityProfile.profileCategory`). +// - For a REQUIREMENTS profile, each subprofile defines storage requirements. +// A Storage Policy API requirements subprofile corresponds to a vSphere Web Client +// rule set. +// - For a RESOURCE profile, each subprofile defines storage capabilities. +// Storage capabilities are read-only. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilitySubProfileConstraints struct { PbmCapabilityConstraints - SubProfiles []PbmCapabilitySubProfile `xml:"subProfiles"` + // Aggregation of one or more subprofiles. + // + // The relationship among all subprofiles is "OR". When you perform + // compliance checking on a profile that contains more than one subprofile, + // a non-compliant result for any one of the subprofiles will produce a + // non-compliant result for the operation. + SubProfiles []PbmCapabilitySubProfile `xml:"subProfiles" json:"subProfiles"` } func init() { types.Add("pbm:PbmCapabilitySubProfileConstraints", reflect.TypeOf((*PbmCapabilitySubProfileConstraints)(nil)).Elem()) } +// The `PbmCapabilityTimeSpan` data object defines a time value and time unit, +// for example 10 hours or 5 minutes. +// +// See +// `PbmBuiltinType_enum*.*VMW_TIMESPAN`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityTimeSpan struct { types.DynamicData - Value int32 `xml:"value"` - Unit string `xml:"unit"` + // Time value. + // + // Must be a positive integer. + Value int32 `xml:"value" json:"value"` + // Unit value for time. + // + // The string value must correspond + // to one of the `PbmCapabilityTimeUnitType_enum` values. + Unit string `xml:"unit" json:"unit"` } func init() { types.Add("pbm:PbmCapabilityTimeSpan", reflect.TypeOf((*PbmCapabilityTimeSpan)(nil)).Elem()) } +// The `PbmCapabilityTypeInfo` data object defines the datatype for a requirement +// or capability property. +// +// See `PbmCapabilityPropertyMetadata`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityTypeInfo struct { types.DynamicData - TypeName string `xml:"typeName"` + // Datatype for a property. + // + // Must be one of the types defined + // in `PbmBuiltinType_enum`. 
+ // + // A property value might consist of a collection of values of the specified + // datatype. The interpretation of the collection is determined by the + // generic type (`PbmCapabilityGenericTypeInfo.genericTypeName`). + // The generic type indicates how a collection of values + // of the specified datatype will be interpreted. See the descriptions of the + // `PbmBuiltinType_enum` definitions. + TypeName string `xml:"typeName" json:"typeName"` } func init() { @@ -571,8 +1065,8 @@ func init() { type PbmCapabilityVendorNamespaceInfo struct { types.DynamicData - VendorInfo PbmCapabilitySchemaVendorInfo `xml:"vendorInfo"` - NamespaceInfo PbmCapabilityNamespaceInfo `xml:"namespaceInfo"` + VendorInfo PbmCapabilitySchemaVendorInfo `xml:"vendorInfo" json:"vendorInfo"` + NamespaceInfo PbmCapabilityNamespaceInfo `xml:"namespaceInfo" json:"namespaceInfo"` } func init() { @@ -582,8 +1076,14 @@ func init() { type PbmCapabilityVendorResourceTypeInfo struct { types.DynamicData - ResourceType string `xml:"resourceType"` - VendorNamespaceInfo []PbmCapabilityVendorNamespaceInfo `xml:"vendorNamespaceInfo"` + // Resource type for which given vendor has registered given namespace + // along with capability metadata that belongs to the namespace. + // + // Must match one of the values for enum `PbmProfileResourceTypeEnum_enum` + ResourceType string `xml:"resourceType" json:"resourceType"` + // List of all vendorInfo -- namespaceInfo tuples that are registered for + // given resource type + VendorNamespaceInfo []PbmCapabilityVendorNamespaceInfo `xml:"vendorNamespaceInfo" json:"vendorNamespaceInfo"` } func init() { @@ -596,10 +1096,16 @@ func init() { types.Add("pbm:PbmCheckCompatibility", reflect.TypeOf((*PbmCheckCompatibility)(nil)).Elem()) } +// The parameters of `PbmPlacementSolver.PbmCheckCompatibility`. type PbmCheckCompatibilityRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - HubsToSearch []PbmPlacementHub `xml:"hubsToSearch,omitempty"` - Profile PbmProfileId `xml:"profile"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Candidate list of hubs, either datastores or storage pods or a + // mix. If this parameter is not specified, the Server uses all + // of the datastores and storage pods for placement compatibility + // checking. + HubsToSearch []PbmPlacementHub `xml:"hubsToSearch,omitempty" json:"hubsToSearch,omitempty"` + // Storage requirement profile. + Profile PbmProfileId `xml:"profile" json:"profile"` } func init() { @@ -607,7 +1113,7 @@ func init() { } type PbmCheckCompatibilityResponse struct { - Returnval []PbmPlacementCompatibilityResult `xml:"returnval,omitempty"` + Returnval []PbmPlacementCompatibilityResult `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmCheckCompatibilityWithSpec PbmCheckCompatibilityWithSpecRequestType @@ -616,10 +1122,15 @@ func init() { types.Add("pbm:PbmCheckCompatibilityWithSpec", reflect.TypeOf((*PbmCheckCompatibilityWithSpec)(nil)).Elem()) } +// The parameters of `PbmPlacementSolver.PbmCheckCompatibilityWithSpec`. type PbmCheckCompatibilityWithSpecRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - HubsToSearch []PbmPlacementHub `xml:"hubsToSearch,omitempty"` - ProfileSpec PbmCapabilityProfileCreateSpec `xml:"profileSpec"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Candidate list of hubs, either datastores or storage pods + // or a mix. 
If this parameter is not specified, the Server uses all of the + // datastores and storage pods for placement compatibility checking. + HubsToSearch []PbmPlacementHub `xml:"hubsToSearch,omitempty" json:"hubsToSearch,omitempty"` + // Specification for a capability based profile. + ProfileSpec PbmCapabilityProfileCreateSpec `xml:"profileSpec" json:"profileSpec"` } func init() { @@ -627,7 +1138,7 @@ func init() { } type PbmCheckCompatibilityWithSpecResponse struct { - Returnval []PbmPlacementCompatibilityResult `xml:"returnval,omitempty"` + Returnval []PbmPlacementCompatibilityResult `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmCheckCompliance PbmCheckComplianceRequestType @@ -636,10 +1147,31 @@ func init() { types.Add("pbm:PbmCheckCompliance", reflect.TypeOf((*PbmCheckCompliance)(nil)).Elem()) } +// The parameters of `PbmComplianceManager.PbmCheckCompliance`. type PbmCheckComplianceRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - Entities []PbmServerObjectRef `xml:"entities"` - Profile *PbmProfileId `xml:"profile,omitempty"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // One or more references to storage entities. + // You can specify virtual machines and virtual disks + // A maximum of 1000 virtual machines and/or virtual disks can be specified + // in a call. The results of calling the checkCompliance API with + // more than a 1000 entities is undefined. + // - If the list of entities also contains datastores, the Server + // will ignore the datastores. + // - If the list contains valid and invalid entities, the Server ignores + // the invalid entities and returns results for the valid entities. + // Invalid entities are entities that are not in the vCenter inventory. + // - If the list contains only datastores, the method throws + // an InvalidArgument fault. + // - If the list contains virtual machines and disks and the entities + // are invalid or have been deleted by the time of the request, the method + // throws an InvalidArgument fault. + // + // If an entity does not have an associated storage profile, the entity + // is removed from the list. + Entities []PbmServerObjectRef `xml:"entities" json:"entities"` + // Not used. If specified, the Server ignores the value. + // The Server uses the profiles associated with the specified entities. + Profile *PbmProfileId `xml:"profile,omitempty" json:"profile,omitempty"` } func init() { @@ -647,7 +1179,7 @@ func init() { } type PbmCheckComplianceResponse struct { - Returnval []PbmComplianceResult `xml:"returnval,omitempty"` + Returnval []PbmComplianceResult `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmCheckRequirements PbmCheckRequirementsRequestType @@ -656,11 +1188,27 @@ func init() { types.Add("pbm:PbmCheckRequirements", reflect.TypeOf((*PbmCheckRequirements)(nil)).Elem()) } +// The parameters of `PbmPlacementSolver.PbmCheckRequirements`. type PbmCheckRequirementsRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - HubsToSearch []PbmPlacementHub `xml:"hubsToSearch,omitempty"` - PlacementSubjectRef *PbmServerObjectRef `xml:"placementSubjectRef,omitempty"` - PlacementSubjectRequirement []BasePbmPlacementRequirement `xml:"placementSubjectRequirement,omitempty,typeattr"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Candidate list of hubs, either datastores or storage pods + // or a mix. 
If this parameter is not specified, the Server uses all of the + // datastores and storage pods for placement compatibility checking. + HubsToSearch []PbmPlacementHub `xml:"hubsToSearch,omitempty" json:"hubsToSearch,omitempty"` + // reference to the object being placed. Should be null when a new + // object is being provisioned. Should be specified when placement compatibility is being checked + // for an existing object. Supported objects are + // `virtualMachine`, + // `virtualMachineAndDisks`, + // `virtualDiskId`, + // `virtualDiskUUID` + PlacementSubjectRef *PbmServerObjectRef `xml:"placementSubjectRef,omitempty" json:"placementSubjectRef,omitempty"` + // Requirements including the policy requirements, compute + // requirements and capacity requirements. It is invalid to specify no requirements. It is also + // invalid to specify duplicate requirements or multiple conflicting requirements such as + // specifying both `PbmPlacementCapabilityConstraintsRequirement` and + // `PbmPlacementCapabilityProfileRequirement`. + PlacementSubjectRequirement []BasePbmPlacementRequirement `xml:"placementSubjectRequirement,omitempty,typeattr" json:"placementSubjectRequirement,omitempty"` } func init() { @@ -668,7 +1216,7 @@ func init() { } type PbmCheckRequirementsResponse struct { - Returnval []PbmPlacementCompatibilityResult `xml:"returnval,omitempty"` + Returnval []PbmPlacementCompatibilityResult `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmCheckRollupCompliance PbmCheckRollupComplianceRequestType @@ -677,9 +1225,14 @@ func init() { types.Add("pbm:PbmCheckRollupCompliance", reflect.TypeOf((*PbmCheckRollupCompliance)(nil)).Elem()) } +// The parameters of `PbmComplianceManager.PbmCheckRollupCompliance`. type PbmCheckRollupComplianceRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - Entity []PbmServerObjectRef `xml:"entity"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // One or more references to virtual machines. + // A maximum of 1000 virtual machines can be specified + // in a call. The results of calling the checkRollupCompliance API with + // more than a 1000 entities is undefined. + Entity []PbmServerObjectRef `xml:"entity" json:"entity"` } func init() { @@ -687,13 +1240,17 @@ func init() { } type PbmCheckRollupComplianceResponse struct { - Returnval []PbmRollupComplianceResult `xml:"returnval,omitempty"` + Returnval []PbmRollupComplianceResult `xml:"returnval,omitempty" json:"returnval,omitempty"` } +// Super class for all compatibility check faults. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCompatibilityCheckFault struct { PbmFault - Hub PbmPlacementHub `xml:"hub"` + // Placement Hub + Hub PbmPlacementHub `xml:"hub" json:"hub"` } func init() { @@ -706,43 +1263,124 @@ func init() { types.Add("pbm:PbmCompatibilityCheckFaultFault", reflect.TypeOf((*PbmCompatibilityCheckFaultFault)(nil)).Elem()) } +// Additional information on the effects of backend resources and +// operations on the storage object. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmComplianceOperationalStatus struct { types.DynamicData - Healthy *bool `xml:"healthy"` - OperationETA *time.Time `xml:"operationETA"` - OperationProgress int64 `xml:"operationProgress,omitempty"` - Transitional *bool `xml:"transitional"` + // Whether the object is currently affected by the failure of backend + // storage resources. + // + // Optional property. 
+ Healthy *bool `xml:"healthy" json:"healthy,omitempty"` + // Estimated completion time of a backend operation affecting the object. + // + // If set, then "transitional" will be true. + // Optional property. + OperationETA *time.Time `xml:"operationETA" json:"operationETA,omitempty"` + // Percent progress of a backend operation affecting the object. + // + // If set, then "transitional" will be true. + // Optional property. + OperationProgress int64 `xml:"operationProgress,omitempty" json:"operationProgress,omitempty"` + // Whether an object is undergoing a backend operation that may affect + // its performance. + // + // This may be a rebalancing the resources of a healthy + // object or recovery tasks for an unhealthy object. + // Optional property. + Transitional *bool `xml:"transitional" json:"transitional,omitempty"` } func init() { types.Add("pbm:PbmComplianceOperationalStatus", reflect.TypeOf((*PbmComplianceOperationalStatus)(nil)).Elem()) } +// The `PbmCompliancePolicyStatus` data object provides information +// when compliance checking produces non-compliant results. +// +// See +// `PbmComplianceResult*.*PbmComplianceResult.violatedPolicies`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCompliancePolicyStatus struct { types.DynamicData - ExpectedValue PbmCapabilityInstance `xml:"expectedValue"` - CurrentValue *PbmCapabilityInstance `xml:"currentValue,omitempty"` + // Expected storage capability values of profile policies defined + // by a storage provider. + ExpectedValue PbmCapabilityInstance `xml:"expectedValue" json:"expectedValue"` + // Current storage requirement values of the profile policies + // specified for the virtual machine or virtual disk. + CurrentValue *PbmCapabilityInstance `xml:"currentValue,omitempty" json:"currentValue,omitempty"` } func init() { types.Add("pbm:PbmCompliancePolicyStatus", reflect.TypeOf((*PbmCompliancePolicyStatus)(nil)).Elem()) } +// The `PbmComplianceResult` data object describes the results of profile compliance +// checking for a virtual machine or virtual disk. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmComplianceResult struct { types.DynamicData - CheckTime time.Time `xml:"checkTime"` - Entity PbmServerObjectRef `xml:"entity"` - Profile *PbmProfileId `xml:"profile,omitempty"` - ComplianceTaskStatus string `xml:"complianceTaskStatus,omitempty"` - ComplianceStatus string `xml:"complianceStatus"` - Mismatch bool `xml:"mismatch"` - ViolatedPolicies []PbmCompliancePolicyStatus `xml:"violatedPolicies,omitempty"` - ErrorCause []types.LocalizedMethodFault `xml:"errorCause,omitempty"` - OperationalStatus *PbmComplianceOperationalStatus `xml:"operationalStatus,omitempty"` - Info *PbmExtendedElementDescription `xml:"info,omitempty"` + // Time when the compliance was checked. + CheckTime time.Time `xml:"checkTime" json:"checkTime"` + // Virtual machine or virtual disk for which compliance was checked. + Entity PbmServerObjectRef `xml:"entity" json:"entity"` + // Requirement profile with which the compliance was checked. + Profile *PbmProfileId `xml:"profile,omitempty" json:"profile,omitempty"` + // Status of the current running compliance operation. + // + // If there is no + // compliance check operation triggered, this indicates the last compliance + // task status. complianceTaskStatus is a string value that + // corresponds to one of the + // `PbmComplianceResultComplianceTaskStatus_enum` values. 
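A minimal sketch of triggering a compliance check for one virtual machine with the PbmCheckCompliance request described earlier and reading the resulting compliance status. It assumes the generated helper in github.com/vmware/govmomi/pbm/methods follows the usual govmomi naming and that the VM's managed object id is already known; newer govmomi releases may also expose a convenience wrapper on pbm.Client.

```go
package pbmexamples

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/pbm"
	"github.com/vmware/govmomi/pbm/methods"
	"github.com/vmware/govmomi/pbm/types"
)

// checkVMCompliance triggers a compliance check for a virtual machine and
// prints the per-entity compliance status. vmMoid is the vCenter managed
// object id of the VM (for example "vm-42").
func checkVMCompliance(ctx context.Context, c *pbm.Client, vmMoid string) error {
	req := types.PbmCheckCompliance{
		This: c.ServiceContent.ComplianceManager,
		Entities: []types.PbmServerObjectRef{{
			ObjectType: string(types.PbmObjectTypeVirtualMachine),
			Key:        vmMoid,
		}},
	}

	res, err := methods.PbmCheckCompliance(ctx, c, &req)
	if err != nil {
		return err
	}

	for _, r := range res.Returnval {
		fmt.Printf("%s: %s\n", r.Entity.Key, r.ComplianceStatus)
	}
	return nil
}
```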
+ ComplianceTaskStatus string `xml:"complianceTaskStatus,omitempty" json:"complianceTaskStatus,omitempty"` + // Status of the compliance operation. + // + // complianceStatus is a + // string value that corresponds to one of the + // `PbmComplianceStatus_enum` values. + // + // When you perform compliance checking on an entity whose associated profile + // contains more than one subprofile ( + // `PbmCapabilityProfile` . + // `PbmCapabilityProfile.constraints`), a compliant + // result for any one of the subprofiles will produce a compliant result + // for the operation. + ComplianceStatus string `xml:"complianceStatus" json:"complianceStatus"` + // Deprecated as of vSphere 2016, use + // `PbmComplianceStatus_enum` to + // know if a mismatch has occurred. If + // `PbmComplianceResult.complianceStatus` value + // is outOfDate, mismatch has occurred. + // + // Set to true if there is a profile version mismatch between the Storage + // Profile Server and the storage provider. + // + // If you receive a result that + // indicates a mismatch, you must use the vSphere API to update the profile + // associated with the virtual machine or virtual disk. + Mismatch bool `xml:"mismatch" json:"mismatch"` + // Values for capabilities that are known to be non-compliant with the specified constraints. + ViolatedPolicies []PbmCompliancePolicyStatus `xml:"violatedPolicies,omitempty" json:"violatedPolicies,omitempty"` + // This property is set if the compliance task fails with errors. + // + // There can be + // more than one error since a policy containing multiple blobs can return + // multiple failures, one for each blob. + ErrorCause []types.LocalizedMethodFault `xml:"errorCause,omitempty" json:"errorCause,omitempty"` + // Additional information on the effects of backend resources and + // operations on the storage object. + OperationalStatus *PbmComplianceOperationalStatus `xml:"operationalStatus,omitempty" json:"operationalStatus,omitempty"` + // Informational localized messages provided by the VASA provider in + // addition to the violatedPolicy. + Info *PbmExtendedElementDescription `xml:"info,omitempty" json:"info,omitempty"` } func init() { @@ -755,9 +1393,11 @@ func init() { types.Add("pbm:PbmCreate", reflect.TypeOf((*PbmCreate)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmCreate`. type PbmCreateRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - CreateSpec PbmCapabilityProfileCreateSpec `xml:"createSpec"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Capability-based profile specification. + CreateSpec PbmCapabilityProfileCreateSpec `xml:"createSpec" json:"createSpec"` } func init() { @@ -765,48 +1405,87 @@ func init() { } type PbmCreateResponse struct { - Returnval PbmProfileId `xml:"returnval"` + Returnval PbmProfileId `xml:"returnval" json:"returnval"` } +// DataServiceToProfilesMap maps the data service policy to the parent storage policies +// if referred. +// +// This is returned from the API call +// `ProfileManager#queryParentStoragePolicies(ProfileId[])` +// +// This structure may be used only with operations rendered under `/pbm`. type PbmDataServiceToPoliciesMap struct { types.DynamicData - DataServicePolicy PbmProfileId `xml:"dataServicePolicy"` - ParentStoragePolicies []PbmProfileId `xml:"parentStoragePolicies,omitempty"` - Fault *types.LocalizedMethodFault `xml:"fault,omitempty"` + // Denotes a Data Service Policy Id. 
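A minimal sketch of creating a REQUIREMENT profile from the create spec documented above, assuming the CreateProfile wrapper on pbm.Client. If that wrapper is unavailable in the govmomi version in use, the same PbmCreateRequestType can be sent through the generated methods package.

```go
package pbmexamples

import (
	"context"

	"github.com/vmware/govmomi/pbm"
	"github.com/vmware/govmomi/pbm/types"
)

// createRequirementProfile creates a capability-based requirements profile
// from a previously assembled constraints object (see the earlier
// PbmCapabilitySubProfileConstraints sketch) and returns its profile ID.
func createRequirementProfile(ctx context.Context, c *pbm.Client, name string,
	constraints types.BasePbmCapabilityConstraints) (*types.PbmProfileId, error) {

	spec := types.PbmCapabilityProfileCreateSpec{
		Name:         name,
		Description:  "created via the PBM API",
		ResourceType: types.PbmProfileResourceType{ResourceType: "STORAGE"},
		Constraints:  constraints,
	}

	return c.CreateProfile(ctx, spec)
}
```

The returned PbmProfileId can then be passed to the compliance and placement calls sketched elsewhere in this file.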
+ DataServicePolicy PbmProfileId `xml:"dataServicePolicy" json:"dataServicePolicy"` + // Storage Policies that refer to the Data Service Policy given by + // `PbmDataServiceToPoliciesMap.dataServicePolicy`. + ParentStoragePolicies []PbmProfileId `xml:"parentStoragePolicies,omitempty" json:"parentStoragePolicies,omitempty"` + // The fault is set in case of error conditions and this property will + // have the reason. + Fault *types.LocalizedMethodFault `xml:"fault,omitempty" json:"fault,omitempty"` } func init() { types.Add("pbm:PbmDataServiceToPoliciesMap", reflect.TypeOf((*PbmDataServiceToPoliciesMap)(nil)).Elem()) } +// Space stats for datastore +// +// This structure may be used only with operations rendered under `/pbm`. type PbmDatastoreSpaceStatistics struct { types.DynamicData - ProfileId string `xml:"profileId,omitempty"` - PhysicalTotalInMB int64 `xml:"physicalTotalInMB"` - PhysicalFreeInMB int64 `xml:"physicalFreeInMB"` - PhysicalUsedInMB int64 `xml:"physicalUsedInMB"` - LogicalLimitInMB int64 `xml:"logicalLimitInMB,omitempty"` - LogicalFreeInMB int64 `xml:"logicalFreeInMB"` - LogicalUsedInMB int64 `xml:"logicalUsedInMB"` + // Capability profile id. + // + // It is null when the statistics are for the entire + // datastore. + ProfileId string `xml:"profileId,omitempty" json:"profileId,omitempty"` + // Total physical space in MB. + PhysicalTotalInMB int64 `xml:"physicalTotalInMB" json:"physicalTotalInMB"` + // Total physical free space in MB. + PhysicalFreeInMB int64 `xml:"physicalFreeInMB" json:"physicalFreeInMB"` + // Used physical storage space in MB. + PhysicalUsedInMB int64 `xml:"physicalUsedInMB" json:"physicalUsedInMB"` + // Logical space limit set by the storage admin in MB. + // + // Omitted if there is no Logical space limit. + LogicalLimitInMB int64 `xml:"logicalLimitInMB,omitempty" json:"logicalLimitInMB,omitempty"` + // Free logical storage space in MB. + LogicalFreeInMB int64 `xml:"logicalFreeInMB" json:"logicalFreeInMB"` + // Used logical storage space in MB. + LogicalUsedInMB int64 `xml:"logicalUsedInMB" json:"logicalUsedInMB"` } func init() { types.Add("pbm:PbmDatastoreSpaceStatistics", reflect.TypeOf((*PbmDatastoreSpaceStatistics)(nil)).Elem()) } +// Not supported in this release. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmDefaultCapabilityProfile struct { PbmCapabilityProfile - VvolType []string `xml:"vvolType"` - ContainerId string `xml:"containerId"` + // Not supported in this release. + VvolType []string `xml:"vvolType" json:"vvolType"` + // Not supported in this release. + ContainerId string `xml:"containerId" json:"containerId"` } func init() { types.Add("pbm:PbmDefaultCapabilityProfile", reflect.TypeOf((*PbmDefaultCapabilityProfile)(nil)).Elem()) } +// Warning fault used to indicate that the vendor specific datastore matches the tag in the +// requirements profile that does not have a vendor specific rule set. +// +// In such case, +// an empty blob is sent to the vendor specific datastore and the default profile would apply. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmDefaultProfileAppliesFault struct { PbmCompatibilityCheckFault } @@ -821,11 +1500,20 @@ func init() { types.Add("pbm:PbmDefaultProfileAppliesFaultFault", reflect.TypeOf((*PbmDefaultProfileAppliesFaultFault)(nil)).Elem()) } +// Data structure that stores the default profile for datastores. +// +// This structure may be used only with operations rendered under `/pbm`. 
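A minimal sketch of querying the default requirement profile for a set of datastores, which returns the PbmDefaultProfileInfo objects defined just below. The generated methods helper name is assumed to follow the usual govmomi pattern, and placement hubs are built from datastore managed object references.

```go
package pbmexamples

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/pbm"
	"github.com/vmware/govmomi/pbm/methods"
	"github.com/vmware/govmomi/pbm/types"
	vim "github.com/vmware/govmomi/vim25/types"
)

// defaultProfiles reports the default requirement profile, if any, for the
// given datastores.
func defaultProfiles(ctx context.Context, c *pbm.Client, datastores []vim.ManagedObjectReference) error {
	hubs := make([]types.PbmPlacementHub, 0, len(datastores))
	for _, ds := range datastores {
		hubs = append(hubs, types.PbmPlacementHub{HubType: ds.Type, HubId: ds.Value})
	}

	req := types.PbmQueryDefaultRequirementProfiles{
		This:       c.ServiceContent.ProfileManager,
		Datastores: hubs,
	}

	res, err := methods.PbmQueryDefaultRequirementProfiles(ctx, c, &req)
	if err != nil {
		return err
	}

	for _, info := range res.Returnval {
		name := "none"
		if info.DefaultProfile != nil {
			name = info.DefaultProfile.GetPbmProfile().Name
		}
		for _, hub := range info.Datastores {
			fmt.Printf("%s: default profile %s\n", hub.HubId, name)
		}
	}
	return nil
}
```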
type PbmDefaultProfileInfo struct { types.DynamicData - Datastores []PbmPlacementHub `xml:"datastores"` - DefaultProfile BasePbmProfile `xml:"defaultProfile,omitempty,typeattr"` + // Datastores + Datastores []PbmPlacementHub `xml:"datastores" json:"datastores"` + // Default requirements profile. + // + // It is set to null if the datastores are not associated with any default profile. + DefaultProfile BasePbmProfile `xml:"defaultProfile,omitempty,typeattr" json:"defaultProfile,omitempty"` + // NoPermission fault if default profile is not permitted. + MethodFault *types.LocalizedMethodFault `xml:"methodFault,omitempty" json:"methodFault,omitempty"` } func init() { @@ -838,9 +1526,11 @@ func init() { types.Add("pbm:PbmDelete", reflect.TypeOf((*PbmDelete)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmDelete`. type PbmDeleteRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - ProfileId []PbmProfileId `xml:"profileId"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Array of profile identifiers. + ProfileId []PbmProfileId `xml:"profileId" json:"profileId"` } func init() { @@ -848,13 +1538,18 @@ func init() { } type PbmDeleteResponse struct { - Returnval []PbmProfileOperationOutcome `xml:"returnval,omitempty"` + Returnval []PbmProfileOperationOutcome `xml:"returnval,omitempty" json:"returnval,omitempty"` } +// A DuplicateName exception is thrown because a name already exists +// in the same name space. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmDuplicateName struct { PbmFault - Name string `xml:"name"` + // The name that is already bound in the name space. + Name string `xml:"name" json:"name"` } func init() { @@ -870,17 +1565,38 @@ func init() { type PbmExtendedElementDescription struct { types.DynamicData - Label string `xml:"label"` - Summary string `xml:"summary"` - Key string `xml:"key"` - MessageCatalogKeyPrefix string `xml:"messageCatalogKeyPrefix"` - MessageArg []types.KeyAnyValue `xml:"messageArg,omitempty"` + // Display label. + Label string `xml:"label" json:"label"` + // Summary description. + Summary string `xml:"summary" json:"summary"` + // Enumeration or literal ID being described. + Key string `xml:"key" json:"key"` + // Key to the localized message string in the catalog. + // + // If the localized string contains parameters, values to the + // parameters will be provided in #messageArg. + // E.g: If the message in the catalog is + // "IP address is {address}", value for "address" + // will be provided by #messageArg. + // Both summary and label in ElementDescription will have a corresponding + // entry in the message catalog with the keys + // .summary and .label + // respectively. + // ElementDescription.summary and ElementDescription.label will contain + // the strings in server locale. + MessageCatalogKeyPrefix string `xml:"messageCatalogKeyPrefix" json:"messageCatalogKeyPrefix"` + // Provides named arguments that can be used to localize the + // message in the catalog. + MessageArg []types.KeyAnyValue `xml:"messageArg,omitempty" json:"messageArg,omitempty"` } func init() { types.Add("pbm:PbmExtendedElementDescription", reflect.TypeOf((*PbmExtendedElementDescription)(nil)).Elem()) } +// The super class for all pbm faults. +// +// This structure may be used only with operations rendered under `/pbm`. 
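A minimal sketch of deleting profiles and inspecting the per-profile PbmProfileOperationOutcome returned by the delete call documented above, assuming the DeleteProfile wrapper on pbm.Client. The outcome's fault is set, for example, when a profile is still in use.

```go
package pbmexamples

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/pbm"
	"github.com/vmware/govmomi/pbm/types"
)

// deleteProfiles removes the given profiles and reports any per-profile
// faults (for example, when a profile is still associated with an entity).
func deleteProfiles(ctx context.Context, c *pbm.Client, ids []types.PbmProfileId) error {
	outcomes, err := c.DeleteProfile(ctx, ids)
	if err != nil {
		return err
	}

	for _, o := range outcomes {
		if o.Fault != nil {
			fmt.Printf("could not delete %s: %s\n", o.ProfileId.UniqueId, o.Fault.LocalizedMessage)
		}
	}
	return nil
}
```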
type PbmFault struct { types.MethodFault } @@ -895,6 +1611,10 @@ func init() { types.Add("pbm:PbmFaultFault", reflect.TypeOf((*PbmFaultFault)(nil)).Elem()) } +// Thrown when login fails due to token not provided or token could not be +// validated. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmFaultInvalidLogin struct { PbmFault } @@ -909,17 +1629,51 @@ func init() { types.Add("pbm:PbmFaultInvalidLoginFault", reflect.TypeOf((*PbmFaultInvalidLoginFault)(nil)).Elem()) } +// Thrown when an operation is denied because of a privilege +// not held on a storage profile. +// +// This structure may be used only with operations rendered under `/pbm`. +type PbmFaultNoPermission struct { + types.SecurityError + + // List of profile ids and missing privileges for each profile + MissingPrivileges []PbmFaultNoPermissionEntityPrivileges `xml:"missingPrivileges,omitempty" json:"missingPrivileges,omitempty"` +} + +func init() { + types.Add("pbm:PbmFaultNoPermission", reflect.TypeOf((*PbmFaultNoPermission)(nil)).Elem()) +} + type PbmFaultNoPermissionEntityPrivileges struct { types.DynamicData - ProfileId *PbmProfileId `xml:"profileId,omitempty"` - PrivilegeIds []string `xml:"privilegeIds,omitempty"` + ProfileId *PbmProfileId `xml:"profileId,omitempty" json:"profileId,omitempty"` + PrivilegeIds []string `xml:"privilegeIds,omitempty" json:"privilegeIds,omitempty"` } func init() { types.Add("pbm:PbmFaultNoPermissionEntityPrivileges", reflect.TypeOf((*PbmFaultNoPermissionEntityPrivileges)(nil)).Elem()) } +type PbmFaultNoPermissionFault PbmFaultNoPermission + +func init() { + types.Add("pbm:PbmFaultNoPermissionFault", reflect.TypeOf((*PbmFaultNoPermissionFault)(nil)).Elem()) +} + +// A NotFound error occurs when a referenced component of a managed +// object cannot be found. +// +// The referenced component can be a data +// object type (such as a role or permission) or a primitive +// (such as a string). +// +// For example, if the missing referenced component is a data object, such as +// VirtualSwitch, the NotFound error is +// thrown. The NotFound error is also thrown if the data object is found, but the referenced name +// (for example, "vswitch0") is not. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmFaultNotFound struct { PbmFault } @@ -954,10 +1708,19 @@ func init() { types.Add("pbm:PbmFetchCapabilityMetadata", reflect.TypeOf((*PbmFetchCapabilityMetadata)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmFetchCapabilityMetadata`. type PbmFetchCapabilityMetadataRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - ResourceType *PbmProfileResourceType `xml:"resourceType,omitempty"` - VendorUuid string `xml:"vendorUuid,omitempty"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Type of profile resource. The Server supports the "STORAGE" resource + // type only. If not specified, this method will return capability metadata for the storage + // resources. Any other resourceType is considered invalid. + ResourceType *PbmProfileResourceType `xml:"resourceType,omitempty" json:"resourceType,omitempty"` + // Unique identifier for the vendor/owner of capability + // metadata. The specified vendor ID must match + // `PbmCapabilitySchemaVendorInfo*.*PbmCapabilitySchemaVendorInfo.vendorUuid`. + // If omitted, the Server searchs all capability metadata registered with the system. If a + // vendorUuid unknown to the Server is specified, empty results will be returned. 
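A minimal sketch of fetching capability metadata with the request type documented above. The generated methods helper and the PbmCapabilityMetadataPerCategory field names are assumed to follow the usual govmomi pattern; leaving vendorUuid empty asks the server to search all registered metadata.

```go
package pbmexamples

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/pbm"
	"github.com/vmware/govmomi/pbm/methods"
	"github.com/vmware/govmomi/pbm/types"
)

// listCapabilityMetadata prints the capability ids registered for the
// STORAGE resource type, grouped by sub-category.
func listCapabilityMetadata(ctx context.Context, c *pbm.Client) error {
	req := types.PbmFetchCapabilityMetadata{
		This:         c.ServiceContent.ProfileManager,
		ResourceType: &types.PbmProfileResourceType{ResourceType: "STORAGE"},
		// VendorUuid left empty: search all registered capability metadata.
	}

	res, err := methods.PbmFetchCapabilityMetadata(ctx, c, &req)
	if err != nil {
		return err
	}

	for _, category := range res.Returnval {
		for _, md := range category.CapabilityMetadata {
			fmt.Printf("%s: %s.%s\n", category.SubCategory, md.Id.Namespace, md.Id.Id)
		}
	}
	return nil
}
```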
+ VendorUuid string `xml:"vendorUuid,omitempty" json:"vendorUuid,omitempty"` } func init() { @@ -965,7 +1728,7 @@ func init() { } type PbmFetchCapabilityMetadataResponse struct { - Returnval []PbmCapabilityMetadataPerCategory `xml:"returnval,omitempty"` + Returnval []PbmCapabilityMetadataPerCategory `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmFetchCapabilitySchema PbmFetchCapabilitySchemaRequestType @@ -974,10 +1737,20 @@ func init() { types.Add("pbm:PbmFetchCapabilitySchema", reflect.TypeOf((*PbmFetchCapabilitySchema)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmFetchCapabilitySchema`. type PbmFetchCapabilitySchemaRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - VendorUuid string `xml:"vendorUuid,omitempty"` - LineOfService []string `xml:"lineOfService,omitempty"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Unique identifier for the vendor/owner of capability metadata. + // If omitted, the server searchs all capability metadata registered + // with the system. The specified vendor ID must match + // `PbmCapabilitySchemaVendorInfo*.*PbmCapabilitySchemaVendorInfo.vendorUuid`. + VendorUuid string `xml:"vendorUuid,omitempty" json:"vendorUuid,omitempty"` + // Optional line of service that must match `PbmLineOfServiceInfoLineOfServiceEnum_enum`. + // If specified, the capability schema objects + // are returned for the given lineOfServices. If null, then all + // capability schema objects that may or may not have data service capabilities + // are returned. + LineOfService []string `xml:"lineOfService,omitempty" json:"lineOfService,omitempty"` } func init() { @@ -985,7 +1758,7 @@ func init() { } type PbmFetchCapabilitySchemaResponse struct { - Returnval []PbmCapabilitySchema `xml:"returnval,omitempty"` + Returnval []PbmCapabilitySchema `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmFetchComplianceResult PbmFetchComplianceResultRequestType @@ -994,10 +1767,24 @@ func init() { types.Add("pbm:PbmFetchComplianceResult", reflect.TypeOf((*PbmFetchComplianceResult)(nil)).Elem()) } +// The parameters of `PbmComplianceManager.PbmFetchComplianceResult`. type PbmFetchComplianceResultRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - Entities []PbmServerObjectRef `xml:"entities"` - Profile *PbmProfileId `xml:"profile,omitempty"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // One or more references to storage entities. + // A maximum of 1000 virtual machines and/or virtual disks can be specified + // in a call. The results of calling the fetchComplianceResult API with + // more than a 1000 entities is undefined. + // - If the list of entities also contains datastores, the Server + // will ignore the datastores. + // - If the list contains valid and invalid entities, the Server ignores + // the invalid entities and returns results for the valid entities. + // Invalid entities are entities that are not in the vCenter inventory. + // - If the list contains only datastores, the method throws + // an InvalidArgument fault. + Entities []PbmServerObjectRef `xml:"entities" json:"entities"` + // Not used. if specified, the Server ignores the value. + // The Server uses the profiles associated with the specified entities. 
+ Profile *PbmProfileId `xml:"profile,omitempty" json:"profile,omitempty"` } func init() { @@ -1005,14 +1792,22 @@ func init() { } type PbmFetchComplianceResultResponse struct { - Returnval []PbmComplianceResult `xml:"returnval,omitempty"` + Returnval []PbmComplianceResult `xml:"returnval,omitempty" json:"returnval,omitempty"` } +// The `PbmFetchEntityHealthStatusSpec` data object contains +// the arguments required for +// `PbmComplianceManager.PbmFetchEntityHealthStatusExt`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmFetchEntityHealthStatusSpec struct { types.DynamicData - ObjectRef PbmServerObjectRef `xml:"objectRef"` - BackingId string `xml:"backingId,omitempty"` + // `PbmServerObjectRef` for which the healthStatus is required + ObjectRef PbmServerObjectRef `xml:"objectRef" json:"objectRef"` + // BackingId for the ServerObjectRef + // BackingId is mandatory for FCD on vSAN + BackingId string `xml:"backingId,omitempty" json:"backingId,omitempty"` } func init() { @@ -1026,7 +1821,7 @@ func init() { } type PbmFetchResourceTypeRequestType struct { - This types.ManagedObjectReference `xml:"_this"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` } func init() { @@ -1034,7 +1829,7 @@ func init() { } type PbmFetchResourceTypeResponse struct { - Returnval []PbmProfileResourceType `xml:"returnval,omitempty"` + Returnval []PbmProfileResourceType `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmFetchRollupComplianceResult PbmFetchRollupComplianceResultRequestType @@ -1043,9 +1838,14 @@ func init() { types.Add("pbm:PbmFetchRollupComplianceResult", reflect.TypeOf((*PbmFetchRollupComplianceResult)(nil)).Elem()) } +// The parameters of `PbmComplianceManager.PbmFetchRollupComplianceResult`. type PbmFetchRollupComplianceResultRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - Entity []PbmServerObjectRef `xml:"entity"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // One or more virtual machines. + // A maximum of 1000 virtual machines can be specified + // in a call. The results of calling the fetchRollupComplianceResult API with + // more than a 1000 entity objects is undefined. + Entity []PbmServerObjectRef `xml:"entity" json:"entity"` } func init() { @@ -1053,7 +1853,7 @@ func init() { } type PbmFetchRollupComplianceResultResponse struct { - Returnval []PbmRollupComplianceResult `xml:"returnval,omitempty"` + Returnval []PbmRollupComplianceResult `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmFetchVendorInfo PbmFetchVendorInfoRequestType @@ -1062,9 +1862,13 @@ func init() { types.Add("pbm:PbmFetchVendorInfo", reflect.TypeOf((*PbmFetchVendorInfo)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmFetchVendorInfo`. type PbmFetchVendorInfoRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - ResourceType *PbmProfileResourceType `xml:"resourceType,omitempty"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Specifies the resource type. The Server supports the STORAGE resource + // type only. If not specified, server defaults to STORAGE resource type. Any other + // resourceType is considered invalid. 
+ ResourceType *PbmProfileResourceType `xml:"resourceType,omitempty" json:"resourceType,omitempty"` } func init() { @@ -1072,7 +1876,7 @@ func init() { } type PbmFetchVendorInfoResponse struct { - Returnval []PbmCapabilityVendorResourceTypeInfo `xml:"returnval,omitempty"` + Returnval []PbmCapabilityVendorResourceTypeInfo `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmFindApplicableDefaultProfile PbmFindApplicableDefaultProfileRequestType @@ -1081,9 +1885,12 @@ func init() { types.Add("pbm:PbmFindApplicableDefaultProfile", reflect.TypeOf((*PbmFindApplicableDefaultProfile)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmFindApplicableDefaultProfile`. type PbmFindApplicableDefaultProfileRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - Datastores []PbmPlacementHub `xml:"datastores"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Datastores for which the default profile is found out. Note that + // the datastore pods/clusters are not supported. + Datastores []PbmPlacementHub `xml:"datastores" json:"datastores"` } func init() { @@ -1091,9 +1898,13 @@ func init() { } type PbmFindApplicableDefaultProfileResponse struct { - Returnval []BasePbmProfile `xml:"returnval,omitempty,typeattr"` + Returnval []BasePbmProfile `xml:"returnval,omitempty,typeattr" json:"returnval,omitempty"` } +// Warning fault used to indicate that the vendor specific datastore matches the tag in the +// requirements profile but doesnt match the vendor specific rule set in the requirements profile. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmIncompatibleVendorSpecificRuleSet struct { PbmCapabilityProfilePropertyMismatchFault } @@ -1108,10 +1919,19 @@ func init() { types.Add("pbm:PbmIncompatibleVendorSpecificRuleSetFault", reflect.TypeOf((*PbmIncompatibleVendorSpecificRuleSetFault)(nil)).Elem()) } +// LegacyHubsNotSupported fault is thrown to indicate the legacy hubs that are not supported. +// +// For storage, legacy hubs or datastores are VMFS and NFS datastores. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmLegacyHubsNotSupported struct { PbmFault - Hubs []PbmPlacementHub `xml:"hubs"` + // Legacy hubs that are not supported. + // + // Only datastores will be populated in this fault. Datastore clusters + // are not allowed. + Hubs []PbmPlacementHub `xml:"hubs" json:"hubs"` } func init() { @@ -1124,12 +1944,21 @@ func init() { types.Add("pbm:PbmLegacyHubsNotSupportedFault", reflect.TypeOf((*PbmLegacyHubsNotSupportedFault)(nil)).Elem()) } +// Describes Line of Service of a capability provider. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmLineOfServiceInfo struct { types.DynamicData - LineOfService string `xml:"lineOfService"` - Name PbmExtendedElementDescription `xml:"name"` - Description *PbmExtendedElementDescription `xml:"description,omitempty"` + // `PbmLineOfServiceInfoLineOfServiceEnum_enum` - must be one of the values + // for enum `PbmLineOfServiceInfoLineOfServiceEnum_enum`. + LineOfService string `xml:"lineOfService" json:"lineOfService"` + // Name of the service - for informational + // purposes only. + Name PbmExtendedElementDescription `xml:"name" json:"name"` + // Description of the service - for informational + // purposes only. 
+ Description *PbmExtendedElementDescription `xml:"description,omitempty" json:"description,omitempty"` } func init() { @@ -1139,18 +1968,22 @@ func init() { type PbmLoggingConfiguration struct { types.DynamicData - Component string `xml:"component"` - LogLevel string `xml:"logLevel"` + Component string `xml:"component" json:"component"` + LogLevel string `xml:"logLevel" json:"logLevel"` } func init() { types.Add("pbm:PbmLoggingConfiguration", reflect.TypeOf((*PbmLoggingConfiguration)(nil)).Elem()) } +// NonExistentHubs is thrown to indicate that some non existent datastores are used. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmNonExistentHubs struct { PbmFault - Hubs []PbmPlacementHub `xml:"hubs"` + // Legacy hubs that do not exist. + Hubs []PbmPlacementHub `xml:"hubs" json:"hubs"` } func init() { @@ -1163,72 +1996,140 @@ func init() { types.Add("pbm:PbmNonExistentHubsFault", reflect.TypeOf((*PbmNonExistentHubsFault)(nil)).Elem()) } +// Describes the data services provided by the storage arrays. +// +// In addition to storing bits, some VASA providers may also want to separate +// their capabilities into lines of service to let vSphere manage finer grain +// policies. For example an array may support replication natively, and may +// want vSphere policies to be defined for the replication aspect separately +// and compose them with persistence related policies. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmPersistenceBasedDataServiceInfo struct { PbmLineOfServiceInfo - CompatiblePersistenceSchemaNamespace []string `xml:"compatiblePersistenceSchemaNamespace,omitempty"` + // This property should be set with compatible schema namespaces exposed by + // the vendor provider. + // + // If not specified, vSphere assumes all Data Service + // provider schemas are compatible with all persistence provider namespaces + // advertised by the VASA provider. + CompatiblePersistenceSchemaNamespace []string `xml:"compatiblePersistenceSchemaNamespace,omitempty" json:"compatiblePersistenceSchemaNamespace,omitempty"` } func init() { types.Add("pbm:PbmPersistenceBasedDataServiceInfo", reflect.TypeOf((*PbmPersistenceBasedDataServiceInfo)(nil)).Elem()) } +// Requirement type containing capability constraints +// +// This structure may be used only with operations rendered under `/pbm`. type PbmPlacementCapabilityConstraintsRequirement struct { PbmPlacementRequirement - Constraints BasePbmCapabilityConstraints `xml:"constraints,typeattr"` + // Capability constraints + Constraints BasePbmCapabilityConstraints `xml:"constraints,typeattr" json:"constraints"` } func init() { types.Add("pbm:PbmPlacementCapabilityConstraintsRequirement", reflect.TypeOf((*PbmPlacementCapabilityConstraintsRequirement)(nil)).Elem()) } +// A Requirement for a particular `PbmCapabilityProfile`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmPlacementCapabilityProfileRequirement struct { PbmPlacementRequirement - ProfileId PbmProfileId `xml:"profileId"` + // Reference to the capability profile being used as a requirement + ProfileId PbmProfileId `xml:"profileId" json:"profileId"` } func init() { types.Add("pbm:PbmPlacementCapabilityProfileRequirement", reflect.TypeOf((*PbmPlacementCapabilityProfileRequirement)(nil)).Elem()) } +// The `PbmPlacementCompatibilityResult` data object +// contains the compatibility result of a placement request. +// +// This structure may be used only with operations rendered under `/pbm`. 
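A minimal sketch of asking the placement solver which hubs satisfy a policy and then reading the PbmPlacementCompatibilityResult fields defined just below, assuming the CheckRequirements wrapper on pbm.Client. Passing a nil hub list lets the server consider every datastore and storage pod it knows about.

```go
package pbmexamples

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/pbm"
	"github.com/vmware/govmomi/pbm/types"
)

// compatibleHubs returns the hubs (datastores or storage pods) that have no
// blocking faults for the given storage policy, and prints any warnings.
func compatibleHubs(ctx context.Context, c *pbm.Client, profileID string) ([]types.PbmPlacementHub, error) {
	req := []types.BasePbmPlacementRequirement{
		&types.PbmPlacementCapabilityProfileRequirement{
			ProfileId: types.PbmProfileId{UniqueId: profileID},
		},
	}

	res, err := c.CheckRequirements(ctx, nil, nil, req)
	if err != nil {
		return nil, err
	}

	var hubs []types.PbmPlacementHub
	for _, r := range res {
		if len(r.Error) == 0 { // no blocking faults: the hub is compatible
			hubs = append(hubs, r.Hub)
		}
		for _, w := range r.Warning {
			fmt.Printf("warning for %s: %s\n", r.Hub.HubId, w.LocalizedMessage)
		}
	}
	return hubs, nil
}
```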
type PbmPlacementCompatibilityResult struct { types.DynamicData - Hub PbmPlacementHub `xml:"hub"` - MatchingResources []BasePbmPlacementMatchingResources `xml:"matchingResources,omitempty,typeattr"` - HowMany int64 `xml:"howMany,omitempty"` - Utilization []PbmPlacementResourceUtilization `xml:"utilization,omitempty"` - Warning []types.LocalizedMethodFault `xml:"warning,omitempty"` - Error []types.LocalizedMethodFault `xml:"error,omitempty"` + // The Datastore or StoragePod under consideration + // as a location for virtual machine files. + Hub PbmPlacementHub `xml:"hub" json:"hub"` + // Resources that match the policy. + // + // If populated, signifies that there are + // specific resources that match the policy for `PbmPlacementCompatibilityResult.hub`. If null, + // signifies that all resources (for example, hosts connected to the + // datastore or storage pod) are compatible. + MatchingResources []BasePbmPlacementMatchingResources `xml:"matchingResources,omitempty,typeattr" json:"matchingResources,omitempty"` + // How many objects of the kind requested can be provisioned on this + // `PbmPlacementCompatibilityResult.hub`. + HowMany int64 `xml:"howMany,omitempty" json:"howMany,omitempty"` + // This field is not populated if there is no size in the query, i.e. + // + // if the request carries only policy and no size requirements, this + // will not be populated. + Utilization []PbmPlacementResourceUtilization `xml:"utilization,omitempty" json:"utilization,omitempty"` + // Array of faults that describe issues that may affect profile compatibility. + // + // Users should consider these issues before using this Datastore + // or StoragePod and a connected Hosts. + Warning []types.LocalizedMethodFault `xml:"warning,omitempty" json:"warning,omitempty"` + // Array of faults that prevent this datastore or storage pod from being compatible with the + // specified profile, including if no host connected to this `PbmPlacementCompatibilityResult.hub` is compatible. + Error []types.LocalizedMethodFault `xml:"error,omitempty" json:"error,omitempty"` } func init() { types.Add("pbm:PbmPlacementCompatibilityResult", reflect.TypeOf((*PbmPlacementCompatibilityResult)(nil)).Elem()) } +// A `PbmPlacementHub` data object identifies a storage location +// where virtual machine files can be placed. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmPlacementHub struct { types.DynamicData - HubType string `xml:"hubType"` - HubId string `xml:"hubId"` + // Type of the hub. + // + // Currently ManagedObject is the only supported type. + HubType string `xml:"hubType" json:"hubType"` + // Hub identifier; a ManagedObjectReference to a datastore or a storage pod. + HubId string `xml:"hubId" json:"hubId"` } func init() { types.Add("pbm:PbmPlacementHub", reflect.TypeOf((*PbmPlacementHub)(nil)).Elem()) } +// Describes the collection of replication related resources that satisfy a +// policy, for a specific datastore. +// +// This class is returned only when the policy contains replication capabilities. +// For a storage pod, only those replication groups that are common across +// all datastores in the storage pod are considered compatible. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmPlacementMatchingReplicationResources struct { PbmPlacementMatchingResources - ReplicationGroup []types.ReplicationGroupId `xml:"replicationGroup,omitempty"` + // Replication groups that match the policy. 
+ ReplicationGroup []types.ReplicationGroupId `xml:"replicationGroup,omitempty" json:"replicationGroup,omitempty"` } func init() { types.Add("pbm:PbmPlacementMatchingReplicationResources", reflect.TypeOf((*PbmPlacementMatchingReplicationResources)(nil)).Elem()) } +// Describes the collection of resources (for example, hosts) that satisfy a +// policy, for a specific datastore. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmPlacementMatchingResources struct { types.DynamicData } @@ -1237,6 +2138,9 @@ func init() { types.Add("pbm:PbmPlacementMatchingResources", reflect.TypeOf((*PbmPlacementMatchingResources)(nil)).Elem()) } +// Defines a constraint for placing objects onto `PbmPlacementHub`s. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmPlacementRequirement struct { types.DynamicData } @@ -1245,82 +2149,156 @@ func init() { types.Add("pbm:PbmPlacementRequirement", reflect.TypeOf((*PbmPlacementRequirement)(nil)).Elem()) } +// Describes the resource utilization metrics of a datastore. +// +// These results are not to be treated as a guaranteed availability, +// they are useful to estimate the effects of a change of policy +// or the effects of a provisioning action. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmPlacementResourceUtilization struct { types.DynamicData - Name PbmExtendedElementDescription `xml:"name"` - Description PbmExtendedElementDescription `xml:"description"` - AvailableBefore int64 `xml:"availableBefore,omitempty"` - AvailableAfter int64 `xml:"availableAfter,omitempty"` - Total int64 `xml:"total,omitempty"` + // Name of the resource. + Name PbmExtendedElementDescription `xml:"name" json:"name"` + // Description of the resource. + Description PbmExtendedElementDescription `xml:"description" json:"description"` + // Currently available (i.e. + // + // before the provisioning step). + AvailableBefore int64 `xml:"availableBefore,omitempty" json:"availableBefore,omitempty"` + // Available after the provisioning step. + AvailableAfter int64 `xml:"availableAfter,omitempty" json:"availableAfter,omitempty"` + // Total resource availability + Total int64 `xml:"total,omitempty" json:"total,omitempty"` } func init() { types.Add("pbm:PbmPlacementResourceUtilization", reflect.TypeOf((*PbmPlacementResourceUtilization)(nil)).Elem()) } +// The `PbmProfile` data object is the base object +// for storage capability profiles. +// +// This object defines metadata +// for the profile. The derived capability profile represents the +// user's intent for selection and configuration of storage resources +// and/or services that support deployment of virtual machines +// and virtual disks. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmProfile struct { types.DynamicData - ProfileId PbmProfileId `xml:"profileId"` - Name string `xml:"name"` - Description string `xml:"description,omitempty"` - CreationTime time.Time `xml:"creationTime"` - CreatedBy string `xml:"createdBy"` - LastUpdatedTime time.Time `xml:"lastUpdatedTime"` - LastUpdatedBy string `xml:"lastUpdatedBy"` + // Unique identifier for the profile. + ProfileId PbmProfileId `xml:"profileId" json:"profileId"` + Name string `xml:"name" json:"name"` + // Profile description. + Description string `xml:"description,omitempty" json:"description,omitempty"` + // Time stamp of profile creation. 
+ CreationTime time.Time `xml:"creationTime" json:"creationTime"` + // User name of the profile creator. + // + // Set during creation time. + CreatedBy string `xml:"createdBy" json:"createdBy"` + // Time stamp of latest modification to the profile. + LastUpdatedTime time.Time `xml:"lastUpdatedTime" json:"lastUpdatedTime"` + // Name of the user performing the latest modification of the profile. + LastUpdatedBy string `xml:"lastUpdatedBy" json:"lastUpdatedBy"` } func init() { types.Add("pbm:PbmProfile", reflect.TypeOf((*PbmProfile)(nil)).Elem()) } +// Profile unique identifier. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmProfileId struct { types.DynamicData - UniqueId string `xml:"uniqueId"` + // Unique identifier of the profile. + UniqueId string `xml:"uniqueId" json:"uniqueId"` } func init() { types.Add("pbm:PbmProfileId", reflect.TypeOf((*PbmProfileId)(nil)).Elem()) } +// The `PbmProfileOperationOutcome` data object describes the result +// of a `PbmProfileProfileManager` operation. +// +// If there was an +// error during the operation, the object identifies the fault. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmProfileOperationOutcome struct { types.DynamicData - ProfileId PbmProfileId `xml:"profileId"` - Fault *types.LocalizedMethodFault `xml:"fault,omitempty"` + // Identifies the profile specified for the operation. + ProfileId PbmProfileId `xml:"profileId" json:"profileId"` + // One of the `PbmFault` objects. + Fault *types.LocalizedMethodFault `xml:"fault,omitempty" json:"fault,omitempty"` } func init() { types.Add("pbm:PbmProfileOperationOutcome", reflect.TypeOf((*PbmProfileOperationOutcome)(nil)).Elem()) } +// The `PbmProfileResourceType` data object defines the vSphere resource type +// that is supported for profile management. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmProfileResourceType struct { types.DynamicData - ResourceType string `xml:"resourceType"` + // Type of resource to which capability information applies. + // + // resourceType is a string value that corresponds to + // a `PbmProfileResourceTypeEnum_enum` enumeration value. + // Only the STORAGE resource type is supported. + ResourceType string `xml:"resourceType" json:"resourceType"` } func init() { types.Add("pbm:PbmProfileResourceType", reflect.TypeOf((*PbmProfileResourceType)(nil)).Elem()) } +// The `PbmProfileType` identifier is defined by storage providers +// to distinguish between different types of profiles plugged into the system. +// +// An example of a system supported profile type is "CapabilityBasedProfileType" +// which will be the type used for all capability-based profiles created by +// the system using capability metadata information published to the system. +// +// For internal use only. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmProfileType struct { types.DynamicData - UniqueId string `xml:"uniqueId"` + // Unique type identifier for this profile type. + // + // eg "CapabilityBased", or other. + UniqueId string `xml:"uniqueId" json:"uniqueId"` } func init() { types.Add("pbm:PbmProfileType", reflect.TypeOf((*PbmProfileType)(nil)).Elem()) } +// Fault used to indicate which property instance in requirements profile that does not +// match. +// +// This structure may be used only with operations rendered under `/pbm`. 
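A minimal sketch of listing requirement profiles and printing the PbmProfile metadata described above, assuming the QueryProfile and RetrieveContent wrappers on pbm.Client; "REQUIREMENT" is one of the PbmProfileCategoryEnum values.

```go
package pbmexamples

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/pbm"
	"github.com/vmware/govmomi/pbm/types"
)

// listRequirementProfiles prints the metadata defined on PbmProfile (name,
// id, creator, creation time) for every storage requirements profile.
func listRequirementProfiles(ctx context.Context, c *pbm.Client) error {
	rtype := types.PbmProfileResourceType{ResourceType: "STORAGE"}

	ids, err := c.QueryProfile(ctx, rtype, "REQUIREMENT")
	if err != nil {
		return err
	}

	profiles, err := c.RetrieveContent(ctx, ids)
	if err != nil {
		return err
	}

	for _, p := range profiles {
		base := p.GetPbmProfile()
		fmt.Printf("%s (%s) created by %s at %s\n",
			base.Name, base.ProfileId.UniqueId, base.CreatedBy, base.CreationTime)
	}
	return nil
}
```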
type PbmPropertyMismatchFault struct { PbmCompatibilityCheckFault - CapabilityInstanceId PbmCapabilityMetadataUniqueId `xml:"capabilityInstanceId"` - RequirementPropertyInstance PbmCapabilityPropertyInstance `xml:"requirementPropertyInstance"` + // Id of the CapabilityInstance in requirements profile that + // does not match. + CapabilityInstanceId PbmCapabilityMetadataUniqueId `xml:"capabilityInstanceId" json:"capabilityInstanceId"` + // The property instance in requirement profile that does not match. + RequirementPropertyInstance PbmCapabilityPropertyInstance `xml:"requirementPropertyInstance" json:"requirementPropertyInstance"` } func init() { @@ -1339,9 +2317,11 @@ func init() { types.Add("pbm:PbmQueryAssociatedEntities", reflect.TypeOf((*PbmQueryAssociatedEntities)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmQueryAssociatedEntities`. type PbmQueryAssociatedEntitiesRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - Profiles []PbmProfileId `xml:"profiles,omitempty"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Storage policy array. + Profiles []PbmProfileId `xml:"profiles,omitempty" json:"profiles,omitempty"` } func init() { @@ -1349,7 +2329,7 @@ func init() { } type PbmQueryAssociatedEntitiesResponse struct { - Returnval []PbmQueryProfileResult `xml:"returnval,omitempty"` + Returnval []PbmQueryProfileResult `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmQueryAssociatedEntity PbmQueryAssociatedEntityRequestType @@ -1358,10 +2338,16 @@ func init() { types.Add("pbm:PbmQueryAssociatedEntity", reflect.TypeOf((*PbmQueryAssociatedEntity)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmQueryAssociatedEntity`. type PbmQueryAssociatedEntityRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - Profile PbmProfileId `xml:"profile"` - EntityType string `xml:"entityType,omitempty"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Profile identifier. + Profile PbmProfileId `xml:"profile" json:"profile"` + // If specified, the method returns only those entities + // which match the type. The entityType string value must match + // one of the `PbmObjectType_enum` values. + // If not specified, the method returns all entities associated with the profile. + EntityType string `xml:"entityType,omitempty" json:"entityType,omitempty"` } func init() { @@ -1369,7 +2355,7 @@ func init() { } type PbmQueryAssociatedEntityResponse struct { - Returnval []PbmServerObjectRef `xml:"returnval,omitempty"` + Returnval []PbmServerObjectRef `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmQueryAssociatedProfile PbmQueryAssociatedProfileRequestType @@ -1378,9 +2364,11 @@ func init() { types.Add("pbm:PbmQueryAssociatedProfile", reflect.TypeOf((*PbmQueryAssociatedProfile)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmQueryAssociatedProfile`. type PbmQueryAssociatedProfileRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - Entity PbmServerObjectRef `xml:"entity"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Reference to a virtual machine, virtual disk, or datastore. 
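A minimal sketch of listing the virtual machines associated with a profile through the association query described above, assuming the QueryAssociatedEntity wrapper on pbm.Client and the PbmObjectType enum constant for virtual machines.

```go
package pbmexamples

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/pbm"
	"github.com/vmware/govmomi/pbm/types"
)

// vmsUsingProfile lists the virtual machines currently associated with a
// storage policy.
func vmsUsingProfile(ctx context.Context, c *pbm.Client, profileID string) error {
	id := types.PbmProfileId{UniqueId: profileID}

	refs, err := c.QueryAssociatedEntity(ctx, id, string(types.PbmObjectTypeVirtualMachine))
	if err != nil {
		return err
	}

	for _, ref := range refs {
		fmt.Printf("%s %s\n", ref.ObjectType, ref.Key)
	}
	return nil
}
```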
+ Entity PbmServerObjectRef `xml:"entity" json:"entity"` } func init() { @@ -1388,7 +2376,7 @@ func init() { } type PbmQueryAssociatedProfileResponse struct { - Returnval []PbmProfileId `xml:"returnval,omitempty"` + Returnval []PbmProfileId `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmQueryAssociatedProfiles PbmQueryAssociatedProfilesRequestType @@ -1397,9 +2385,11 @@ func init() { types.Add("pbm:PbmQueryAssociatedProfiles", reflect.TypeOf((*PbmQueryAssociatedProfiles)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmQueryAssociatedProfiles`. type PbmQueryAssociatedProfilesRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - Entities []PbmServerObjectRef `xml:"entities"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Array of server object references. + Entities []PbmServerObjectRef `xml:"entities" json:"entities"` } func init() { @@ -1407,7 +2397,7 @@ func init() { } type PbmQueryAssociatedProfilesResponse struct { - Returnval []PbmQueryProfileResult `xml:"returnval,omitempty"` + Returnval []PbmQueryProfileResult `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmQueryByRollupComplianceStatus PbmQueryByRollupComplianceStatusRequestType @@ -1416,9 +2406,11 @@ func init() { types.Add("pbm:PbmQueryByRollupComplianceStatus", reflect.TypeOf((*PbmQueryByRollupComplianceStatus)(nil)).Elem()) } +// The parameters of `PbmComplianceManager.PbmQueryByRollupComplianceStatus`. type PbmQueryByRollupComplianceStatusRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - Status string `xml:"status"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // `PbmComplianceStatus_enum` + Status string `xml:"status" json:"status"` } func init() { @@ -1426,7 +2418,7 @@ func init() { } type PbmQueryByRollupComplianceStatusResponse struct { - Returnval []PbmServerObjectRef `xml:"returnval,omitempty"` + Returnval []PbmServerObjectRef `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmQueryDefaultRequirementProfile PbmQueryDefaultRequirementProfileRequestType @@ -1435,9 +2427,11 @@ func init() { types.Add("pbm:PbmQueryDefaultRequirementProfile", reflect.TypeOf((*PbmQueryDefaultRequirementProfile)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmQueryDefaultRequirementProfile`. type PbmQueryDefaultRequirementProfileRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - Hub PbmPlacementHub `xml:"hub"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Placement hub (i.e. datastore). + Hub PbmPlacementHub `xml:"hub" json:"hub"` } func init() { @@ -1445,7 +2439,7 @@ func init() { } type PbmQueryDefaultRequirementProfileResponse struct { - Returnval *PbmProfileId `xml:"returnval,omitempty"` + Returnval *PbmProfileId `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmQueryDefaultRequirementProfiles PbmQueryDefaultRequirementProfilesRequestType @@ -1454,9 +2448,13 @@ func init() { types.Add("pbm:PbmQueryDefaultRequirementProfiles", reflect.TypeOf((*PbmQueryDefaultRequirementProfiles)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmQueryDefaultRequirementProfiles`. type PbmQueryDefaultRequirementProfilesRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - Datastores []PbmPlacementHub `xml:"datastores"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // The datastores for which the default profiles are requested. 
For + // legacy datastores we set + // `DefaultProfileInfo.defaultProfile` to `null`. + Datastores []PbmPlacementHub `xml:"datastores" json:"datastores"` } func init() { @@ -1464,7 +2462,7 @@ func init() { } type PbmQueryDefaultRequirementProfilesResponse struct { - Returnval []PbmDefaultProfileInfo `xml:"returnval"` + Returnval []PbmDefaultProfileInfo `xml:"returnval" json:"returnval"` } type PbmQueryMatchingHub PbmQueryMatchingHubRequestType @@ -1473,10 +2471,15 @@ func init() { types.Add("pbm:PbmQueryMatchingHub", reflect.TypeOf((*PbmQueryMatchingHub)(nil)).Elem()) } +// The parameters of `PbmPlacementSolver.PbmQueryMatchingHub`. type PbmQueryMatchingHubRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - HubsToSearch []PbmPlacementHub `xml:"hubsToSearch,omitempty"` - Profile PbmProfileId `xml:"profile"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Candidate list of hubs, either datastores or storage pods or a + // mix. If this parameter is not specified, the Server uses all + // of the datastores and storage pods. + HubsToSearch []PbmPlacementHub `xml:"hubsToSearch,omitempty" json:"hubsToSearch,omitempty"` + // Storage requirement profile. + Profile PbmProfileId `xml:"profile" json:"profile"` } func init() { @@ -1484,7 +2487,7 @@ func init() { } type PbmQueryMatchingHubResponse struct { - Returnval []PbmPlacementHub `xml:"returnval,omitempty"` + Returnval []PbmPlacementHub `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmQueryMatchingHubWithSpec PbmQueryMatchingHubWithSpecRequestType @@ -1493,10 +2496,15 @@ func init() { types.Add("pbm:PbmQueryMatchingHubWithSpec", reflect.TypeOf((*PbmQueryMatchingHubWithSpec)(nil)).Elem()) } +// The parameters of `PbmPlacementSolver.PbmQueryMatchingHubWithSpec`. type PbmQueryMatchingHubWithSpecRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - HubsToSearch []PbmPlacementHub `xml:"hubsToSearch,omitempty"` - CreateSpec PbmCapabilityProfileCreateSpec `xml:"createSpec"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Candidate list of hubs, either datastores or storage + // pods or a mix. If this parameter is not specified, the Server uses + // all of the datastores and storage pods for placement compatibility checking. + HubsToSearch []PbmPlacementHub `xml:"hubsToSearch,omitempty" json:"hubsToSearch,omitempty"` + // Storage profile creation specification. + CreateSpec PbmCapabilityProfileCreateSpec `xml:"createSpec" json:"createSpec"` } func init() { @@ -1504,7 +2512,7 @@ func init() { } type PbmQueryMatchingHubWithSpecResponse struct { - Returnval []PbmPlacementHub `xml:"returnval,omitempty"` + Returnval []PbmPlacementHub `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmQueryProfile PbmQueryProfileRequestType @@ -1513,10 +2521,16 @@ func init() { types.Add("pbm:PbmQueryProfile", reflect.TypeOf((*PbmQueryProfile)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmQueryProfile`. type PbmQueryProfileRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - ResourceType PbmProfileResourceType `xml:"resourceType"` - ProfileCategory string `xml:"profileCategory,omitempty"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Type of resource. You can specify only STORAGE. + ResourceType PbmProfileResourceType `xml:"resourceType" json:"resourceType"` + // Profile category. The string value must correspond + // to one of the `PbmProfileCategoryEnum_enum` values. 
+ // If you do not specify a profile category, the method returns profiles in all + // categories. + ProfileCategory string `xml:"profileCategory,omitempty" json:"profileCategory,omitempty"` } func init() { @@ -1524,27 +2538,48 @@ func init() { } type PbmQueryProfileResponse struct { - Returnval []PbmProfileId `xml:"returnval,omitempty"` + Returnval []PbmProfileId `xml:"returnval,omitempty" json:"returnval,omitempty"` } +// The `PbmQueryProfileResult` data object +// identifies a virtual machine, virtual disk, or datastore +// and it lists the identifier(s) for the associated profile(s). +// +// This structure may be used only with operations rendered under `/pbm`. type PbmQueryProfileResult struct { types.DynamicData - Object PbmServerObjectRef `xml:"object"` - ProfileId []PbmProfileId `xml:"profileId,omitempty"` - Fault *types.LocalizedMethodFault `xml:"fault,omitempty"` + // Reference to the virtual machine, virtual disk, or + // datastore on which the query was performed. + Object PbmServerObjectRef `xml:"object" json:"object"` + // Array of identifiers for profiles which are associated with object. + ProfileId []PbmProfileId `xml:"profileId,omitempty" json:"profileId,omitempty"` + // Fault associated with the query, if there is one. + Fault *types.LocalizedMethodFault `xml:"fault,omitempty" json:"fault,omitempty"` } func init() { types.Add("pbm:PbmQueryProfileResult", reflect.TypeOf((*PbmQueryProfileResult)(nil)).Elem()) } +// The `PbmQueryReplicationGroupResult` data object +// identifies a virtual machine, or a virtual disk and lists the identifier(s) for the associated +// replication group. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmQueryReplicationGroupResult struct { types.DynamicData - Object PbmServerObjectRef `xml:"object"` - ReplicationGroupId *types.ReplicationGroupId `xml:"replicationGroupId,omitempty"` - Fault *types.LocalizedMethodFault `xml:"fault,omitempty"` + // Reference to the virtual machine or virtual disk on which the query was performed. + // + // If the + // query was performed for a virtual machine and all it's disks, this will reference each disk + // and the virtual machine config individually. + Object PbmServerObjectRef `xml:"object" json:"object"` + // Replication group identifier which is associated with object. + ReplicationGroupId *types.ReplicationGroupId `xml:"replicationGroupId,omitempty" json:"replicationGroupId,omitempty"` + // Fault associated with the query, if there is one. + Fault *types.LocalizedMethodFault `xml:"fault,omitempty" json:"fault,omitempty"` } func init() { @@ -1557,9 +2592,15 @@ func init() { types.Add("pbm:PbmQueryReplicationGroups", reflect.TypeOf((*PbmQueryReplicationGroups)(nil)).Elem()) } +// The parameters of `PbmReplicationManager.PbmQueryReplicationGroups`. type PbmQueryReplicationGroupsRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - Entities []PbmServerObjectRef `xml:"entities,omitempty"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Array of server object references. 
Valid types are + // `virtualMachine`, + // `virtualMachineAndDisks`, + // `virtualDiskId`, + // `virtualDiskUUID` + Entities []PbmServerObjectRef `xml:"entities,omitempty" json:"entities,omitempty"` } func init() { @@ -1567,7 +2608,7 @@ func init() { } type PbmQueryReplicationGroupsResponse struct { - Returnval []PbmQueryReplicationGroupResult `xml:"returnval,omitempty"` + Returnval []PbmQueryReplicationGroupResult `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmQuerySpaceStatsForStorageContainer PbmQuerySpaceStatsForStorageContainerRequestType @@ -1576,10 +2617,15 @@ func init() { types.Add("pbm:PbmQuerySpaceStatsForStorageContainer", reflect.TypeOf((*PbmQuerySpaceStatsForStorageContainer)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmQuerySpaceStatsForStorageContainer`. type PbmQuerySpaceStatsForStorageContainerRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - Datastore PbmServerObjectRef `xml:"datastore"` - CapabilityProfileId []PbmProfileId `xml:"capabilityProfileId,omitempty"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Entity for which space statistics are being requested i.e datastore. + Datastore PbmServerObjectRef `xml:"datastore" json:"datastore"` + // \- capability profile Ids. + // If omitted, the statistics for the container + // as a whole would be returned. + CapabilityProfileId []PbmProfileId `xml:"capabilityProfileId,omitempty" json:"capabilityProfileId,omitempty"` } func init() { @@ -1587,7 +2633,7 @@ func init() { } type PbmQuerySpaceStatsForStorageContainerResponse struct { - Returnval []PbmDatastoreSpaceStatistics `xml:"returnval,omitempty"` + Returnval []PbmDatastoreSpaceStatistics `xml:"returnval,omitempty" json:"returnval,omitempty"` } type PbmResetDefaultRequirementProfile PbmResetDefaultRequirementProfileRequestType @@ -1596,9 +2642,11 @@ func init() { types.Add("pbm:PbmResetDefaultRequirementProfile", reflect.TypeOf((*PbmResetDefaultRequirementProfile)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmResetDefaultRequirementProfile`. type PbmResetDefaultRequirementProfileRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - Profile *PbmProfileId `xml:"profile,omitempty"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Profile to reset. + Profile *PbmProfileId `xml:"profile,omitempty" json:"profile,omitempty"` } func init() { @@ -1615,7 +2663,7 @@ func init() { } type PbmResetVSanDefaultProfileRequestType struct { - This types.ManagedObjectReference `xml:"_this"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` } func init() { @@ -1625,11 +2673,20 @@ func init() { type PbmResetVSanDefaultProfileResponse struct { } +// A ResourceInUse fault indicating that some error has occurred because a +// resource was in use. +// +// Information about the resource that is in use may +// be supplied. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmResourceInUse struct { PbmFault - Type string `xml:"type,omitempty"` - Name string `xml:"name,omitempty"` + // Type of resource that is in use. + Type string `xml:"type,omitempty" json:"type,omitempty"` + // Name of the instance of the resource that is in use. + Name string `xml:"name,omitempty" json:"name,omitempty"` } func init() { @@ -1648,9 +2705,11 @@ func init() { types.Add("pbm:PbmRetrieveContent", reflect.TypeOf((*PbmRetrieveContent)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmRetrieveContent`. 
type PbmRetrieveContentRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - ProfileIds []PbmProfileId `xml:"profileIds"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Array of storage profile identifiers. + ProfileIds []PbmProfileId `xml:"profileIds" json:"profileIds"` } func init() { @@ -1658,7 +2717,7 @@ func init() { } type PbmRetrieveContentResponse struct { - Returnval []BasePbmProfile `xml:"returnval,typeattr"` + Returnval []BasePbmProfile `xml:"returnval,typeattr" json:"returnval"` } type PbmRetrieveServiceContent PbmRetrieveServiceContentRequestType @@ -1668,7 +2727,7 @@ func init() { } type PbmRetrieveServiceContentRequestType struct { - This types.ManagedObjectReference `xml:"_this"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` } func init() { @@ -1676,47 +2735,154 @@ func init() { } type PbmRetrieveServiceContentResponse struct { - Returnval PbmServiceInstanceContent `xml:"returnval"` + Returnval PbmServiceInstanceContent `xml:"returnval" json:"returnval"` } +// The `PbmRollupComplianceResult` data object identifies the virtual machine +// for which rollup compliance was checked, and it contains the overall status +// and a list of compliance result objects. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmRollupComplianceResult struct { types.DynamicData - OldestCheckTime time.Time `xml:"oldestCheckTime"` - Entity PbmServerObjectRef `xml:"entity"` - OverallComplianceStatus string `xml:"overallComplianceStatus"` - OverallComplianceTaskStatus string `xml:"overallComplianceTaskStatus,omitempty"` - Result []PbmComplianceResult `xml:"result,omitempty"` - ErrorCause []types.LocalizedMethodFault `xml:"errorCause,omitempty"` - ProfileMismatch bool `xml:"profileMismatch"` + // Indicates the earliest time that compliance was checked for any + // of the entities in the rollup compliance check. + // + // The compliance + // check time for a single entity is represented in the + // `PbmComplianceResult*.*PbmComplianceResult.checkTime` + // property. If the `PbmComplianceResult.checkTime` + // property is unset for any of the objects in the results + // array, the oldestCheckTime property will be unset. + OldestCheckTime time.Time `xml:"oldestCheckTime" json:"oldestCheckTime"` + // Virtual machine for which the rollup compliance was checked. + Entity PbmServerObjectRef `xml:"entity" json:"entity"` + // Overall compliance status of the virtual machine and its virtual disks. + // + // overallComplianceStatus is a string value that + // corresponds to one of the + // `PbmComplianceResult*.*PbmComplianceResult.complianceStatus` + // values. + // + // The overall compliance status is determined by the following rules, applied in the order + // listed: + // - If all the entities are compliant, the overall status is + // compliant. + // - Else if any entity's status is outOfDate, the overall status is + // outOfDate. + // - Else if any entity's status is nonCompliant, the overall status is + // nonCompliant. + // - Else if any entity's status is unknown, the overall status is + // unknown. + // - Else if any entity's status is notApplicable, the overall status is + // notApplicable. + OverallComplianceStatus string `xml:"overallComplianceStatus" json:"overallComplianceStatus"` + // Overall compliance task status of the virtual machine and its virtual + // disks. + // + // overallComplianceTaskStatus is a string value that + // corresponds to one of the `PbmComplianceResult`. 
+ // `PbmComplianceResult.complianceTaskStatus` values. + OverallComplianceTaskStatus string `xml:"overallComplianceTaskStatus,omitempty" json:"overallComplianceTaskStatus,omitempty"` + // Individual compliance results that make up the rollup. + Result []PbmComplianceResult `xml:"result,omitempty" json:"result,omitempty"` + // This property is set if the overall compliance task fails with some error. + // + // This + // property indicates the causes of error. If there are multiple failures, it stores + // these failure in this array. + ErrorCause []types.LocalizedMethodFault `xml:"errorCause,omitempty" json:"errorCause,omitempty"` + // Deprecated as of vSphere 2016, use + // `PbmRollupComplianceResult.overallComplianceStatus` + // to know if profile mismatch has occurred. If + // overallComplianceStatus value is outOfDate, it means + // profileMismatch has occurred. + // + // True if and only if `PbmComplianceResult`. + // + // `PbmComplianceResult.mismatch` is true for at least one + // entity in the rollup compliance check. + ProfileMismatch bool `xml:"profileMismatch" json:"profileMismatch"` } func init() { types.Add("pbm:PbmRollupComplianceResult", reflect.TypeOf((*PbmRollupComplianceResult)(nil)).Elem()) } +// The `PbmServerObjectRef` data object identifies +// a virtual machine, +// virtual disk attached to a virtual machine, +// a first class storage object +// or a datastore. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmServerObjectRef struct { types.DynamicData - ObjectType string `xml:"objectType"` - Key string `xml:"key"` - ServerUuid string `xml:"serverUuid,omitempty"` + // Type of vSphere Server object. + // + // The value of the objectType string + // corresponds to one of the `PbmObjectType_enum` + // enumerated type values. + ObjectType string `xml:"objectType" json:"objectType"` + // Unique identifier for the object. + // + // The value of key depends + // on the objectType. + // + // + // + // + // + // + // + // + // + //
+    // | PbmObjectType  | key value                            |
+    // | -------------- | ------------------------------------ |
+    // | virtualMachine | _virtual-machine-MOR_                |
+    // | virtualDiskId  | _virtual-disk-MOR_:_VirtualDisk.key_ |
+    // | datastore      | _datastore-MOR_                      |
+    // MOR = ManagedObjectReference
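+    // For example (illustrative only), a reference to a virtual machine would use
+    // objectType "virtualMachine" and a key such as "vm-42", i.e. the value of the
+    // virtual machine's ManagedObjectReference ("vm-42" is a hypothetical ID).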
+ Key string `xml:"key" json:"key"` + // vCenter Server UUID; the ServiceContent.about.instanceUuid + // property in the vSphere API. + ServerUuid string `xml:"serverUuid,omitempty" json:"serverUuid,omitempty"` } func init() { types.Add("pbm:PbmServerObjectRef", reflect.TypeOf((*PbmServerObjectRef)(nil)).Elem()) } +// The `PbmServiceInstanceContent` data object defines properties for the +// `PbmServiceInstance` managed object. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmServiceInstanceContent struct { types.DynamicData - AboutInfo PbmAboutInfo `xml:"aboutInfo"` - SessionManager types.ManagedObjectReference `xml:"sessionManager"` - CapabilityMetadataManager types.ManagedObjectReference `xml:"capabilityMetadataManager"` - ProfileManager types.ManagedObjectReference `xml:"profileManager"` - ComplianceManager types.ManagedObjectReference `xml:"complianceManager"` - PlacementSolver types.ManagedObjectReference `xml:"placementSolver"` - ReplicationManager *types.ManagedObjectReference `xml:"replicationManager,omitempty"` + // Contains information that identifies the Storage Policy service. + AboutInfo PbmAboutInfo `xml:"aboutInfo" json:"aboutInfo"` + // For internal use. + // + // Refers instance of `PbmSessionManager`. + SessionManager types.ManagedObjectReference `xml:"sessionManager" json:"sessionManager"` + // For internal use. + // + // Refers instance of `PbmCapabilityMetadataManager`. + CapabilityMetadataManager types.ManagedObjectReference `xml:"capabilityMetadataManager" json:"capabilityMetadataManager"` + // Provides access to the Storage Policy ProfileManager. + // + // Refers instance of `PbmProfileProfileManager`. + ProfileManager types.ManagedObjectReference `xml:"profileManager" json:"profileManager"` + // Provides access to the Storage Policy ComplianceManager. + // + // Refers instance of `PbmComplianceManager`. + ComplianceManager types.ManagedObjectReference `xml:"complianceManager" json:"complianceManager"` + // Provides access to the Storage Policy PlacementSolver. + // + // Refers instance of `PbmPlacementSolver`. + PlacementSolver types.ManagedObjectReference `xml:"placementSolver" json:"placementSolver"` + // Provides access to the Storage Policy ReplicationManager. + // + // Refers instance of `PbmReplicationManager`. + ReplicationManager *types.ManagedObjectReference `xml:"replicationManager,omitempty" json:"replicationManager,omitempty"` } func init() { @@ -1729,10 +2895,13 @@ func init() { types.Add("pbm:PbmUpdate", reflect.TypeOf((*PbmUpdate)(nil)).Elem()) } +// The parameters of `PbmProfileProfileManager.PbmUpdate`. type PbmUpdateRequestType struct { - This types.ManagedObjectReference `xml:"_this"` - ProfileId PbmProfileId `xml:"profileId"` - UpdateSpec PbmCapabilityProfileUpdateSpec `xml:"updateSpec"` + This types.ManagedObjectReference `xml:"_this" json:"_this"` + // Profile identifier. + ProfileId PbmProfileId `xml:"profileId" json:"profileId"` + // Capability-based update specification. + UpdateSpec PbmCapabilityProfileUpdateSpec `xml:"updateSpec" json:"updateSpec"` } func init() { @@ -1742,6 +2911,10 @@ func init() { type PbmUpdateResponse struct { } +// Information about a supported data service provided using +// vSphere APIs for IO Filtering (VAIO) data service provider. +// +// This structure may be used only with operations rendered under `/pbm`. 
type PbmVaioDataServiceInfo struct { PbmLineOfServiceInfo } diff --git a/vendor/github.com/vmware/govmomi/property/collector.go b/vendor/github.com/vmware/govmomi/property/collector.go index 8798ceacbf..16bf222669 100644 --- a/vendor/github.com/vmware/govmomi/property/collector.go +++ b/vendor/github.com/vmware/govmomi/property/collector.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2015 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -31,7 +31,6 @@ import ( // // For more information, see: // http://pubs.vmware.com/vsphere-60/index.jsp?topic=%2Fcom.vmware.wssdk.apiref.doc%2Fvmodl.query.PropertyCollector.html -// type Collector struct { roundTripper soap.RoundTripper reference types.ManagedObjectReference diff --git a/vendor/github.com/vmware/govmomi/simulator/cluster_compute_resource.go b/vendor/github.com/vmware/govmomi/simulator/cluster_compute_resource.go index 12f910b2e4..4a1579d3ed 100644 --- a/vendor/github.com/vmware/govmomi/simulator/cluster_compute_resource.go +++ b/vendor/github.com/vmware/govmomi/simulator/cluster_compute_resource.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -66,10 +66,11 @@ func (add *addHost) Run(task *Task) (types.AnyType, types.BaseMethodFault) { } host := NewHostSystem(template) - host.configure(spec, add.req.AsConnected) + host.configure(task.ctx, spec, add.req.AsConnected) task.ctx.Map.PutEntity(cr, task.ctx.Map.NewEntity(host)) host.Summary.Host = &host.Self + host.Config.Host = host.Self cr.Host = append(cr.Host, host.Reference()) addComputeResource(cr.Summary.GetComputeResourceSummary(), host) @@ -350,6 +351,40 @@ func (c *ClusterComputeResource) ReconfigureComputeResourceTask(ctx *Context, re } } +func (c *ClusterComputeResource) MoveIntoTask(ctx *Context, req *types.MoveInto_Task) soap.HasFault { + task := CreateTask(c, "moveInto", func(*Task) (types.AnyType, types.BaseMethodFault) { + for _, ref := range req.Host { + host := ctx.Map.Get(ref).(*HostSystem) + + if *host.Parent == c.Self { + return nil, new(types.DuplicateName) // host already in this cluster + } + + switch parent := ctx.Map.Get(*host.Parent).(type) { + case *ClusterComputeResource: + if !host.Runtime.InMaintenanceMode { + return nil, new(types.InvalidState) + } + + RemoveReference(&parent.Host, ref) + case *mo.ComputeResource: + ctx.Map.Remove(ctx, parent.Self) + } + + c.Host = append(c.Host, ref) + host.Parent = &c.Self + } + + return nil, nil + }) + + return &methods.MoveInto_TaskBody{ + Res: &types.MoveInto_TaskResponse{ + Returnval: task.Run(ctx), + }, + } +} + func (c *ClusterComputeResource) PlaceVm(ctx *Context, req *types.PlaceVm) soap.HasFault { body := new(methods.PlaceVmBody) diff --git a/vendor/github.com/vmware/govmomi/simulator/container.go b/vendor/github.com/vmware/govmomi/simulator/container.go index fec1c0f486..f39ef99702 100644 --- a/vendor/github.com/vmware/govmomi/simulator/container.go +++ 
b/vendor/github.com/vmware/govmomi/simulator/container.go @@ -18,29 +18,32 @@ package simulator import ( "archive/tar" + "bufio" "bytes" - "encoding/hex" + "context" "encoding/json" + "errors" "fmt" "io" "log" - "net/http" + "net" "os" "os/exec" "path" "regexp" - "strconv" "strings" + "sync" "time" - - "github.com/google/uuid" - - "github.com/vmware/govmomi/vim25/methods" - "github.com/vmware/govmomi/vim25/types" ) var ( - shell = "/bin/sh" + shell = "/bin/sh" + eventWatch eventWatcher +) + +const ( + deleteWithContainer = "lifecycle=container" + createdByVcsim = "createdBy=vcsim" ) func init() { @@ -49,10 +52,26 @@ func init() { } } +type eventWatcher struct { + sync.Mutex + + stdin io.WriteCloser + stdout io.ReadCloser + process *os.Process + + // watches is a map of container IDs to container objects + watches map[string]*container +} + // container provides methods to manage a container within a simulator VM lifecycle. type container struct { + sync.Mutex + id string name string + + cancelWatch context.CancelFunc + changes chan struct{} } type networkSettings struct { @@ -62,551 +81,753 @@ type networkSettings struct { MacAddress string } -// inspect applies container network settings to vm.Guest properties. -func (c *container) inspect(vm *VirtualMachine) error { - if c.id == "" { - return nil +type containerDetails struct { + State struct { + Running bool + Paused bool } + NetworkSettings struct { + networkSettings + Networks map[string]networkSettings + } +} - var objects []struct { - State struct { - Running bool - Paused bool - } - NetworkSettings struct { - networkSettings - Networks map[string]networkSettings - } +type unknownContainer error +type uninitializedContainer error + +var sanitizeNameRx = regexp.MustCompile(`[\(\)\s]`) + +func sanitizeName(name string) string { + return sanitizeNameRx.ReplaceAllString(name, "-") +} + +func constructContainerName(name, uid string) string { + return fmt.Sprintf("vcsim-%s-%s", sanitizeName(name), uid) +} + +func constructVolumeName(containerName, uid, volumeName string) string { + return constructContainerName(containerName, uid) + "--" + sanitizeName(volumeName) +} + +func extractNameAndUid(containerName string) (name string, uid string, err error) { + parts := strings.Split(strings.TrimPrefix(containerName, "vcsim-"), "-") + if len(parts) != 2 { + err = fmt.Errorf("container name does not match expected vcsim-name-uid format: %s", containerName) + return } - cmd := exec.Command("docker", "inspect", c.id) - out, err := cmd.Output() + return parts[0], parts[1], nil +} + +func prefixToMask(prefix int) string { + mask := net.CIDRMask(prefix, 32) + return fmt.Sprintf("%d.%d.%d.%d", mask[0], mask[1], mask[2], mask[3]) +} + +type tarEntry struct { + header *tar.Header + content []byte +} + +// From https://docs.docker.com/engine/reference/commandline/cp/ : +// > It is not possible to copy certain system files such as resources under /proc, /sys, /dev, tmpfs, and mounts created by the user in the container. +// > However, you can still copy such files by manually running tar in docker exec. 
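+// copyToGuest below implements that workaround: it streams a tar archive holding a
+// single file to `docker exec -i <id> tar Cxf <dir> -`, so the file is extracted at
+// the destination path inside the container.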
+func copyToGuest(id string, dest string, length int64, reader io.Reader) error { + cmd := exec.Command("docker", "exec", "-i", id, "tar", "Cxf", path.Dir(dest), "-") + cmd.Stderr = os.Stderr + stdin, err := cmd.StdinPipe() if err != nil { return err } - if err = json.NewDecoder(bytes.NewReader(out)).Decode(&objects); err != nil { + + err = cmd.Start() + if err != nil { return err } - vm.Config.Annotation = strings.Join(cmd.Args, " ") - vm.logPrintf("%s: %s", vm.Config.Annotation, string(out)) - - for _, o := range objects { - s := o.NetworkSettings.networkSettings + tw := tar.NewWriter(stdin) + _ = tw.WriteHeader(&tar.Header{ + Name: path.Base(dest), + Size: length, + Mode: 0444, + ModTime: time.Now(), + }) - for _, n := range o.NetworkSettings.Networks { - s = n - break - } + _, err = io.Copy(tw, reader) - if o.State.Paused { - vm.Runtime.PowerState = types.VirtualMachinePowerStateSuspended - } else if o.State.Running { - vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOn - } else { - vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOff - } + twErr := tw.Close() + stdinErr := stdin.Close() - vm.Guest.IpAddress = s.IPAddress - vm.Summary.Guest.IpAddress = s.IPAddress - - if len(vm.Guest.Net) != 0 { - net := &vm.Guest.Net[0] - net.IpAddress = []string{s.IPAddress} - net.MacAddress = s.MacAddress - net.IpConfig = &types.NetIpConfigInfo{ - IpAddress: []types.NetIpConfigInfoIpAddress{{ - IpAddress: s.IPAddress, - PrefixLength: int32(s.IPPrefixLen), - State: string(types.NetIpConfigInfoIpAddressStatusPreferred), - }}, - } - } + waitErr := cmd.Wait() - for _, d := range vm.Config.Hardware.Device { - if eth, ok := d.(types.BaseVirtualEthernetCard); ok { - eth.GetVirtualEthernetCard().MacAddress = s.MacAddress - break - } - } + if err != nil || twErr != nil || stdinErr != nil || waitErr != nil { + return fmt.Errorf("copy: {%s}, tw: {%s}, stdin: {%s}, wait: {%s}", err, twErr, stdinErr, waitErr) } return nil } -func (c *container) prepareGuestOperation( - vm *VirtualMachine, - auth types.BaseGuestAuthentication) types.BaseMethodFault { - - if c.id == "" { - return new(types.GuestOperationsUnavailable) +func copyFromGuest(id string, src string, sink func(int64, io.Reader) error) error { + cmd := exec.Command("docker", "exec", id, "tar", "Ccf", path.Dir(src), "-", path.Base(src)) + cmd.Stderr = os.Stderr + stdout, err := cmd.StdoutPipe() + if err != nil { + return err } - if vm.Runtime.PowerState != types.VirtualMachinePowerStatePoweredOn { - return &types.InvalidPowerState{ - RequestedState: types.VirtualMachinePowerStatePoweredOn, - ExistingState: vm.Runtime.PowerState, - } + if err = cmd.Start(); err != nil { + return err } - switch creds := auth.(type) { - case *types.NamePasswordAuthentication: - if creds.Username == "" || creds.Password == "" { - return new(types.InvalidGuestLogin) - } - default: - return new(types.InvalidGuestLogin) + + tr := tar.NewReader(stdout) + header, err := tr.Next() + if err != nil { + return err } - return nil -} -var sanitizeNameRx = regexp.MustCompile(`[\(\)\s]`) + err = sink(header.Size, tr) + waitErr := cmd.Wait() -func sanitizeName(name string) string { - return sanitizeNameRx.ReplaceAllString(name, "-") + if err != nil || waitErr != nil { + return fmt.Errorf("err: {%s}, wait: {%s}", err, waitErr) + } + + return nil } -// createDMI writes BIOS UUID DMI files to a container volume -func (c *container) createDMI(vm *VirtualMachine, name string) error { +// createVolume creates a volume populated with the provided files +// If the 
header.Size is omitted or set to zero, then len(content+1) is used. +// Docker appears to treat this volume create command as idempotent so long as it's identical +// to an existing volume, so we can use this both for creating volumes inline in container create (for labelling) and +// for population after. +// returns: +// +// uid - string +// err - error or nil +func createVolume(volumeName string, labels []string, files []tarEntry) (string, error) { image := os.Getenv("VCSIM_BUSYBOX") if image == "" { image = "busybox" } - cmd := exec.Command("docker", "run", "--rm", "-i", "-v", name+":"+"/"+name, image, "tar", "-C", "/"+name, "-xf", "-") + name := sanitizeName(volumeName) + uid := "" + + // label the volume if specified - this requires the volume be created before use + if len(labels) > 0 { + run := []string{"volume", "create"} + for i := range labels { + run = append(run, "--label", labels[i]) + } + run = append(run, name) + cmd := exec.Command("docker", run...) + out, err := cmd.Output() + if err != nil { + return "", err + } + uid = strings.TrimSpace(string(out)) + + if name == "" { + name = uid + } + } + + run := []string{"run", "--rm", "-i"} + run = append(run, "-v", name+":/"+name) + run = append(run, image, "tar", "-C", "/"+name, "-xf", "-") + cmd := exec.Command("docker", run...) stdin, err := cmd.StdinPipe() if err != nil { - return err + return uid, err } err = cmd.Start() if err != nil { - return err + return uid, err } tw := tar.NewWriter(stdin) - dmi := []struct { - name string - val func(uuid.UUID) string - }{ - {"product_uuid", productUUID}, - {"product_serial", productSerial}, - } + for _, file := range files { + header := file.header + + if header.Size == 0 && len(file.content) > 0 { + header.Size = int64(len(file.content)) + } + + if header.ModTime.IsZero() { + header.ModTime = time.Now() + } - for _, file := range dmi { - val := file.val(vm.uid) - _ = tw.WriteHeader(&tar.Header{ - Name: file.name, - Size: int64(len(val) + 1), - Mode: 0444, - ModTime: time.Now(), - }) - _, _ = fmt.Fprintln(tw, val) + if header.Mode == 0 { + header.Mode = 0444 + } + + tarErr := tw.WriteHeader(header) + if tarErr == nil { + _, tarErr = tw.Write(file.content) + } } - _ = tw.Close() - _ = stdin.Close() + err = nil + twErr := tw.Close() + stdinErr := stdin.Close() + if twErr != nil || stdinErr != nil { + err = fmt.Errorf("tw: {%s}, stdin: {%s}", twErr, stdinErr) + } - if err := cmd.Wait(); err != nil { + if waitErr := cmd.Wait(); waitErr != nil { stderr := "" - if xerr, ok := err.(*exec.ExitError); ok { + if xerr, ok := waitErr.(*exec.ExitError); ok { stderr = string(xerr.Stderr) } - log.Printf("%s %s: %s %s", vm.Name, cmd.Args, err, stderr) - return err + log.Printf("%s %s: %s %s", name, cmd.Args, waitErr, stderr) + + err = fmt.Errorf("%s, wait: {%s}", err, waitErr) + return uid, err } - return nil + return uid, err } -var ( - toolsRunning = []types.PropertyChange{ - {Name: "guest.toolsStatus", Val: types.VirtualMachineToolsStatusToolsOk}, - {Name: "guest.toolsRunningStatus", Val: string(types.VirtualMachineToolsRunningStatusGuestToolsRunning)}, +func getBridge(bridgeName string) (string, error) { + // {"CreatedAt":"2023-07-11 19:22:25.45027052 +0000 UTC","Driver":"bridge","ID":"fe52c7502c5d","IPv6":"false","Internal":"false","Labels":"goodbye=,hello=","Name":"testnet","Scope":"local"} + // podman has distinctly different fields at v4.4.1 so commented out fields that don't match. 
We only actually care about ID + type bridgeNet struct { + // CreatedAt string + Driver string + ID string + // IPv6 string + // Internal string + // Labels string + Name string + // Scope string + } + + // if the underlay bridge already exists, return that + // we don't check for a specific label or similar so that it's possible to use a bridge created by other frameworks for composite testing + var bridge bridgeNet + cmd := exec.Command("docker", "network", "ls", "--format={{json .}}", "-f", fmt.Sprintf("name=%s$", bridgeName)) + out, err := cmd.Output() + if err != nil { + log.Printf("vcsim %s: %s, %s", cmd.Args, err, out) + return "", err } - toolsNotRunning = []types.PropertyChange{ - {Name: "guest.toolsStatus", Val: types.VirtualMachineToolsStatusToolsNotRunning}, - {Name: "guest.toolsRunningStatus", Val: string(types.VirtualMachineToolsRunningStatusGuestToolsNotRunning)}, + // unfortunately docker returns an empty string not an empty json doc and podman returns '[]' + // podman also returns an array of matches even when there's only one, so we normalize. + str := strings.TrimSpace(string(out)) + str = strings.TrimPrefix(str, "[") + str = strings.TrimSuffix(str, "]") + if len(str) == 0 { + return "", nil } -) -// start runs the container if specified by the RUN.container extraConfig property. -func (c *container) start(ctx *Context, vm *VirtualMachine) { - if c.id != "" { - start := "start" - if vm.Runtime.PowerState == types.VirtualMachinePowerStateSuspended { - start = "unpause" - } - cmd := exec.Command("docker", start, c.id) - err := cmd.Run() - if err != nil { - log.Printf("%s %s: %s", vm.Name, cmd.Args, err) - } else { - ctx.Map.Update(vm, toolsRunning) - } - return + err = json.Unmarshal([]byte(str), &bridge) + if err != nil { + log.Printf("vcsim %s: %s, %s", cmd.Args, err, str) + return "", err } - var args []string - var env []string - mountDMI := true - ports := make(map[string]string) + return bridge.ID, nil +} - for _, opt := range vm.Config.ExtraConfig { - val := opt.GetOptionValue() - if val.Key == "RUN.container" { - run := val.Value.(string) - err := json.Unmarshal([]byte(run), &args) - if err != nil { - args = []string{run} - } +// createBridge creates a bridge network if one does not already exist +// returns: +// +// uid - string +// err - error or nil +func createBridge(bridgeName string, labels ...string) (string, error) { - continue - } - if val.Key == "RUN.mountdmi" { - var mount bool - err := json.Unmarshal([]byte(val.Value.(string)), &mount) - if err == nil { - mountDMI = mount - } - } - if strings.HasPrefix(val.Key, "RUN.port.") { - sKey := strings.Split(val.Key, ".") - containerPort := sKey[len(sKey)-1] - ports[containerPort] = val.Value.(string) - } - if strings.HasPrefix(val.Key, "RUN.env.") { - sKey := strings.Split(val.Key, ".") - envKey := sKey[len(sKey)-1] - env = append(env, "--env", fmt.Sprintf("%s=%s", envKey, val.Value.(string))) - } - if strings.HasPrefix(val.Key, "guestinfo.") { - key := strings.Replace(strings.ToUpper(val.Key), ".", "_", -1) - env = append(env, "--env", fmt.Sprintf("VMX_%s=%s", key, val.Value.(string))) - } + id, err := getBridge(bridgeName) + if err != nil { + return "", err } - if len(args) == 0 { - return + if id != "" { + return id, nil } - if len(env) != 0 { - // Configure env as the data access method for cloud-init-vmware-guestinfo - env = append(env, "--env", "VMX_GUESTINFO=true") + + run := []string{"network", "create", "--label", createdByVcsim} + for i := range labels { + run = append(run, "--label", labels[i]) } - 
if len(ports) != 0 { - // Publish the specified container ports - for containerPort, hostPort := range ports { - env = append(env, "-p", fmt.Sprintf("%s:%s", hostPort, containerPort)) - } + run = append(run, bridgeName) + + cmd := exec.Command("docker", run...) + out, err := cmd.Output() + if err != nil { + log.Printf("vcsim %s: %s: %s", cmd.Args, out, err) + return "", err } - c.name = fmt.Sprintf("vcsim-%s-%s", sanitizeName(vm.Name), vm.uid) - run := append([]string{"docker", "run", "-d", "--name", c.name}, env...) + // docker returns the ID regardless of whether you supply a name when creating the network, however + // podman returns the pretty name, so we have to normalize + id, err = getBridge(bridgeName) + if err != nil { + return "", err + } - if mountDMI { - if err := c.createDMI(vm, c.name); err != nil { - return - } - run = append(run, "-v", fmt.Sprintf("%s:%s:ro", c.name, "/sys/class/dmi/id")) + return id, nil +} + +// create +// - name - pretty name, eg. vm name +// - id - uuid or similar - this is merged into container name rather than dictating containerID +// - networks - set of bridges to connect the container to +// - volumes - colon separated tuple of volume name to mount path. Passed directly to docker via -v so mount options can be postfixed. +// - env - array of environment vairables in name=value form +// - optsAndImage - pass-though options and must include at least the container image to use, including tag if necessary +// - args - the command+args to pass to the container +func create(ctx *Context, name string, id string, networks []string, volumes []string, ports []string, env []string, image string, args []string) (*container, error) { + if len(image) == 0 { + return nil, errors.New("cannot create container backing without an image") + } + + var c container + c.name = constructContainerName(name, id) + c.changes = make(chan struct{}) + + for i := range volumes { + // we'll pre-create anonymous volumes, simply for labelling consistency + volName := strings.Split(volumes[i], ":") + createVolume(volName[0], []string{deleteWithContainer, "container=" + c.name}, nil) } - args = append(run, args...) - cmd := exec.Command(shell, "-c", strings.Join(args, " ")) + // assemble env + var dockerNet []string + var dockerVol []string + var dockerPort []string + var dockerEnv []string + + for i := range env { + dockerEnv = append(dockerEnv, "--env", env[i]) + } + + for i := range volumes { + dockerVol = append(dockerVol, "-v", volumes[i]) + } + + for i := range ports { + dockerPort = append(dockerPort, "-p", ports[i]) + } + + for i := range networks { + dockerNet = append(dockerNet, "--network", networks[i]) + } + + run := []string{"docker", "create", "--name", c.name} + run = append(run, dockerNet...) + run = append(run, dockerVol...) + run = append(run, dockerPort...) + run = append(run, dockerEnv...) + run = append(run, image) + run = append(run, args...) + + // this combines all the run options into a single string that's passed to /bin/bash -c as the single argument to force bash parsing. 
+ // TODO: make this configurable behaviour so users also have the option of not escaping everything for bash + cmd := exec.Command(shell, "-c", strings.Join(run, " ")) out, err := cmd.Output() if err != nil { stderr := "" if xerr, ok := err.(*exec.ExitError); ok { stderr = string(xerr.Stderr) } - log.Printf("%s %s: %s %s", vm.Name, cmd.Args, err, stderr) - return + log.Printf("%s %s: %s %s", name, cmd.Args, err, stderr) + + return nil, err } - ctx.Map.Update(vm, toolsRunning) c.id = strings.TrimSpace(string(out)) - vm.logPrintf("%s %s: %s", cmd.Path, cmd.Args, c.id) - if err = c.inspect(vm); err != nil { - log.Printf("%s inspect %s: %s", vm.Name, c.id, err) - } - - // Start watching the container resource. - go c.watchContainer(vm) + return &c, nil } -// watchContainer monitors the underlying container and updates the VM -// properties based on the container status. This occurs until either -// the container or the VM is removed. -func (c *container) watchContainer(vm *VirtualMachine) { +// createVolume takes the specified files and writes them into a volume named for the container. +func (c *container) createVolume(name string, labels []string, files []tarEntry) (string, error) { + return createVolume(c.name+"--"+name, append(labels, "container="+c.name), files) +} - inspectInterval := time.Duration(5 * time.Second) - if d, err := time.ParseDuration(os.Getenv("VCSIM_INSPECT_INTERVAL")); err == nil { - inspectInterval = d +// inspect retrieves and parses container properties into directly usable struct +// returns: +// +// out - the stdout of the command +// detail - basic struct populated with container details +// err: +// * if c.id is empty, or docker returns "No such object", will return an uninitializedContainer error +// * err from either execution or parsing of json output +func (c *container) inspect() (out []byte, detail containerDetails, err error) { + c.Lock() + id := c.id + c.Unlock() + + if id == "" { + err = uninitializedContainer(errors.New("inspect of uninitialized container")) + return } - var ( - ctx = SpoofContext() - done = make(chan struct{}) - ticker = time.NewTicker(inspectInterval) - ) + var details []containerDetails - stopUpdatingVmFromContainer := func() { - ticker.Stop() - close(done) + cmd := exec.Command("docker", "inspect", c.id) + out, err = cmd.Output() + if eErr, ok := err.(*exec.ExitError); ok { + if strings.Contains(string(eErr.Stderr), "No such object") { + err = uninitializedContainer(errors.New("inspect of uninitialized container")) + } } - destroyVm := func() { - // If the container cannot be found then destroy this VM. - taskRef := vm.DestroyTask(ctx, &types.Destroy_Task{ - This: vm.Self, - }).(*methods.Destroy_TaskBody).Res.Returnval - task := ctx.Map.Get(taskRef).(*Task) + if err != nil { + return + } - // Wait for the task to complete and see if there is an error. - task.Wait() - if task.Info.Error != nil { - vm.logPrintf("failed to destroy vm: err=%v", *task.Info.Error) - } + if err = json.NewDecoder(bytes.NewReader(out)).Decode(&details); err != nil { + return } - updateVmFromContainer := func() { - // Exit the monitor loop if the VM was removed from the API side. - if c.id == "" { - stopUpdatingVmFromContainer() - return - } + if len(details) != 1 { + err = fmt.Errorf("multiple containers (%d) match ID: %s", len(details), c.id) + return + } - if err := c.inspect(vm); err != nil { - // If there is an error inspecting the container because it no - // longer exists, then destroy the VM as well. 
Please note the - // reason this logic does not invoke stopUpdatingVmFromContainer - // is because that will be handled the next time this function - // is entered and c.id is empty. - if err, ok := err.(*exec.ExitError); ok { - if strings.Contains(string(err.Stderr), "No such object") { - destroyVm() - } - } - } + detail = details[0] + return +} + +// start +// - if the container already exists, start it or unpause it. +func (c *container) start(ctx *Context) error { + c.Lock() + id := c.id + c.Unlock() + + if id == "" { + return uninitializedContainer(errors.New("start of uninitialized container")) } - // Update the VM from the container at regular intervals until the done - // channel is closed. - for { - select { - case <-ticker.C: - ctx.WithLock(vm, updateVmFromContainer) - case <-done: - return - } + start := "start" + _, detail, err := c.inspect() + if err != nil { + return err } -} -// stop the container (if any) for the given vm. -func (c *container) stop(ctx *Context, vm *VirtualMachine) { - if c.id == "" { - return + if detail.State.Paused { + start = "unpause" } - cmd := exec.Command("docker", "stop", c.id) - err := cmd.Run() + cmd := exec.Command("docker", start, c.id) + err = cmd.Run() if err != nil { - log.Printf("%s %s: %s", vm.Name, cmd.Args, err) - } else { - ctx.Map.Update(vm, toolsNotRunning) + log.Printf("%s %s: %s", c.name, cmd.Args, err) } + + return err } // pause the container (if any) for the given vm. -func (c *container) pause(ctx *Context, vm *VirtualMachine) { - if c.id == "" { - return +func (c *container) pause(ctx *Context) error { + c.Lock() + id := c.id + c.Unlock() + + if id == "" { + return uninitializedContainer(errors.New("pause of uninitialized container")) } cmd := exec.Command("docker", "pause", c.id) err := cmd.Run() if err != nil { - log.Printf("%s %s: %s", vm.Name, cmd.Args, err) - } else { - ctx.Map.Update(vm, toolsNotRunning) + log.Printf("%s %s: %s", c.name, cmd.Args, err) } + + return err } // restart the container (if any) for the given vm. -func (c *container) restart(ctx *Context, vm *VirtualMachine) { - if c.id == "" { - return +func (c *container) restart(ctx *Context) error { + c.Lock() + id := c.id + c.Unlock() + + if id == "" { + return uninitializedContainer(errors.New("restart of uninitialized container")) } cmd := exec.Command("docker", "restart", c.id) err := cmd.Run() if err != nil { - log.Printf("%s %s: %s", vm.Name, cmd.Args, err) - } else { - ctx.Map.Update(vm, toolsRunning) + log.Printf("%s %s: %s", c.name, cmd.Args, err) } + + return err } -// remove the container (if any) for the given vm. -func (c *container) remove(vm *VirtualMachine) { - if c.id == "" { - return - } +// stop the container (if any) for the given vm. +func (c *container) stop(ctx *Context) error { + c.Lock() + id := c.id + c.Unlock() - args := [][]string{ - {"rm", "-v", "-f", c.id}, - {"volume", "rm", "-f", c.name}, + if id == "" { + return uninitializedContainer(errors.New("stop of uninitialized container")) } - for i := range args { - cmd := exec.Command("docker", args[i]...) 
- err := cmd.Run() - if err != nil { - log.Printf("%s %s: %s", vm.Name, cmd.Args, err) - } + cmd := exec.Command("docker", "stop", c.id) + err := cmd.Run() + if err != nil { + log.Printf("%s %s: %s", c.name, cmd.Args, err) } - c.id = "" + return err } -func (c *container) exec(ctx *Context, vm *VirtualMachine, auth types.BaseGuestAuthentication, args []string) (string, types.BaseMethodFault) { - fault := vm.run.prepareGuestOperation(vm, auth) - if fault != nil { - return "", fault +// exec invokes the specified command, with executable being the first of the args, in the specified container +// returns +// +// string - combined stdout and stderr from command +// err +// * uninitializedContainer error - if c.id is empty +// * err from cmd execution +func (c *container) exec(ctx *Context, args []string) (string, error) { + c.Lock() + id := c.id + c.Unlock() + + if id == "" { + return "", uninitializedContainer(errors.New("exec into uninitialized container")) } - args = append([]string{"exec", vm.run.id}, args...) + args = append([]string{"exec", c.id}, args...) cmd := exec.Command("docker", args...) - res, err := cmd.CombinedOutput() if err != nil { - log.Printf("%s: %s (%s)", vm.Self, cmd.Args, string(res)) - return "", new(types.GuestOperationsFault) + log.Printf("%s: %s (%s)", c.name, cmd.Args, string(res)) + return "", err } return strings.TrimSpace(string(res)), nil } -// From https://docs.docker.com/engine/reference/commandline/cp/ : -// > It is not possible to copy certain system files such as resources under /proc, /sys, /dev, tmpfs, and mounts created by the user in the container. -// > However, you can still copy such files by manually running tar in docker exec. -func guestUpload(id string, file string, r *http.Request) error { - cmd := exec.Command("docker", "exec", "-i", id, "tar", "Cxf", path.Dir(file), "-") - cmd.Stderr = os.Stderr - stdin, err := cmd.StdinPipe() +// remove the container (if any) for the given vm. Considers removal of an uninitialized container success. +// Also removes volumes and networks that indicate they are lifecycle coupled with this container. +// returns: +// +// err - joined err from deletion of container and any volumes or networks that have coupled lifecycle +func (c *container) remove(ctx *Context) error { + c.Lock() + defer c.Unlock() + + if c.id == "" { + // consider absence success + return nil + } + + cmd := exec.Command("docker", "rm", "-v", "-f", c.id) + err := cmd.Run() if err != nil { + log.Printf("%s %s: %s", c.name, cmd.Args, err) return err } - if err = cmd.Start(); err != nil { - return err + + cmd = exec.Command("docker", "volume", "ls", "-q", "--filter", "label=container="+c.name, "--filter", "label="+deleteWithContainer) + volumesToReap, lsverr := cmd.Output() + if lsverr != nil { + log.Printf("%s %s: %s", c.name, cmd.Args, lsverr) } + log.Printf("%s volumes: %s", c.name, volumesToReap) - tw := tar.NewWriter(stdin) - _ = tw.WriteHeader(&tar.Header{ - Name: path.Base(file), - Size: r.ContentLength, - Mode: 0444, - ModTime: time.Now(), - }) + var rmverr error + if len(volumesToReap) > 0 { + run := []string{"volume", "rm", "-f"} + run = append(run, strings.Split(string(volumesToReap), "\n")...) + cmd = exec.Command("docker", run...) 
+ out, rmverr := cmd.Output() + if rmverr != nil { + log.Printf("%s %s: %s, %s", c.name, cmd.Args, rmverr, out) + } + } + + cmd = exec.Command("docker", "network", "ls", "-q", "--filter", "label=container="+c.name, "--filter", "label="+deleteWithContainer) + networksToReap, lsnerr := cmd.Output() + if lsnerr != nil { + log.Printf("%s %s: %s", c.name, cmd.Args, lsnerr) + } - _, _ = io.Copy(tw, r.Body) + var rmnerr error + if len(networksToReap) > 0 { + run := []string{"network", "rm", "-f"} + run = append(run, strings.Split(string(volumesToReap), "\n")...) + cmd = exec.Command("docker", run...) + rmnerr = cmd.Run() + if rmnerr != nil { + log.Printf("%s %s: %s", c.name, cmd.Args, rmnerr) + } + } - _ = tw.Close() - _ = stdin.Close() - _ = r.Body.Close() + if err != nil || lsverr != nil || rmverr != nil || lsnerr != nil || rmnerr != nil { + return fmt.Errorf("err: {%s}, lsverr: {%s}, rmverr: {%s}, lsnerr:{%s}, rmerr: {%s}", err, lsverr, rmverr, lsnerr, rmnerr) + } - return cmd.Wait() + if c.cancelWatch != nil { + c.cancelWatch() + eventWatch.ignore(c) + } + c.id = "" + return nil } -func guestDownload(id string, file string, w http.ResponseWriter) error { - cmd := exec.Command("docker", "exec", id, "tar", "Ccf", path.Dir(file), "-", path.Base(file)) - cmd.Stderr = os.Stderr - stdout, err := cmd.StdoutPipe() - if err != nil { - return err +// updated is a simple trigger allowing a caller to indicate that something has likely changed about the container +// and interested parties should re-inspect as needed. +func (c *container) updated() { + consolidationWindow := 250 * time.Millisecond + if d, err := time.ParseDuration(os.Getenv("VCSIM_EVENT_CONSOLIDATION_WINDOW")); err == nil { + consolidationWindow = d } - if err = cmd.Start(); err != nil { - return err + + select { + case c.changes <- struct{}{}: + time.Sleep(consolidationWindow) + // as this is only a hint to avoid waiting for the full inspect interval, we don't care about accumulating + // multiple triggers. We do pause to allow large numbers of sequential updates to consolidate + default: } +} - tr := tar.NewReader(stdout) - header, err := tr.Next() - if err != nil { - return err +// watchContainer monitors the underlying container and updates +// properties based on the container status. This occurs until either +// the container or the VM is removed. +// returns: +// +// err - uninitializedContainer error - if c.id is empty +func (c *container) watchContainer(ctx context.Context, updateFn func(*containerDetails, *container) error) error { + c.Lock() + defer c.Unlock() + + if c.id == "" { + return uninitializedContainer(errors.New("Attempt to watch uninitialized container")) } - w.Header().Set("Content-Length", strconv.FormatInt(header.Size, 10)) - _, _ = io.Copy(w, tr) + eventWatch.watch(c) + + cancelCtx, cancelFunc := context.WithCancel(ctx) + c.cancelWatch = cancelFunc - return cmd.Wait() + // Update the VM from the container at regular intervals until the done + // channel is closed. 
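+	// The poll interval defaults to 10 seconds and can be overridden via the
+	// VCSIM_INSPECT_INTERVAL environment variable; a write to c.changes (see
+	// container.updated) is only a hint to re-inspect sooner than the next tick.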
+ go func() { + inspectInterval := 10 * time.Second + if d, err := time.ParseDuration(os.Getenv("VCSIM_INSPECT_INTERVAL")); err == nil { + inspectInterval = d + } + ticker := time.NewTicker(inspectInterval) + + update := func() { + _, details, err := c.inspect() + var rmErr error + var removing bool + if _, ok := err.(uninitializedContainer); ok { + removing = true + rmErr = c.remove(SpoofContext()) + } + + updateErr := updateFn(&details, c) + // if we don't succeed we want to re-try + if removing && rmErr == nil && updateErr == nil { + ticker.Stop() + return + } + if updateErr != nil { + log.Printf("vcsim container watch: %s %s", c.id, updateErr) + } + } + + for { + select { + case <-c.changes: + update() + case <-ticker.C: + update() + case <-cancelCtx.Done(): + return + } + } + }() + + return nil } -const guestPrefix = "/guestFile/" +func (w *eventWatcher) watch(c *container) { + w.Lock() + defer w.Unlock() -// ServeGuest handles container guest file upload/download -func ServeGuest(w http.ResponseWriter, r *http.Request) { - // Real vCenter form: /guestFile?id=139&token=... - // vcsim form: /guestFile/tmp/foo/bar?id=ebc8837b8cb6&token=... + if w.watches == nil { + w.watches = make(map[string]*container) + } - id := r.URL.Query().Get("id") - file := strings.TrimPrefix(r.URL.Path, guestPrefix[:len(guestPrefix)-1]) - var err error + w.watches[c.id] = c - switch r.Method { - case http.MethodPut: - err = guestUpload(id, file, r) - case http.MethodGet: - err = guestDownload(id, file, w) - default: - w.WriteHeader(http.StatusMethodNotAllowed) - return + if w.stdin == nil { + cmd := exec.Command("docker", "events", "--format", "'{{.ID}}'", "--filter", "Type=container") + w.stdout, _ = cmd.StdoutPipe() + w.stdin, _ = cmd.StdinPipe() + err := cmd.Start() + if err != nil { + log.Printf("docker event watcher: %s %s", cmd.Args, err) + w.stdin = nil + w.stdout = nil + w.process = nil + + return + } + + w.process = cmd.Process + + go w.monitor() } +} - if err != nil { - log.Printf("%s %s: %s", r.Method, r.URL, err) - w.WriteHeader(http.StatusInternalServerError) +func (w *eventWatcher) ignore(c *container) { + w.Lock() + + delete(w.watches, c.id) + + if len(w.watches) == 0 && w.stdin != nil { + w.stop() } + + w.Unlock() } -// productSerial returns the uuid in /sys/class/dmi/id/product_serial format -func productSerial(id uuid.UUID) string { - var dst [len(id)*2 + len(id) - 1]byte - - j := 0 - for i := 0; i < len(id); i++ { - hex.Encode(dst[j:j+2], id[i:i+1]) - j += 3 - if j < len(dst) { - s := j - 1 - if s == len(dst)/2 { - dst[s] = '-' - } else { - dst[s] = ' ' - } - } +func (w *eventWatcher) monitor() { + w.Lock() + watches := len(w.watches) + w.Unlock() + + if watches == 0 { + return } - return fmt.Sprintf("VMware-%s", string(dst[:])) + scanner := bufio.NewScanner(w.stdout) + for scanner.Scan() { + id := strings.TrimSpace(scanner.Text()) + + w.Lock() + container := w.watches[id] + w.Unlock() + + if container != nil { + // this is called in a routine to allow an event consolidation window + go container.updated() + } + } } -// productUUID returns the uuid in /sys/class/dmi/id/product_uuid format -func productUUID(id uuid.UUID) string { - var dst [36]byte - - hex.Encode(dst[0:2], id[3:4]) - hex.Encode(dst[2:4], id[2:3]) - hex.Encode(dst[4:6], id[1:2]) - hex.Encode(dst[6:8], id[0:1]) - dst[8] = '-' - hex.Encode(dst[9:11], id[5:6]) - hex.Encode(dst[11:13], id[4:5]) - dst[13] = '-' - hex.Encode(dst[14:16], id[7:8]) - hex.Encode(dst[16:18], id[6:7]) - dst[18] = '-' - hex.Encode(dst[19:23], id[8:10]) - 
dst[23] = '-' - hex.Encode(dst[24:], id[10:]) - - return strings.ToUpper(string(dst[:])) +func (w *eventWatcher) stop() { + if w.stdin != nil { + w.stdin.Close() + w.stdin = nil + } + if w.stdout != nil { + w.stdout.Close() + w.stdout = nil + } + w.process.Kill() } diff --git a/vendor/github.com/vmware/govmomi/simulator/container_host_system.go b/vendor/github.com/vmware/govmomi/simulator/container_host_system.go new file mode 100644 index 0000000000..c3d283abb7 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/container_host_system.go @@ -0,0 +1,351 @@ +/* +Copyright (c) 2023-2023 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "fmt" + "strings" + + "github.com/vmware/govmomi/units" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +const ( + advOptPrefixPnicToUnderlayPrefix = "RUN.underlay." + advOptContainerBackingImage = "RUN.container" + defaultUnderlayBridgeName = "vcsim-underlay" +) + +type simHost struct { + host *HostSystem + c *container +} + +// createSimHostMounts iterates over the provide filesystem mount info, creating docker volumes. It does _not_ delete volumes +// already created if creation of one fails. +// Returns: +// volume mounts: mount options suitable to pass directly to docker +// exec commands: a set of commands to run in the sim host after creation +// error: if construction of the above outputs fails +func createSimHostMounts(ctx *Context, containerName string, mounts []types.HostFileSystemMountInfo) ([]string, [][]string, error) { + var dockerVol []string + var symlinkCmds [][]string + + for i := range mounts { + info := &mounts[i] + name := info.Volume.GetHostFileSystemVolume().Name + + // NOTE: if we ever need persistence cross-invocation we can look at encoding the disk info as a label + labels := []string{"name=" + name, "container=" + containerName, deleteWithContainer} + dockerUuid, err := createVolume("", labels, nil) + if err != nil { + return nil, nil, err + } + + uuid := volumeIDtoHostVolumeUUID(dockerUuid) + name = strings.Replace(name, uuidToken, uuid, -1) + + switch vol := info.Volume.(type) { + case *types.HostVmfsVolume: + vol.BlockSizeMb = 1 + vol.BlockSize = units.KB + vol.UnmapGranularity = units.KB + vol.UnmapPriority = "low" + vol.MajorVersion = 6 + vol.Version = "6.82" + vol.Uuid = uuid + vol.HostFileSystemVolume.Name = name + for e := range vol.Extent { + vol.Extent[e].DiskName = "____simulated_volume_____" + if vol.Extent[e].Partition == 0 { + // HACK: this should be unique within the diskname, but for now this will suffice + // partitions start at 1 + vol.Extent[e].Partition = int32(e + 1) + } + } + vol.Ssd = types.NewBool(true) + vol.Local = types.NewBool(true) + case *types.HostVfatVolume: + vol.HostFileSystemVolume.Name = name + } + + info.VStorageSupport = "vStorageUnsupported" + + info.MountInfo.Path = "/vmfs/volumes/" + uuid + info.MountInfo.Mounted = types.NewBool(true) + info.MountInfo.Accessible = types.NewBool(true) + if 
info.MountInfo.AccessMode == "" { + info.MountInfo.AccessMode = "readWrite" + } + + opt := "rw" + if info.MountInfo.AccessMode == "readOnly" { + opt = "ro" + } + + dockerVol = append(dockerVol, fmt.Sprintf("%s:/vmfs/volumes/%s:%s", dockerUuid, uuid, opt)) + + // create symlinks from /vmfs/volumes/ for the Volume Name - the direct mount (path) is only the uuid + // ? can we do this via a script in the ESX image instead of via exec? + // ? are the volume names exposed in any manner inside the host? They must be because these mounts exist but where does that come from? Chicken and egg problem? ConfigStore? + symlinkCmds = append(symlinkCmds, []string{"ln", "-s", fmt.Sprintf("/vmfs/volumes/%s", uuid), fmt.Sprintf("/vmfs/volumes/%s", name)}) + if strings.HasPrefix(name, "OSDATA") { + symlinkCmds = append(symlinkCmds, []string{"mkdir", "-p", "/var/lib/vmware"}) + symlinkCmds = append(symlinkCmds, []string{"ln", "-s", fmt.Sprintf("/vmfs/volumes/%s", uuid), "/var/lib/vmware/osdata"}) + } + } + + return dockerVol, symlinkCmds, nil +} + +// createSimHostNetworks creates the networks for the host if not already created. Because we expect multiple hosts on the same network to act as a cluster +// it's likely that only the first host will create networks. +// This includes: +// * bridge network per-pNIC +// * bridge network per-DVS +// +// Returns: +// * array of networks to attach to +// * array of commands to run +// * error +// +// TODO: implement bridge network per DVS - not needed until container backed VMs are "created" on container backed "hosts" +func createSimHostNetworks(ctx *Context, containerName string, networkInfo *types.HostNetworkInfo, advOpts *OptionManager) ([]string, [][]string, error) { + var dockerNet []string + var cmds [][]string + + existingNets := make(map[string]string) + + // a pnic does not have an IP so this is purely a connectivity statement, not a network identity, however this is not how docker works + // so we're going to end up with a veth (our pnic) that does have an IP assigned. That IP will end up being used in a NetConfig structure associated + // with the pNIC. See HostSystem.getNetConfigInterface. + for i := range networkInfo.Pnic { + pnicName := networkInfo.Pnic[i].Device + + bridge := getPnicUnderlay(advOpts, pnicName) + + if pnic, attached := existingNets[bridge]; attached { + return nil, nil, fmt.Errorf("cannot attach multiple pNICs to the same underlay: %s and %s both attempting to connect to %s for %s", pnic, pnicName, bridge, containerName) + } + + _, err := createBridge(bridge) + if err != nil { + return nil, nil, err + } + + dockerNet = append(dockerNet, bridge) + existingNets[bridge] = pnicName + } + + return dockerNet, cmds, nil +} + +func getPnicUnderlay(advOpts *OptionManager, pnicName string) string { + queryRes := advOpts.QueryOptions(&types.QueryOptions{Name: advOptPrefixPnicToUnderlayPrefix + pnicName}).(*methods.QueryOptionsBody).Res + return queryRes.Returnval[0].GetOptionValue().Value.(string) +} + +// createSimulationHostcreates a simHost binding if the host.ConfigManager.AdvancedOption set contains a key "RUN.container". +// If the set does not contain that key, this returns nil. +// Methods on the simHost type are written to check for nil object so the return from this call can be blindly +// assigned and invoked without the caller caring about whether a binding for a backing container was warranted. +// +// The created simhost is based off of the details of the supplied host system. 
+// VMFS locations are created based on FileSystemMountInfo +// Bridge networks are created to simulate underlay networks - one per pNIC. You cannot connect two pNICs to the same underlay. +// +// On Network connectivity - initially this is using docker network constructs. This means we cannot easily use nested "ip netns" so we cannot +// have a perfect representation of the ESX structure: pnic(veth)->vswtich(bridge)->{vmk,vnic}(veth) +// Instead we have the following: +// * bridge network per underlay - everything connects directly to the underlay +// * VMs/CRXs connect to the underlay dictated by the Uplink pNIC attached to their vSwitch +// * hostd vmknic gets the "host" container IP - we don't currently support multiple vmknics with different IPs +// * no support for mocking VLANs +func createSimulationHost(ctx *Context, host *HostSystem) (*simHost, error) { + sh := &simHost{ + host: host, + } + + advOpts := ctx.Map.Get(host.ConfigManager.AdvancedOption.Reference()).(*OptionManager) + fault := advOpts.QueryOptions(&types.QueryOptions{Name: "RUN.container"}).(*methods.QueryOptionsBody).Fault() + if fault != nil { + if _, ok := fault.VimFault().(*types.InvalidName); ok { + return nil, nil + } + return nil, fmt.Errorf("errror retrieving container backing from host config manager: %+v", fault.VimFault()) + } + + // assemble env + var dockerEnv []string + + var execCmds [][]string + + var err error + + hName := host.Summary.Config.Name + hUuid := host.Summary.Hardware.Uuid + containerName := constructContainerName(hName, hUuid) + + // create volumes and mounts + dockerVol, volCmds, err := createSimHostMounts(ctx, containerName, host.Config.FileSystemVolume.MountInfo) + if err != nil { + return nil, err + } + execCmds = append(execCmds, volCmds...) + + // create networks + dockerNet, netCmds, err := createSimHostNetworks(ctx, containerName, host.Config.Network, advOpts) + if err != nil { + return nil, err + } + execCmds = append(execCmds, netCmds...) + + // create the container + sh.c, err = create(ctx, hName, hUuid, dockerNet, dockerVol, nil, dockerEnv, "alpine", []string{"sleep", "infinity"}) + if err != nil { + return nil, err + } + + // start the container + err = sh.c.start(ctx) + if err != nil { + return nil, err + } + + // run post-creation steps + for _, cmd := range execCmds { + _, err := sh.c.exec(ctx, cmd) + if err != nil { + return nil, err + } + } + + _, detail, err := sh.c.inspect() + + for i := range host.Config.Network.Pnic { + pnic := &host.Config.Network.Pnic[i] + bridge := getPnicUnderlay(advOpts, pnic.Device) + settings := detail.NetworkSettings.Networks[bridge] + + // it doesn't really make sense at an ESX level to set this information as IP bindings are associated with + // vnics (VMs) or vmknics (daemons such as hostd). + // However it's a useful location to stash this info in a manner that can be retrieved at a later date. 
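For orientation, the two advanced-option keys consumed here are ordinary host option values. Below is a minimal sketch of how they look; the helper name and the image tag are illustrative, while the option keys and the default bridge name come from the constants at the top of this file. configureContainerBacking in host_system.go applies equivalent options through OptionManager.UpdateOptions.

    // containerBackingOptions sketches the host-level options read by
    // createSimulationHost and getPnicUnderlay.
    func containerBackingOptions() []types.BaseOptionValue {
        return []types.BaseOptionValue{
            // presence of this key opts the host into container backing;
            // the value names the backing image (the tag here is illustrative)
            &types.OptionValue{Key: advOptContainerBackingImage, Value: "vcsim-esx:latest"},
            // binds pNIC "vmnic0" to the docker bridge used as its underlay
            &types.OptionValue{Key: advOptPrefixPnicToUnderlayPrefix + "vmnic0", Value: defaultUnderlayBridgeName},
        }
    }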
+ pnic.Spec.Ip.IpAddress = settings.IPAddress + pnic.Spec.Ip.SubnetMask = prefixToMask(settings.IPPrefixLen) + + pnic.Mac = settings.MacAddress + } + + // update the active "management" nicType with the container IP for vmnic0 + netconfig, err := host.getNetConfigInterface(ctx, "management") + if err != nil { + return nil, err + } + netconfig.vmk.Spec.Ip.IpAddress = netconfig.uplink.Spec.Ip.IpAddress + netconfig.vmk.Spec.Ip.SubnetMask = netconfig.uplink.Spec.Ip.SubnetMask + netconfig.vmk.Spec.Mac = netconfig.uplink.Mac + + return sh, nil +} + +// remove destroys the container associated with the host and any volumes with labels specifying their lifecycle +// is coupled with the container +func (sh *simHost) remove(ctx *Context) error { + if sh == nil { + return nil + } + + return sh.c.remove(ctx) +} + +// volumeIDtoHostVolumeUUID takes the 64 char docker uuid and converts it into a 32char ESX form of 8-8-4-12 +// Perhaps we should do this using an md5 rehash, but instead we just take the first 32char for ease of cross-reference. +func volumeIDtoHostVolumeUUID(id string) string { + return fmt.Sprintf("%s-%s-%s-%s", id[0:8], id[8:16], id[16:20], id[20:32]) +} + +// By reference to physical system, partition numbering tends to work out like this: +// 1. EFI System (100 MB) +// Free space (1.97 MB) +// 5. Basic Data (4 GB) (bootbank1) +// 6. Basic Data (4 GB) (bootbank2) +// 7. VMFSL (119.9 GB) (os-data) +// 8. VMFS (1 TB) (datastore1) +// I assume the jump from 1 -> 5 harks back to the primary/logical partitions from MBT days +const uuidToken = "%__UUID__%" + +var defaultSimVolumes = []types.HostFileSystemMountInfo{ + { + MountInfo: types.HostMountInfo{ + AccessMode: "readWrite", + }, + Volume: &types.HostVmfsVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "VMFS", + Name: "datastore1", + Capacity: 1 * units.TB, + }, + Extent: []types.HostScsiDiskPartition{ + { + Partition: 8, + }, + }, + }, + }, + { + MountInfo: types.HostMountInfo{ + AccessMode: "readWrite", + }, + Volume: &types.HostVmfsVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "OSDATA-%__UUID__%", + Capacity: 128 * units.GB, + }, + Extent: []types.HostScsiDiskPartition{ + { + Partition: 7, + }, + }, + }, + }, + { + MountInfo: types.HostMountInfo{ + AccessMode: "readOnly", + }, + Volume: &types.HostVfatVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "BOOTBANK1", + Capacity: 4 * units.GB, + }, + }, + }, + { + MountInfo: types.HostMountInfo{ + AccessMode: "readOnly", + }, + Volume: &types.HostVfatVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "BOOTBANK2", + Capacity: 4 * units.GB, + }, + }, + }, +} diff --git a/vendor/github.com/vmware/govmomi/simulator/container_virtual_machine.go b/vendor/github.com/vmware/govmomi/simulator/container_virtual_machine.go new file mode 100644 index 0000000000..a2b91fd86f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/container_virtual_machine.go @@ -0,0 +1,511 @@ +/* +Copyright (c) 2023-2023 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "archive/tar" + "context" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net/http" + "strconv" + "strings" + + "github.com/google/uuid" + + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +const ContainerBackingOptionKey = "RUN.container" + +var ( + toolsRunning = []types.PropertyChange{ + {Name: "guest.toolsStatus", Val: types.VirtualMachineToolsStatusToolsOk}, + {Name: "guest.toolsRunningStatus", Val: string(types.VirtualMachineToolsRunningStatusGuestToolsRunning)}, + } + + toolsNotRunning = []types.PropertyChange{ + {Name: "guest.toolsStatus", Val: types.VirtualMachineToolsStatusToolsNotRunning}, + {Name: "guest.toolsRunningStatus", Val: string(types.VirtualMachineToolsRunningStatusGuestToolsNotRunning)}, + } +) + +type simVM struct { + vm *VirtualMachine + c *container +} + +// createSimulationVM inspects the provided VirtualMachine and creates a simVM binding for it if +// the vm.Config.ExtraConfig set contains a key "RUN.container". +// If the ExtraConfig set does not contain that key, this returns nil. +// Methods on the simVM type are written to check for nil object so the return from this call can be blindly +// assigned and invoked without the caller caring about whether a binding for a backing container was warranted. +func createSimulationVM(vm *VirtualMachine) *simVM { + svm := &simVM{ + vm: vm, + } + + for _, opt := range vm.Config.ExtraConfig { + val := opt.GetOptionValue() + if val.Key == ContainerBackingOptionKey { + return svm + } + } + + return nil +} + +// applies container network settings to vm.Guest properties. +func (svm *simVM) syncNetworkConfigToVMGuestProperties() error { + if svm == nil { + return nil + } + + out, detail, err := svm.c.inspect() + if err != nil { + return err + } + + svm.vm.Config.Annotation = "inspect" + svm.vm.logPrintf("%s: %s", svm.vm.Config.Annotation, string(out)) + + netS := detail.NetworkSettings.networkSettings + + // ? 
Why is this valid - we're taking the first entry while iterating over a MAP + for _, n := range detail.NetworkSettings.Networks { + netS = n + break + } + + if detail.State.Paused { + svm.vm.Runtime.PowerState = types.VirtualMachinePowerStateSuspended + } else if detail.State.Running { + svm.vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOn + } else { + svm.vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOff + } + + svm.vm.Guest.IpAddress = netS.IPAddress + svm.vm.Summary.Guest.IpAddress = netS.IPAddress + + if len(svm.vm.Guest.Net) != 0 { + net := &svm.vm.Guest.Net[0] + net.IpAddress = []string{netS.IPAddress} + net.MacAddress = netS.MacAddress + net.IpConfig = &types.NetIpConfigInfo{ + IpAddress: []types.NetIpConfigInfoIpAddress{{ + IpAddress: netS.IPAddress, + PrefixLength: int32(netS.IPPrefixLen), + State: string(types.NetIpConfigInfoIpAddressStatusPreferred), + }}, + } + } + + for _, d := range svm.vm.Config.Hardware.Device { + if eth, ok := d.(types.BaseVirtualEthernetCard); ok { + eth.GetVirtualEthernetCard().MacAddress = netS.MacAddress + break + } + } + + return nil +} + +func (svm *simVM) prepareGuestOperation(auth types.BaseGuestAuthentication) types.BaseMethodFault { + if svm != nil && (svm.c == nil || svm.c.id == "") { + return new(types.GuestOperationsUnavailable) + } + + if svm.vm.Runtime.PowerState != types.VirtualMachinePowerStatePoweredOn { + return &types.InvalidPowerState{ + RequestedState: types.VirtualMachinePowerStatePoweredOn, + ExistingState: svm.vm.Runtime.PowerState, + } + } + + switch creds := auth.(type) { + case *types.NamePasswordAuthentication: + if creds.Username == "" || creds.Password == "" { + return new(types.InvalidGuestLogin) + } + default: + return new(types.InvalidGuestLogin) + } + + return nil +} + +// populateDMI writes BIOS UUID DMI files to a container volume +func (svm *simVM) populateDMI() error { + if svm.c == nil { + return nil + } + + files := []tarEntry{ + { + &tar.Header{ + Name: "product_uuid", + Mode: 0444, + }, + []byte(productUUID(svm.vm.uid)), + }, + { + &tar.Header{ + Name: "product_serial", + Mode: 0444, + }, + []byte(productSerial(svm.vm.uid)), + }, + } + + _, err := svm.c.createVolume("dmi", []string{deleteWithContainer}, files) + return err +} + +// start runs the container if specified by the RUN.container extraConfig property. +// lazily creates a container backing if specified by an ExtraConfig property with key "RUN.container" +func (svm *simVM) start(ctx *Context) error { + if svm == nil { + return nil + } + + if svm.c != nil && svm.c.id != "" { + err := svm.c.start(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "start", err) + } else { + ctx.Map.Update(svm.vm, toolsRunning) + } + + return err + } + + var args []string + var env []string + var ports []string + mountDMI := true + + for _, opt := range svm.vm.Config.ExtraConfig { + val := opt.GetOptionValue() + if val.Key == ContainerBackingOptionKey { + run := val.Value.(string) + err := json.Unmarshal([]byte(run), &args) + if err != nil { + args = []string{run} + } + + continue + } + + if val.Key == "RUN.mountdmi" { + var mount bool + err := json.Unmarshal([]byte(val.Value.(string)), &mount) + if err == nil { + mountDMI = mount + } + + continue + } + + if strings.HasPrefix(val.Key, "RUN.port.") { + // ? would this not make more sense as a set of tuples in the value? + // or inlined into the RUN.container freeform string as is the case with the nginx volume in the examples? 
+ sKey := strings.Split(val.Key, ".") + containerPort := sKey[len(sKey)-1] + ports = append(ports, fmt.Sprintf("%s:%s", val.Value.(string), containerPort)) + + continue + } + + if strings.HasPrefix(val.Key, "RUN.env.") { + sKey := strings.Split(val.Key, ".") + envKey := sKey[len(sKey)-1] + env = append(env, fmt.Sprintf("%s=%s", envKey, val.Value.(string))) + } + + if strings.HasPrefix(val.Key, "guestinfo.") { + key := strings.Replace(strings.ToUpper(val.Key), ".", "_", -1) + env = append(env, fmt.Sprintf("VMX_%s=%s", key, val.Value.(string))) + + continue + } + } + + if len(args) == 0 { + // not an error - it's simply a simVM that shouldn't be backed by a container + return nil + } + + if len(env) != 0 { + // Configure env as the data access method for cloud-init-vmware-guestinfo + env = append(env, "VMX_GUESTINFO=true") + } + + volumes := []string{} + if mountDMI { + volumes = append(volumes, constructVolumeName(svm.vm.Name, svm.vm.uid.String(), "dmi")+":/sys/class/dmi/id") + } + + var err error + svm.c, err = create(ctx, svm.vm.Name, svm.vm.uid.String(), nil, volumes, ports, env, args[0], args[1:]) + if err != nil { + return err + } + + if mountDMI { + // not combined with the test assembling volumes because we want to have the container name first. + // cannot add a label to a volume after creation, so if we want to associate with the container ID the + // container must come first + err = svm.populateDMI() + if err != nil { + return err + } + } + + err = svm.c.start(ctx) + if err != nil { + log.Printf("%s %s: %s %s", svm.vm.Name, "start", args, err) + return err + } + + ctx.Map.Update(svm.vm, toolsRunning) + + svm.vm.logPrintf("%s: %s", args, svm.c.id) + + if err = svm.syncNetworkConfigToVMGuestProperties(); err != nil { + log.Printf("%s inspect %s: %s", svm.vm.Name, svm.c.id, err) + } + + callback := func(details *containerDetails, c *container) error { + spoofctx := SpoofContext() + + if c.id == "" && svm.vm != nil { + // If the container cannot be found then destroy this VM unless the VM is no longer configured for container backing (svm.vm == nil) + taskRef := svm.vm.DestroyTask(spoofctx, &types.Destroy_Task{This: svm.vm.Self}).(*methods.Destroy_TaskBody).Res.Returnval + task, ok := spoofctx.Map.Get(taskRef).(*Task) + if !ok { + panic(fmt.Sprintf("couldn't retrieve task for moref %+q while deleting VM %s", taskRef, svm.vm.Name)) + } + + // Wait for the task to complete and see if there is an error. + task.Wait() + if task.Info.Error != nil { + msg := fmt.Sprintf("failed to destroy vm: err=%v", *task.Info.Error) + svm.vm.logPrintf(msg) + + return errors.New(msg) + } + } + + return svm.syncNetworkConfigToVMGuestProperties() + } + + // Start watching the container resource. + err = svm.c.watchContainer(context.Background(), callback) + if _, ok := err.(uninitializedContainer); ok { + // the container has been deleted before we could watch, despite successful launch so clean up. + callback(nil, svm.c) + + // successful launch so nil the error + return nil + } + + return err +} + +// stop the container (if any) for the given vm. +func (svm *simVM) stop(ctx *Context) error { + if svm == nil || svm.c == nil { + return nil + } + + err := svm.c.stop(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "stop", err) + + return err + } + + ctx.Map.Update(svm.vm, toolsNotRunning) + + return nil +} + +// pause the container (if any) for the given vm. 
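To make the ExtraConfig contract parsed by start() concrete, here is a client-side sketch of a spec for a container-backed VM. The variable name, image, port, env, and guestinfo values are all illustrative; the key names, and the requirement that values be strings, follow from the parsing above.

    // nginxVMSpec sketches a VM whose power-on is backed by an nginx container.
    var nginxVMSpec = types.VirtualMachineConfigSpec{
        Name: "nginx-vm",
        ExtraConfig: []types.BaseOptionValue{
            // a bare image name, or a JSON array of image followed by command args
            &types.OptionValue{Key: "RUN.container", Value: `["nginx", "nginx-debug", "-g", "daemon off;"]`},
            // forwards host port 8080 to container port 80
            &types.OptionValue{Key: "RUN.port.80", Value: "8080"},
            // exported into the container environment as FOO=bar
            &types.OptionValue{Key: "RUN.env.FOO", Value: "bar"},
            // set to "false" to skip the DMI volume mount
            &types.OptionValue{Key: "RUN.mountdmi", Value: "true"},
            // surfaced to the container as a VMX_GUESTINFO_* environment variable
            &types.OptionValue{Key: "guestinfo.hostname", Value: "nginx-vm"},
        },
    }

Creating a VM with such a spec and powering it on drives the start() path above; stopping, pausing, and restarting the VM map onto the container lifecycle methods that follow.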
+func (svm *simVM) pause(ctx *Context) error { + if svm == nil || svm.c == nil { + return nil + } + + err := svm.c.pause(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "pause", err) + + return err + } + + ctx.Map.Update(svm.vm, toolsNotRunning) + + return nil +} + +// restart the container (if any) for the given vm. +func (svm *simVM) restart(ctx *Context) error { + if svm == nil || svm.c == nil { + return nil + } + + err := svm.c.restart(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "restart", err) + + return err + } + + ctx.Map.Update(svm.vm, toolsRunning) + + return nil +} + +// remove the container (if any) for the given vm. +func (svm *simVM) remove(ctx *Context) error { + if svm == nil || svm.c == nil { + return nil + } + + err := svm.c.remove(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "remove", err) + + return err + } + + return nil +} + +func (svm *simVM) exec(ctx *Context, auth types.BaseGuestAuthentication, args []string) (string, types.BaseMethodFault) { + if svm == nil || svm.c == nil { + return "", nil + } + + fault := svm.prepareGuestOperation(auth) + if fault != nil { + return "", fault + } + + out, err := svm.c.exec(ctx, args) + if err != nil { + log.Printf("%s: %s (%s)", svm.vm.Name, args, string(out)) + return "", new(types.GuestOperationsFault) + } + + return strings.TrimSpace(string(out)), nil +} + +func guestUpload(id string, file string, r *http.Request) error { + // TODO: decide behaviour for no container + err := copyToGuest(id, file, r.ContentLength, r.Body) + _ = r.Body.Close() + return err +} + +func guestDownload(id string, file string, w http.ResponseWriter) error { + // TODO: decide behaviour for no container + sink := func(len int64, r io.Reader) error { + w.Header().Set("Content-Length", strconv.FormatInt(len, 10)) + _, err := io.Copy(w, r) + return err + } + + err := copyFromGuest(id, file, sink) + return err +} + +const guestPrefix = "/guestFile/" + +// ServeGuest handles container guest file upload/download +func ServeGuest(w http.ResponseWriter, r *http.Request) { + // Real vCenter form: /guestFile?id=139&token=... + // vcsim form: /guestFile/tmp/foo/bar?id=ebc8837b8cb6&token=... 
+ + id := r.URL.Query().Get("id") + file := strings.TrimPrefix(r.URL.Path, guestPrefix[:len(guestPrefix)-1]) + var err error + + switch r.Method { + case http.MethodPut: + err = guestUpload(id, file, r) + case http.MethodGet: + err = guestDownload(id, file, w) + default: + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + if err != nil { + log.Printf("%s %s: %s", r.Method, r.URL, err) + w.WriteHeader(http.StatusInternalServerError) + } +} + +// productSerial returns the uuid in /sys/class/dmi/id/product_serial format +func productSerial(id uuid.UUID) string { + var dst [len(id)*2 + len(id) - 1]byte + + j := 0 + for i := 0; i < len(id); i++ { + hex.Encode(dst[j:j+2], id[i:i+1]) + j += 3 + if j < len(dst) { + s := j - 1 + if s == len(dst)/2 { + dst[s] = '-' + } else { + dst[s] = ' ' + } + } + } + + return fmt.Sprintf("VMware-%s", string(dst[:])) +} + +// productUUID returns the uuid in /sys/class/dmi/id/product_uuid format +func productUUID(id uuid.UUID) string { + var dst [36]byte + + hex.Encode(dst[0:2], id[3:4]) + hex.Encode(dst[2:4], id[2:3]) + hex.Encode(dst[4:6], id[1:2]) + hex.Encode(dst[6:8], id[0:1]) + dst[8] = '-' + hex.Encode(dst[9:11], id[5:6]) + hex.Encode(dst[11:13], id[4:5]) + dst[13] = '-' + hex.Encode(dst[14:16], id[7:8]) + hex.Encode(dst[16:18], id[6:7]) + dst[18] = '-' + hex.Encode(dst[19:23], id[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], id[10:]) + + return strings.ToUpper(string(dst[:])) +} diff --git a/vendor/github.com/vmware/govmomi/simulator/dataset.go b/vendor/github.com/vmware/govmomi/simulator/dataset.go new file mode 100644 index 0000000000..0a1b0627ee --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/dataset.go @@ -0,0 +1,65 @@ +/* +Copyright (c) 2023-2023 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "github.com/vmware/govmomi/vapi/vm/dataset" +) + +type DataSet struct { + *dataset.Info + ID string + Entries map[string]string +} + +func copyDataSetsForVmClone(src map[string]*DataSet) map[string]*DataSet { + copy := make(map[string]*DataSet, len(src)) + for k, v := range src { + if v.OmitFromSnapshotAndClone { + continue + } + copy[k] = copyDataSet(v) + } + return copy +} + +func copyDataSet(src *DataSet) *DataSet { + if src == nil { + return nil + } + copy := &DataSet{ + Info: &dataset.Info{ + Name: src.Name, + Description: src.Description, + Host: src.Host, + Guest: src.Guest, + Used: src.Used, + OmitFromSnapshotAndClone: src.OmitFromSnapshotAndClone, + }, + ID: src.ID, + Entries: copyEntries(src.Entries), + } + return copy +} + +func copyEntries(src map[string]string) map[string]string { + copy := make(map[string]string, len(src)) + for k, v := range src { + copy[k] = v + } + return copy +} diff --git a/vendor/github.com/vmware/govmomi/simulator/datastore.go b/vendor/github.com/vmware/govmomi/simulator/datastore.go index f341bdd174..277674afef 100644 --- a/vendor/github.com/vmware/govmomi/simulator/datastore.go +++ b/vendor/github.com/vmware/govmomi/simulator/datastore.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -83,6 +83,7 @@ func (ds *Datastore) RefreshDatastore(*types.RefreshDatastore) soap.HasFault { info.Timestamp = types.NewTime(time.Now()) + r.Res = &types.RefreshDatastoreResponse{} return r } diff --git a/vendor/github.com/vmware/govmomi/simulator/doc.go b/vendor/github.com/vmware/govmomi/simulator/doc.go index 441e9a0e7f..61635c32c1 100644 --- a/vendor/github.com/vmware/govmomi/simulator/doc.go +++ b/vendor/github.com/vmware/govmomi/simulator/doc.go @@ -17,6 +17,6 @@ limitations under the License. /* Package simulator is a mock framework for the vSphere API. -See also: https://github.com/vmware/govmomi/blob/master/vcsim/README.md +See also: https://github.com/vmware/govmomi/blob/main/vcsim/README.md */ package simulator diff --git a/vendor/github.com/vmware/govmomi/simulator/environment_browser.go b/vendor/github.com/vmware/govmomi/simulator/environment_browser.go index 4f8ba8ed9e..31b50939ab 100644 --- a/vendor/github.com/vmware/govmomi/simulator/environment_browser.go +++ b/vendor/github.com/vmware/govmomi/simulator/environment_browser.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2019 VMware, Inc. All Rights Reserved. +Copyright (c) 2019-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -29,7 +29,10 @@ import ( type EnvironmentBrowser struct { mo.EnvironmentBrowser - types.QueryConfigOptionResponse + QueryConfigTargetResponse types.QueryConfigTargetResponse + QueryConfigOptionResponse types.QueryConfigOptionResponse + QueryConfigOptionDescriptorResponse types.QueryConfigOptionDescriptorResponse + QueryTargetCapabilitiesResponse types.QueryTargetCapabilitiesResponse } func newEnvironmentBrowser() *types.ManagedObjectReference { @@ -135,7 +138,13 @@ func (b *EnvironmentBrowser) QueryConfigOptionEx(req *types.QueryConfigOptionEx) func (b *EnvironmentBrowser) QueryConfigOptionDescriptor(ctx *Context, req *types.QueryConfigOptionDescriptor) soap.HasFault { body := &methods.QueryConfigOptionDescriptorBody{ - Res: new(types.QueryConfigOptionDescriptorResponse), + Res: &types.QueryConfigOptionDescriptorResponse{ + Returnval: b.QueryConfigOptionDescriptorResponse.Returnval, + }, + } + + if body.Res.Returnval != nil { + return body } body.Res.Returnval = []types.VirtualMachineConfigOptionDescriptor{{ @@ -154,12 +163,18 @@ func (b *EnvironmentBrowser) QueryConfigOptionDescriptor(ctx *Context, req *type func (b *EnvironmentBrowser) QueryConfigTarget(ctx *Context, req *types.QueryConfigTarget) soap.HasFault { body := &methods.QueryConfigTargetBody{ Res: &types.QueryConfigTargetResponse{ - Returnval: &types.ConfigTarget{ - SmcPresent: types.NewBool(false), - }, + Returnval: b.QueryConfigTargetResponse.Returnval, }, } - target := body.Res.Returnval + + if body.Res.Returnval != nil { + return body + } + + target := &types.ConfigTarget{ + SmcPresent: types.NewBool(false), + } + body.Res.Returnval = target var hosts []types.ManagedObjectReference if req.Host == nil { @@ -233,3 +248,22 @@ func (b *EnvironmentBrowser) QueryConfigTarget(ctx *Context, req *types.QueryCon return body } + +func (b *EnvironmentBrowser) QueryTargetCapabilities(ctx *Context, req *types.QueryTargetCapabilities) soap.HasFault { + body := &methods.QueryTargetCapabilitiesBody{ + Res: &types.QueryTargetCapabilitiesResponse{ + Returnval: b.QueryTargetCapabilitiesResponse.Returnval, + }, + } + + if body.Res.Returnval != nil { + return body + } + + body.Res.Returnval = &types.HostCapability{ + VmotionSupported: true, + MaintenanceModeSupported: true, + } + + return body +} diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/authorization_manager.go b/vendor/github.com/vmware/govmomi/simulator/esx/authorization_manager.go index d76459be9e..38cd244418 100644 --- a/vendor/github.com/vmware/govmomi/simulator/esx/authorization_manager.go +++ b/vendor/github.com/vmware/govmomi/simulator/esx/authorization_manager.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,7 @@ import "github.com/vmware/govmomi/vim25/types" // RoleList is the default template for the AuthorizationManager roleList property. // Capture method: -// govc object.collect -s -dump AuthorizationManager:ha-authmgr roleList +// govc object.collect -s -dump AuthorizationManager:ha-authmgr roleList var RoleList = []types.AuthorizationRole{ { RoleId: -6, diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/datacenter.go b/vendor/github.com/vmware/govmomi/simulator/esx/datacenter.go index c0f95eff9c..374cd9518c 100644 --- a/vendor/github.com/vmware/govmomi/simulator/esx/datacenter.go +++ b/vendor/github.com/vmware/govmomi/simulator/esx/datacenter.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -23,7 +23,7 @@ import ( // Datacenter is the default template for Datacenter properties. // Capture method: -// govc datacenter.info -dump +// govc datacenter.info -dump var Datacenter = mo.Datacenter{ ManagedEntity: mo.ManagedEntity{ ExtensibleManagedObject: mo.ExtensibleManagedObject{ diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/event_manager.go b/vendor/github.com/vmware/govmomi/simulator/esx/event_manager.go index 0a572770f7..f231a3f905 100644 --- a/vendor/github.com/vmware/govmomi/simulator/esx/event_manager.go +++ b/vendor/github.com/vmware/govmomi/simulator/esx/event_manager.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2018-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,7 @@ import "github.com/vmware/govmomi/vim25/types" // EventInfo is the default template for the EventManager description.eventInfo property. // Capture method: -// govc object.collect -s -dump EventManager:ha-eventmgr description.eventInfo +// govc object.collect -s -dump EventManager:ha-eventmgr description.eventInfo // The captured list has been manually pruned and FullFormat fields changed to use Go's template variable syntax. var EventInfo = []types.EventDescriptionEventDetail{ { diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/host_config_filesystemvolume.go b/vendor/github.com/vmware/govmomi/simulator/esx/host_config_filesystemvolume.go new file mode 100644 index 0000000000..01c62d0a48 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/simulator/esx/host_config_filesystemvolume.go @@ -0,0 +1,144 @@ +/* +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package esx + +import ( + "github.com/vmware/govmomi/units" + "github.com/vmware/govmomi/vim25/types" +) + +// HostConfigInfo is the default template for the HostSystem config property. +// Capture method: +// govc object.collect -s -dump HostSystem:ha-host config.fileSystemVolume +// - slightly modified for uuids and DiskName +var HostFileSystemVolumeInfo = types.HostFileSystemVolumeInfo{ + VolumeTypeList: []string{"VMFS", "NFS", "NFS41", "vsan", "VVOL", "VFFS", "OTHER", "PMEM"}, + MountInfo: []types.HostFileSystemMountInfo{ + { + MountInfo: types.HostMountInfo{ + Path: "/vmfs/volumes/deadbeef-01234567-89ab-cdef00000003", + AccessMode: "readWrite", + Mounted: types.NewBool(true), + Accessible: types.NewBool(true), + InaccessibleReason: "", + MountFailedReason: "", + }, + Volume: &types.HostVmfsVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "VMFS", + Name: "datastore1", + Capacity: 3.5 * units.TB, + }, + BlockSizeMb: 1, + BlockSize: units.KB, + UnmapGranularity: units.KB, + UnmapPriority: "low", + UnmapBandwidthSpec: (*types.VmfsUnmapBandwidthSpec)(nil), + MaxBlocks: 61 * units.MB, + MajorVersion: 6, + Version: "6.82", + Uuid: "deadbeef-01234567-89ab-cdef00000003", + Extent: []types.HostScsiDiskPartition{ + { + DiskName: "____simulated_volumes_____", + Partition: 8, + }, + }, + VmfsUpgradable: false, + ForceMountedInfo: (*types.HostForceMountedInfo)(nil), + Ssd: types.NewBool(true), + Local: types.NewBool(true), + ScsiDiskType: "", + }, + VStorageSupport: "vStorageUnsupported", + }, + { + MountInfo: types.HostMountInfo{ + Path: "/vmfs/volumes/deadbeef-01234567-89ab-cdef00000002", + AccessMode: "readWrite", + Mounted: types.NewBool(true), + Accessible: types.NewBool(true), + InaccessibleReason: "", + MountFailedReason: "", + }, + Volume: &types.HostVmfsVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "OSDATA-deadbeef-01234567-89ab-cdef00000002", + Capacity: 128 * units.GB, + }, + BlockSizeMb: 1, + BlockSize: units.KB, + UnmapGranularity: 0, + UnmapPriority: "", + UnmapBandwidthSpec: (*types.VmfsUnmapBandwidthSpec)(nil), + MaxBlocks: 256 * units.KB, + MajorVersion: 1, + Version: "1.00", + Uuid: "deadbeef-01234567-89ab-cdef00000002", + Extent: []types.HostScsiDiskPartition{ + { + DiskName: "____simulated_volumes_____", + Partition: 7, + }, + }, + VmfsUpgradable: false, + ForceMountedInfo: (*types.HostForceMountedInfo)(nil), + Ssd: types.NewBool(true), + Local: types.NewBool(true), + ScsiDiskType: "", + }, + VStorageSupport: "vStorageUnsupported", + }, + { + MountInfo: types.HostMountInfo{ + Path: "/vmfs/volumes/deadbeef-01234567-89ab-cdef00000001", + AccessMode: "readOnly", + Mounted: types.NewBool(true), + Accessible: types.NewBool(true), + InaccessibleReason: "", + MountFailedReason: "", + }, + Volume: &types.HostVfatVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "BOOTBANK1", + Capacity: 4 * units.GB, + }, + }, + VStorageSupport: "", + }, + { + MountInfo: 
types.HostMountInfo{ + Path: "/vmfs/volumes/deadbeef-01234567-89ab-cdef00000000", + AccessMode: "readOnly", + Mounted: types.NewBool(true), + Accessible: types.NewBool(true), + InaccessibleReason: "", + MountFailedReason: "", + }, + Volume: &types.HostVfatVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "BOOTBANK2", + Capacity: 4 * units.GB, + }, + }, + VStorageSupport: "", + }, + }, +} diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/host_config_info.go b/vendor/github.com/vmware/govmomi/simulator/esx/host_config_info.go index fd7877b28c..d56c3b607b 100644 --- a/vendor/github.com/vmware/govmomi/simulator/esx/host_config_info.go +++ b/vendor/github.com/vmware/govmomi/simulator/esx/host_config_info.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,7 @@ import "github.com/vmware/govmomi/vim25/types" // HostConfigInfo is the default template for the HostSystem config property. // Capture method: -// govc object.collect -s -dump HostSystem:ha-host config +// govc object.collect -s -dump HostSystem:ha-host config var HostConfigInfo = types.HostConfigInfo{ Host: types.ManagedObjectReference{Type: "HostSystem", Value: "ha-host"}, Product: types.AboutInfo{ @@ -50,6 +50,7 @@ var HostConfigInfo = types.HostConfigInfo{ ConsoleReservation: (*types.ServiceConsoleReservationInfo)(nil), VirtualMachineReservation: (*types.VirtualMachineMemoryReservationInfo)(nil), StorageDevice: &HostStorageDeviceInfo, + FileSystemVolume: &HostFileSystemVolumeInfo, SystemFile: nil, Network: &types.HostNetworkInfo{ Vswitch: []types.HostVirtualSwitch{ diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/host_firewall_system.go b/vendor/github.com/vmware/govmomi/simulator/esx/host_firewall_system.go index 11c1285aad..46c5dea7e2 100644 --- a/vendor/github.com/vmware/govmomi/simulator/esx/host_firewall_system.go +++ b/vendor/github.com/vmware/govmomi/simulator/esx/host_firewall_system.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,7 @@ import "github.com/vmware/govmomi/vim25/types" // HostFirewallInfo is the default template for the HostSystem config.firewall property. 
// Capture method: -// govc object.collect -s -dump HostSystem:ha-host config.firewall +// govc object.collect -s -dump HostSystem:ha-host config.firewall var HostFirewallInfo = types.HostFirewallInfo{ DynamicData: types.DynamicData{}, DefaultPolicy: types.HostFirewallDefaultPolicy{ diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/host_hardware_info.go b/vendor/github.com/vmware/govmomi/simulator/esx/host_hardware_info.go index aa633ad34b..a30303fa78 100644 --- a/vendor/github.com/vmware/govmomi/simulator/esx/host_hardware_info.go +++ b/vendor/github.com/vmware/govmomi/simulator/esx/host_hardware_info.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -24,7 +24,8 @@ import ( // HostHardwareInfo is the default template for the HostSystem hardware property. // Capture method: -// govc object.collect -s -dump HostSystem:ha-host hardware +// +// govc object.collect -s -dump HostSystem:ha-host hardware var HostHardwareInfo = &types.HostHardwareInfo{ SystemInfo: types.HostSystemInfo{ Vendor: "VMware, Inc.", diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/host_storage_device_info.go b/vendor/github.com/vmware/govmomi/simulator/esx/host_storage_device_info.go index 9d1ae32dd4..79033344f4 100644 --- a/vendor/github.com/vmware/govmomi/simulator/esx/host_storage_device_info.go +++ b/vendor/github.com/vmware/govmomi/simulator/esx/host_storage_device_info.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,8 @@ import "github.com/vmware/govmomi/vim25/types" // HostStorageDeviceInfo is the default template for the HostSystem config.storageDevice property. // Capture method: -// govc object.collect -s -dump HostSystem:ha-host config.storageDevice +// +// govc object.collect -s -dump HostSystem:ha-host config.storageDevice var HostStorageDeviceInfo = types.HostStorageDeviceInfo{ HostBusAdapter: []types.BaseHostHostBusAdapter{ &types.HostParallelScsiHba{ diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/host_system.go b/vendor/github.com/vmware/govmomi/simulator/esx/host_system.go index febc2f245d..2cd0c685d4 100644 --- a/vendor/github.com/vmware/govmomi/simulator/esx/host_system.go +++ b/vendor/github.com/vmware/govmomi/simulator/esx/host_system.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -25,7 +25,8 @@ import ( // HostSystem is the default template for HostSystem properties. // Capture method: -// govc host.info -dump +// +// govc host.info -dump var HostSystem = mo.HostSystem{ ManagedEntity: mo.ManagedEntity{ ExtensibleManagedObject: mo.ExtensibleManagedObject{ diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/performance_manager.go b/vendor/github.com/vmware/govmomi/simulator/esx/performance_manager.go index 532f0ad5b9..52ce6568bd 100644 --- a/vendor/github.com/vmware/govmomi/simulator/esx/performance_manager.go +++ b/vendor/github.com/vmware/govmomi/simulator/esx/performance_manager.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,8 @@ import "github.com/vmware/govmomi/vim25/types" // PerfCounter is the default template for the PerformanceManager perfCounter property. // Capture method: -// govc object.collect -s -dump PerformanceManager:ha-perfmgr perfCounter +// +// govc object.collect -s -dump PerformanceManager:ha-perfmgr perfCounter var PerfCounter = []types.PerfCounterInfo{ { Key: 0, diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/performance_manager_data.go b/vendor/github.com/vmware/govmomi/simulator/esx/performance_manager_data.go index 8d0eaca304..45c641e7f0 100644 --- a/vendor/github.com/vmware/govmomi/simulator/esx/performance_manager_data.go +++ b/vendor/github.com/vmware/govmomi/simulator/esx/performance_manager_data.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2018-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/resource_pool.go b/vendor/github.com/vmware/govmomi/simulator/esx/resource_pool.go index 90382dd326..2373311aaa 100644 --- a/vendor/github.com/vmware/govmomi/simulator/esx/resource_pool.go +++ b/vendor/github.com/vmware/govmomi/simulator/esx/resource_pool.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -25,7 +25,8 @@ import ( // ResourcePool is the default template for ResourcePool properties. // Capture method: -// govc pool.info "*" -dump +// +// govc pool.info "*" -dump var ResourcePool = mo.ResourcePool{ ManagedEntity: mo.ManagedEntity{ ExtensibleManagedObject: mo.ExtensibleManagedObject{ diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/root_folder.go b/vendor/github.com/vmware/govmomi/simulator/esx/root_folder.go index 3aefd1d812..1541de11a6 100644 --- a/vendor/github.com/vmware/govmomi/simulator/esx/root_folder.go +++ b/vendor/github.com/vmware/govmomi/simulator/esx/root_folder.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -23,7 +23,8 @@ import ( // RootFolder is the default template for the ServiceContent rootFolder property. // Capture method: -// govc folder.info -dump / +// +// govc folder.info -dump / var RootFolder = mo.Folder{ ManagedEntity: mo.ManagedEntity{ ExtensibleManagedObject: mo.ExtensibleManagedObject{ diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/service_content.go b/vendor/github.com/vmware/govmomi/simulator/esx/service_content.go index cc8938f878..bb10eeaa55 100644 --- a/vendor/github.com/vmware/govmomi/simulator/esx/service_content.go +++ b/vendor/github.com/vmware/govmomi/simulator/esx/service_content.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,8 @@ import "github.com/vmware/govmomi/vim25/types" // ServiceContent is the default template for the ServiceInstance content property. // Capture method: -// govc object.collect -s -dump - content +// +// govc object.collect -s -dump - content var ServiceContent = types.ServiceContent{ RootFolder: types.ManagedObjectReference{Type: "Folder", Value: "ha-folder-root"}, PropertyCollector: types.ManagedObjectReference{Type: "PropertyCollector", Value: "ha-property-collector"}, diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/setting.go b/vendor/github.com/vmware/govmomi/simulator/esx/setting.go index 757dfc039c..54ec6ead07 100644 --- a/vendor/github.com/vmware/govmomi/simulator/esx/setting.go +++ b/vendor/github.com/vmware/govmomi/simulator/esx/setting.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -21,13 +21,20 @@ import "github.com/vmware/govmomi/vim25/types" // HardwareVersion is the default VirtualMachine.Config.Version var HardwareVersion = "vmx-13" -// Setting is captured from ESX's HostSystem.configManager.advancedOption +// AdvancedOptions is captured from ESX's HostSystem.configManager.advancedOption // Capture method: -// govc object.collect -s -dump $(govc object.collect -s HostSystem:ha-host configManager.advancedOption) setting -var Setting = []types.BaseOptionValue{ +// +// govc object.collect -s -dump $(govc object.collect -s HostSystem:ha-host configManager.advancedOption) setting +var AdvancedOptions = []types.BaseOptionValue{ // This list is currently pruned to include a single option for testing &types.OptionValue{ Key: "Config.HostAgent.log.level", Value: "info", }, } + +// Setting is captured from ESX's HostSystem.ServiceContent.setting +// Capture method: +// +// govc object.collect -s -dump OptionManager:HostAgentSettings setting +var Setting = []types.BaseOptionValue{} diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/task_manager.go b/vendor/github.com/vmware/govmomi/simulator/esx/task_manager.go index b429ad4902..1b09aff0bd 100644 --- a/vendor/github.com/vmware/govmomi/simulator/esx/task_manager.go +++ b/vendor/github.com/vmware/govmomi/simulator/esx/task_manager.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2018-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,8 @@ import "github.com/vmware/govmomi/vim25/types" // Description is the default template for the TaskManager description property. // Capture method: -// govc object.collect -s -dump TaskManager:ha-taskmgr description +// +// govc object.collect -s -dump TaskManager:ha-taskmgr description var Description = types.TaskDescription{ MethodInfo: []types.BaseElementDescription{ &types.ElementDescription{ diff --git a/vendor/github.com/vmware/govmomi/simulator/esx/virtual_device.go b/vendor/github.com/vmware/govmomi/simulator/esx/virtual_device.go index 628d7e053d..a229ddceb8 100644 --- a/vendor/github.com/vmware/govmomi/simulator/esx/virtual_device.go +++ b/vendor/github.com/vmware/govmomi/simulator/esx/virtual_device.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,8 +20,9 @@ import "github.com/vmware/govmomi/vim25/types" // VirtualDevice is the default set of VirtualDevice types created for a VirtualMachine // Capture method: -// govc vm.create foo -// govc object.collect -s -dump vm/foo config.hardware.device +// +// govc vm.create foo +// govc object.collect -s -dump vm/foo config.hardware.device var VirtualDevice = []types.BaseVirtualDevice{ &types.VirtualIDEController{ VirtualController: types.VirtualController{ diff --git a/vendor/github.com/vmware/govmomi/simulator/guest_operations_manager.go b/vendor/github.com/vmware/govmomi/simulator/guest_operations_manager.go index 780f44a040..f058835808 100644 --- a/vendor/github.com/vmware/govmomi/simulator/guest_operations_manager.go +++ b/vendor/github.com/vmware/govmomi/simulator/guest_operations_manager.go @@ -69,7 +69,7 @@ func guestURL(ctx *Context, vm *VirtualMachine, path string) string { Host: "*", // See guest.FileManager.TransferURL Path: guestPrefix + strings.TrimPrefix(path, "/"), RawQuery: url.Values{ - "id": []string{vm.run.id}, + "id": []string{vm.svm.c.id}, "token": []string{ctx.Session.Key}, }.Encode(), }).String() @@ -79,7 +79,7 @@ func (m *GuestFileManager) InitiateFileTransferToGuest(ctx *Context, req *types. body := new(methods.InitiateFileTransferToGuestBody) vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - err := vm.run.prepareGuestOperation(vm, req.Auth) + err := vm.svm.prepareGuestOperation(req.Auth) if err != nil { body.Fault_ = Fault("", err) return body @@ -96,7 +96,7 @@ func (m *GuestFileManager) InitiateFileTransferFromGuest(ctx *Context, req *type body := new(methods.InitiateFileTransferFromGuestBody) vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - err := vm.run.prepareGuestOperation(vm, req.Auth) + err := vm.svm.prepareGuestOperation(req.Auth) if err != nil { body.Fault_ = Fault("", err) return body @@ -126,7 +126,7 @@ func (m *GuestProcessManager) StartProgramInGuest(ctx *Context, req *types.Start vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - fault := vm.run.prepareGuestOperation(vm, auth) + fault := vm.svm.prepareGuestOperation(auth) if fault != nil { body.Fault_ = Fault("", fault) } @@ -141,7 +141,7 @@ func (m *GuestProcessManager) StartProgramInGuest(ctx *Context, req *types.Start args = append(args, "-e", e) } - args = append(args, vm.run.id, spec.ProgramPath, spec.Arguments) + args = append(args, vm.svm.c.id, spec.ProgramPath, spec.Arguments) spec.ProgramPath = "docker" spec.Arguments = strings.Join(args, " ") @@ -213,7 +213,7 @@ func (m *GuestFileManager) mktemp(ctx *Context, req *types.CreateTemporaryFileIn vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - return vm.run.exec(ctx, vm, req.Auth, args) + return vm.svm.exec(ctx, req.Auth, args) } func (m *GuestFileManager) CreateTemporaryFileInGuest(ctx *Context, req *types.CreateTemporaryFileInGuest) soap.HasFault { @@ -298,7 +298,7 @@ func (m *GuestFileManager) ListFilesInGuest(ctx *Context, req *types.ListFilesIn return body } - res, fault := vm.run.exec(ctx, vm, req.Auth, listFiles(req)) + res, fault := vm.svm.exec(ctx, req.Auth, listFiles(req)) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -317,7 +317,7 @@ func (m *GuestFileManager) DeleteFileInGuest(ctx *Context, req *types.DeleteFile vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - _, 
fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -338,7 +338,7 @@ func (m *GuestFileManager) DeleteDirectoryInGuest(ctx *Context, req *types.Delet vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -359,7 +359,7 @@ func (m *GuestFileManager) MakeDirectoryInGuest(ctx *Context, req *types.MakeDir vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -381,7 +381,7 @@ func (m *GuestFileManager) MoveFileInGuest(ctx *Context, req *types.MoveFileInGu vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -399,7 +399,7 @@ func (m *GuestFileManager) MoveDirectoryInGuest(ctx *Context, req *types.MoveDir vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -424,7 +424,7 @@ func (m *GuestFileManager) ChangeFileAttributesInGuest(ctx *Context, req *types. if attr.Permissions != 0 { args := []string{"chmod", fmt.Sprintf("%#o", attr.Permissions), req.GuestFilePath} - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -443,7 +443,7 @@ func (m *GuestFileManager) ChangeFileAttributesInGuest(ctx *Context, req *types. if c.id != nil { args := []string{c.cmd, fmt.Sprintf("%d", *c.id), req.GuestFilePath} - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body diff --git a/vendor/github.com/vmware/govmomi/simulator/host_system.go b/vendor/github.com/vmware/govmomi/simulator/host_system.go index f28101a8c3..fd88cb4978 100644 --- a/vendor/github.com/vmware/govmomi/simulator/host_system.go +++ b/vendor/github.com/vmware/govmomi/simulator/host_system.go @@ -17,8 +17,10 @@ limitations under the License. package simulator import ( + "fmt" "net" "os" + "sync" "time" "github.com/vmware/govmomi/simulator/esx" @@ -30,10 +32,16 @@ import ( var ( hostPortUnique = os.Getenv("VCSIM_HOST_PORT_UNIQUE") == "true" + + globalLock sync.Mutex + // globalHostCount is used to construct unique hostnames. Should be consumed under globalLock. + globalHostCount = 0 ) type HostSystem struct { mo.HostSystem + + sh *simHost } func asHostSystemMO(obj mo.Reference) (*mo.HostSystem, bool) { @@ -72,13 +80,23 @@ func NewHostSystem(host mo.HostSystem) *HostSystem { deepCopy(hs.Config, cfg) hs.Config = cfg + // copy over the reference advanced options so each host can have it's own, allowing hosts to be configured for + // container backing individually + deepCopy(esx.AdvancedOptions, &cfg.Option) + + // add a supported option to the AdvancedOption manager + simOption := types.OptionDef{ElementDescription: types.ElementDescription{Key: advOptContainerBackingImage}} + // TODO: how do we enter patterns here? Or should we stick to a list in the value? 
+ // patterns become necessary if we want to enforce correctness on options for RUN.underlay. or allow RUN.port.xxx + hs.Config.OptionDef = append(hs.Config.OptionDef, simOption) + config := []struct { ref **types.ManagedObjectReference obj mo.Reference }{ {&hs.ConfigManager.DatastoreSystem, &HostDatastoreSystem{Host: &hs.HostSystem}}, {&hs.ConfigManager.NetworkSystem, NewHostNetworkSystem(&hs.HostSystem)}, - {&hs.ConfigManager.AdvancedOption, NewOptionManager(nil, esx.Setting)}, + {&hs.ConfigManager.AdvancedOption, NewOptionManager(nil, nil, &hs.Config.Option)}, {&hs.ConfigManager.FirewallSystem, NewHostFirewallSystem(&hs.HostSystem)}, {&hs.ConfigManager.StorageSystem, NewHostStorageSystem(&hs.HostSystem)}, } @@ -92,12 +110,23 @@ func NewHostSystem(host mo.HostSystem) *HostSystem { return hs } -func (h *HostSystem) configure(spec types.HostConnectSpec, connected bool) { +func (h *HostSystem) configure(ctx *Context, spec types.HostConnectSpec, connected bool) { h.Runtime.ConnectionState = types.HostSystemConnectionStateDisconnected if connected { h.Runtime.ConnectionState = types.HostSystemConnectionStateConnected } - if net.ParseIP(spec.HostName) != nil { + + // lets us construct non-conflicting hostname automatically if omitted + // does not use the unique port instead to avoid constraints on port, such as >1024 + + globalLock.Lock() + instanceID := globalHostCount + globalHostCount++ + globalLock.Unlock() + + if spec.HostName == "" { + spec.HostName = fmt.Sprintf("esx-%d", instanceID) + } else if net.ParseIP(spec.HostName) != nil { h.Config.Network.Vnic[0].Spec.Ip.IpAddress = spec.HostName } @@ -106,6 +135,241 @@ func (h *HostSystem) configure(spec types.HostConnectSpec, connected bool) { id := newUUID(h.Name) h.Summary.Hardware.Uuid = id h.Hardware.SystemInfo.Uuid = id + + var err error + h.sh, err = createSimulationHost(ctx, h) + if err != nil { + panic("failed to create simulation host and no path to return error: " + err.Error()) + } +} + +// configureContainerBacking sets up _this_ host for simulation using a container backing. +// Args: +// +// image - the container image with which to simulate the host +// mounts - array of mount info that should be translated into /vmfs/volumes/... mounts backed by container volumes +// networks - names of bridges to use for underlays. Will create a pNIC for each. The first will be treated as the management network. +// +// Restrictions adopted from createSimulationHost: +// * no mock of VLAN connectivity +// * only a single vmknic, used for "the management IP" +// * pNIC connectivity does not directly impact VMs/vmks using it as uplink +// +// The pnics will be named using standard pattern, ie. vmnic0, vmnic1, ... +// This will sanity check the NetConfig for "management" nicType to ensure that it maps through PortGroup->vSwitch->pNIC to vmnic0. 
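Before the implementation below, a within-package sketch of invoking this method, for example from a test. The helper name, image tag, and second bridge name are illustrative; defaultSimVolumes and defaultUnderlayBridgeName come from container_host_system.go, and the first bridge becomes the management underlay bound to vmnic0.

    // configureHostForContainers is a hypothetical helper showing the call shape.
    func configureHostForContainers(ctx *Context, hs *HostSystem) error {
        return hs.configureContainerBacking(ctx, "vcsim-esx:latest", defaultSimVolumes,
            defaultUnderlayBridgeName, "vcsim-storage-net")
    }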
+func (h *HostSystem) configureContainerBacking(ctx *Context, image string, mounts []types.HostFileSystemMountInfo, networks ...string) error { + option := &types.OptionValue{ + Key: advOptContainerBackingImage, + Value: image, + } + + advOpts := ctx.Map.Get(h.ConfigManager.AdvancedOption.Reference()).(*OptionManager) + fault := advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option}}).Fault() + if fault != nil { + panic(fault) + } + + h.Config.FileSystemVolume = nil + if mounts != nil { + h.Config.FileSystemVolume = &types.HostFileSystemVolumeInfo{ + VolumeTypeList: []string{"VMFS", "OTHER"}, + MountInfo: mounts, + } + } + + // force at least a management network + if len(networks) == 0 { + networks = []string{defaultUnderlayBridgeName} + } + + // purge pNICs from the template - it makes no sense to keep them for a sim host + h.Config.Network.Pnic = make([]types.PhysicalNic, len(networks)) + + // purge any IPs and MACs associated with existing NetConfigs for the host + for cfgIdx := range h.Config.VirtualNicManagerInfo.NetConfig { + config := &h.Config.VirtualNicManagerInfo.NetConfig[cfgIdx] + for candidateIdx := range config.CandidateVnic { + candidate := &config.CandidateVnic[candidateIdx] + candidate.Spec.Ip.IpAddress = "0.0.0.0" + candidate.Spec.Ip.SubnetMask = "0.0.0.0" + candidate.Spec.Mac = "00:00:00:00:00:00" + } + } + + // The presence of a pNIC is used to indicate connectivity to a specific underlay. We construct an empty pNIC entry and specify the underly via + // host.ConfigManager.AdvancedOptions. The pNIC will be populated with the MAC (accurate) and IP (divergence - we need to stash it somewhere) for the veth. + // We create a NetConfig "management" entry for the first pNIC - this will be populated with the IP of the "host" container. + + // create a pNIC for each underlay + for i, net := range networks { + name := fmt.Sprintf("vmnic%d", i) + + // we don't have a natural field for annotating which pNIC is connected to which network, so stash it in an adv option. 
+ option := &types.OptionValue{ + Key: advOptPrefixPnicToUnderlayPrefix + name, + Value: net, + } + fault = advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option}}).Fault() + if fault != nil { + panic(fault) + } + + h.Config.Network.Pnic[i] = types.PhysicalNic{ + Key: "key-vim.host.PhysicalNic-" + name, + Device: name, + Pci: fmt.Sprintf("0000:%2d:00.0", i+1), + Driver: "vcsim-bridge", + DriverVersion: "1.2.10.0", + FirmwareVersion: "1.57, 0x80000185", + LinkSpeed: &types.PhysicalNicLinkInfo{ + SpeedMb: 10000, + Duplex: true, + }, + ValidLinkSpecification: []types.PhysicalNicLinkInfo{ + { + SpeedMb: 10000, + Duplex: true, + }, + }, + Spec: types.PhysicalNicSpec{ + Ip: &types.HostIpConfig{}, + LinkSpeed: (*types.PhysicalNicLinkInfo)(nil), + EnableEnhancedNetworkingStack: types.NewBool(false), + EnsInterruptEnabled: types.NewBool(false), + }, + WakeOnLanSupported: false, + Mac: "00:00:00:00:00:00", + FcoeConfiguration: &types.FcoeConfig{ + PriorityClass: 3, + SourceMac: "00:00:00:00:00:00", + VlanRange: []types.FcoeConfigVlanRange{ + {}, + }, + Capabilities: types.FcoeConfigFcoeCapabilities{}, + FcoeActive: false, + }, + VmDirectPathGen2Supported: types.NewBool(false), + VmDirectPathGen2SupportedMode: "", + ResourcePoolSchedulerAllowed: types.NewBool(false), + ResourcePoolSchedulerDisallowedReason: nil, + AutoNegotiateSupported: types.NewBool(true), + EnhancedNetworkingStackSupported: types.NewBool(false), + EnsInterruptSupported: types.NewBool(false), + RdmaDevice: "", + DpuId: "", + } + } + + // sanity check that everything's hung together sufficiently well + details, err := h.getNetConfigInterface(ctx, "management") + if err != nil { + return err + } + + if details.uplink == nil || details.uplink.Device != "vmnic0" { + return fmt.Errorf("Config provided for host %s does not result in a consistent 'management' NetConfig that's bound to 'vmnic0'", h.Name) + } + + return nil +} + +// netConfigDetails is used to packaged up all the related network entities associated with a NetConfig binding +type netConfigDetails struct { + nicType string + netconfig *types.VirtualNicManagerNetConfig + vmk *types.HostVirtualNic + netstack *types.HostNetStackInstance + portgroup *types.HostPortGroup + vswitch *types.HostVirtualSwitch + uplink *types.PhysicalNic +} + +// getNetConfigInterface returns the set of constructs active for a given nicType (eg. "management", "vmotion") +// This method is provided because the Config structure held by HostSystem is heavily interconnected but serialized and not cross-linked with pointers. +// As such there's a _lot_ of cross-referencing that needs to be done to navigate. 
+// The pNIC returned is the uplink associated with the vSwitch for the netconfig +func (h *HostSystem) getNetConfigInterface(ctx *Context, nicType string) (*netConfigDetails, error) { + details := &netConfigDetails{ + nicType: nicType, + } + + for i := range h.Config.VirtualNicManagerInfo.NetConfig { + if h.Config.VirtualNicManagerInfo.NetConfig[i].NicType == nicType { + details.netconfig = &h.Config.VirtualNicManagerInfo.NetConfig[i] + break + } + } + if details.netconfig == nil { + return nil, fmt.Errorf("no matching NetConfig for NicType=%s", nicType) + } + + if details.netconfig.SelectedVnic == nil { + return details, nil + } + + vnicKey := details.netconfig.SelectedVnic[0] + for i := range details.netconfig.CandidateVnic { + if details.netconfig.CandidateVnic[i].Key == vnicKey { + details.vmk = &details.netconfig.CandidateVnic[i] + break + } + } + if details.vmk == nil { + panic(fmt.Sprintf("NetConfig for host %s references non-existant vNIC key %s for %s nicType", h.Name, vnicKey, nicType)) + } + + portgroupName := details.vmk.Portgroup + netstackKey := details.vmk.Spec.NetStackInstanceKey + + for i := range h.Config.Network.NetStackInstance { + if h.Config.Network.NetStackInstance[i].Key == netstackKey { + details.netstack = &h.Config.Network.NetStackInstance[i] + break + } + } + if details.netstack == nil { + panic(fmt.Sprintf("NetConfig for host %s references non-existant NetStack key %s for %s nicType", h.Name, netstackKey, nicType)) + } + + for i := range h.Config.Network.Portgroup { + // TODO: confirm correctness of this - seems weird it references the Spec.Name instead of the key like everything else. + if h.Config.Network.Portgroup[i].Spec.Name == portgroupName { + details.portgroup = &h.Config.Network.Portgroup[i] + break + } + } + if details.portgroup == nil { + panic(fmt.Sprintf("NetConfig for host %s references non-existant PortGroup name %s for %s nicType", h.Name, portgroupName, nicType)) + } + + vswitchKey := details.portgroup.Vswitch + for i := range h.Config.Network.Vswitch { + if h.Config.Network.Vswitch[i].Key == vswitchKey { + details.vswitch = &h.Config.Network.Vswitch[i] + break + } + } + if details.vswitch == nil { + panic(fmt.Sprintf("NetConfig for host %s references non-existant vSwitch key %s for %s nicType", h.Name, vswitchKey, nicType)) + } + + if len(details.vswitch.Pnic) != 1 { + // to change this, look at the Active NIC in the NicTeamingPolicy, but for now not worth it + panic(fmt.Sprintf("vSwitch %s for host %s has multiple pNICs associated which is not supported.", vswitchKey, h.Name)) + } + + pnicKey := details.vswitch.Pnic[0] + for i := range h.Config.Network.Pnic { + if h.Config.Network.Pnic[i].Key == pnicKey { + details.uplink = &h.Config.Network.Pnic[i] + break + } + } + if details.uplink == nil { + panic(fmt.Sprintf("NetConfig for host %s references non-existant pNIC key %s for %s nicType", h.Name, pnicKey, nicType)) + } + + return details, nil } func (h *HostSystem) event() types.HostEvent { @@ -207,7 +471,7 @@ func CreateStandaloneHost(ctx *Context, f *Folder, spec types.HostConnectSpec) ( pool := NewResourcePool() host := NewHostSystem(template) - host.configure(spec, false) + host.configure(ctx, spec, false) summary := new(types.ComputeResourceSummary) addComputeResource(summary, host) @@ -222,6 +486,7 @@ func CreateStandaloneHost(ctx *Context, f *Folder, spec types.HostConnectSpec) ( ctx.Map.PutEntity(cr, ctx.Map.NewEntity(host)) host.Summary.Host = &host.Self + host.Config.Host = host.Self ctx.Map.PutEntity(cr, ctx.Map.NewEntity(pool)) @@ 
-247,6 +512,17 @@ func (h *HostSystem) DestroyTask(ctx *Context, req *types.Destroy_Task) soap.Has f := ctx.Map.getEntityParent(h, "Folder").(*Folder) folderRemoveChild(ctx, &f.Folder, h.Reference()) + err := h.sh.remove(ctx) + + if err != nil { + return nil, &types.RuntimeFault{ + MethodFault: types.MethodFault{ + FaultCause: &types.LocalizedMethodFault{ + Fault: &types.SystemErrorFault{Reason: err.Error()}, + LocalizedMessage: err.Error()}}} + } + + // TODO: should there be events on lifecycle operations as with VMs? return nil, nil }) diff --git a/vendor/github.com/vmware/govmomi/simulator/http_nfc_lease.go b/vendor/github.com/vmware/govmomi/simulator/http_nfc_lease.go index 26e0d299fc..12ccb6d467 100644 --- a/vendor/github.com/vmware/govmomi/simulator/http_nfc_lease.go +++ b/vendor/github.com/vmware/govmomi/simulator/http_nfc_lease.go @@ -17,9 +17,11 @@ limitations under the License. package simulator import ( + "crypto/sha1" + "encoding/hex" "fmt" + "hash" "io" - "io/ioutil" "log" "net/http" "os" @@ -32,9 +34,15 @@ import ( "github.com/vmware/govmomi/vim25/types" ) +type metadata struct { + sha1 []byte + size int64 +} + type HttpNfcLease struct { mo.HttpNfcLease - files map[string]string + files map[string]string + metadata map[string]metadata } var ( @@ -62,12 +70,12 @@ func ServeNFC(w http.ResponseWriter, r *http.Request) { } status := http.StatusOK - var dst io.Writer + var dst hash.Hash var src io.ReadCloser switch r.Method { case http.MethodPut, http.MethodPost: - dst = ioutil.Discard + dst = sha1.New() src = r.Body case http.MethodGet: f, err := os.Open(file) @@ -82,6 +90,12 @@ func ServeNFC(w http.ResponseWriter, r *http.Request) { n, err := io.Copy(dst, src) _ = src.Close() + if dst != nil { + lease.metadata[name] = metadata{ + sha1: dst.Sum(nil), + size: n, + } + } msg := fmt.Sprintf("transferred %d bytes", n) if err != nil { @@ -101,7 +115,8 @@ func NewHttpNfcLease(ctx *Context, entity types.ManagedObjectReference) *HttpNfc }, State: types.HttpNfcLeaseStateReady, }, - files: make(map[string]string), + files: make(map[string]string), + metadata: make(map[string]metadata), } ctx.Session.Put(lease) @@ -135,3 +150,28 @@ func (l *HttpNfcLease) HttpNfcLeaseProgress(ctx *Context, req *types.HttpNfcLeas Res: new(types.HttpNfcLeaseProgressResponse), } } + +func (l *HttpNfcLease) getDeviceKey(name string) string { + for _, devUrl := range l.Info.DeviceUrl { + if name == devUrl.TargetId { + return devUrl.Key + } + } + return "unknown" +} + +func (l *HttpNfcLease) HttpNfcLeaseGetManifest(ctx *Context, req *types.HttpNfcLeaseGetManifest) soap.HasFault { + entries := []types.HttpNfcLeaseManifestEntry{} + for name, md := range l.metadata { + entries = append(entries, types.HttpNfcLeaseManifestEntry{ + Key: l.getDeviceKey(name), + Sha1: hex.EncodeToString(md.sha1), + Size: md.size, + }) + } + return &methods.HttpNfcLeaseGetManifestBody{ + Res: &types.HttpNfcLeaseGetManifestResponse{ + Returnval: entries, + }, + } +} diff --git a/vendor/github.com/vmware/govmomi/simulator/internal/server.go b/vendor/github.com/vmware/govmomi/simulator/internal/server.go index 1f814dea29..877eca0dcc 100644 --- a/vendor/github.com/vmware/govmomi/simulator/internal/server.go +++ b/vendor/github.com/vmware/govmomi/simulator/internal/server.go @@ -286,15 +286,18 @@ func (s *Server) wrap() { s.Config.ConnState = func(c net.Conn, cs http.ConnState) { s.mu.Lock() defer s.mu.Unlock() + switch cs { case http.StateNew: - s.wg.Add(1) if _, exists := s.conns[c]; exists { panic("invalid state transition") } if s.conns == 
nil { s.conns = make(map[net.Conn]http.ConnState) } + // Add c to the set of tracked conns and increment it to the + // waitgroup. + s.wg.Add(1) s.conns[c] = cs if s.closed { // Probably just a socket-late-binding dial from @@ -321,7 +324,14 @@ func (s *Server) wrap() { s.closeConn(c) } case http.StateHijacked, http.StateClosed: - s.forgetConn(c) + // Remove c from the set of tracked conns and decrement it from the + // waitgroup, unless it was previously removed. + if _, ok := s.conns[c]; ok { + delete(s.conns, c) + // Keep Close from returning until the user's ConnState hook + // (if any) finishes. + defer s.wg.Done() + } } if oldHook != nil { oldHook(c, cs) @@ -341,13 +351,3 @@ func (s *Server) closeConnChan(c net.Conn, done chan<- struct{}) { done <- struct{}{} } } - -// forgetConn removes c from the set of tracked conns and decrements it from the -// waitgroup, unless it was previously removed. -// s.mu must be held. -func (s *Server) forgetConn(c net.Conn) { - if _, ok := s.conns[c]; ok { - delete(s.conns, c) - s.wg.Done() - } -} diff --git a/vendor/github.com/vmware/govmomi/simulator/model.go b/vendor/github.com/vmware/govmomi/simulator/model.go index 7d137dd2be..433ddc3cfa 100644 --- a/vendor/github.com/vmware/govmomi/simulator/model.go +++ b/vendor/github.com/vmware/govmomi/simulator/model.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017-2021 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -75,32 +75,32 @@ type Model struct { // Datacenter specifies the number of Datacenter entities to create // Name prefix: DC, vcsim flag: -dc - Datacenter int + Datacenter int `json:"datacenter"` // Portgroup specifies the number of DistributedVirtualPortgroup entities to create per Datacenter // Name prefix: DVPG, vcsim flag: -pg - Portgroup int + Portgroup int `json:"portgroup"` // PortgroupNSX specifies the number NSX backed DistributedVirtualPortgroup entities to create per Datacenter // Name prefix: NSXPG, vcsim flag: -nsx-pg - PortgroupNSX int + PortgroupNSX int `json:"portgroupNSX"` // OpaqueNetwork specifies the number of OpaqueNetwork entities to create per Datacenter, // with Summary.OpaqueNetworkType set to nsx.LogicalSwitch and Summary.OpaqueNetworkId to a random uuid. // Name prefix: NSX, vcsim flag: -nsx - OpaqueNetwork int + OpaqueNetwork int `json:"opaqueNetwork"` // Host specifies the number of standalone HostSystems entities to create per Datacenter // Name prefix: H, vcsim flag: -standalone-host - Host int `json:",omitempty"` + Host int `json:"host,omitempty"` // Cluster specifies the number of ClusterComputeResource entities to create per Datacenter // Name prefix: C, vcsim flag: -cluster - Cluster int + Cluster int `json:"cluster"` // ClusterHost specifies the number of HostSystems entities to create within a Cluster // Name prefix: H, vcsim flag: -host - ClusterHost int `json:",omitempty"` + ClusterHost int `json:"clusterHost,omitempty"` // Pool specifies the number of ResourcePool entities to create per Cluster // Note that every cluster has a root ResourcePool named "Resources", as real vCenter does. 
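The HttpNfcLease change earlier in this patch records a SHA1 digest and byte count for every file pushed through a lease and exposes them via HttpNfcLeaseGetManifest. Below is a minimal client-side sketch of reading that manifest; it assumes `leaseRef` already points at a lease whose uploads have completed, and uses only the generated `methods`/`types` packages.

```go
package main

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/methods"
	"github.com/vmware/govmomi/vim25/types"
)

// printLeaseManifest is a sketch: leaseRef is assumed to reference an existing
// HttpNfcLease (e.g. returned by an ImportVApp/ExportVm call) whose transfers
// have already finished.
func printLeaseManifest(ctx context.Context, c *vim25.Client, leaseRef types.ManagedObjectReference) error {
	res, err := methods.HttpNfcLeaseGetManifest(ctx, c, &types.HttpNfcLeaseGetManifest{This: leaseRef})
	if err != nil {
		return err
	}
	for _, entry := range res.Returnval {
		// Sha1 and Size are populated by the simulator from the bytes it actually received.
		fmt.Printf("device %s: sha1=%s size=%d\n", entry.Key, entry.Sha1, entry.Size)
	}
	return nil
}
```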
@@ -110,13 +110,13 @@ type Model struct { // Note that this flag is not effective on standalone hosts. // For example: /DC0/host/DC0_C0/Resources/DC0_C0_RP1 // Name prefix: RP, vcsim flag: -pool - Pool int + Pool int `json:"pool"` // Datastore specifies the number of Datastore entities to create // Each Datastore will have temporary local file storage and will be mounted // on every HostSystem created by the ModelConfig // Name prefix: LocalDS, vcsim flag: -ds - Datastore int + Datastore int `json:"datastore"` // Machine specifies the number of VirtualMachine entities to create per // ResourcePool. If the pool flag is specified, the specified number of virtual @@ -125,21 +125,21 @@ type Model struct { // prefixed with RP0. On standalone hosts, machines are always deployed into the // root resource pool without any prefix. // Name prefix: VM, vcsim flag: -vm - Machine int + Machine int `json:"machine"` // Folder specifies the number of Datacenter to place within a Folder. // This includes a folder for the Datacenter itself and its host, vm, network and datastore folders. // All resources for the Datacenter are placed within these folders, rather than the top-level folders. // Name prefix: F, vcsim flag: -folder - Folder int + Folder int `json:"folder"` // App specifies the number of VirtualApp to create per Cluster // Name prefix: APP, vcsim flag: -app - App int + App int `json:"app"` // Pod specifies the number of StoragePod to create per Cluster // Name prefix: POD, vcsim flag: -pod - Pod int + Pod int `json:"pod"` // Delay configurations DelayConfig DelayConfig `json:"-"` @@ -477,7 +477,10 @@ func (m *Model) Create() error { ctx := SpoofContext() m.Service = New(NewServiceInstance(ctx, m.ServiceContent, m.RootFolder)) ctx.Map = Map + return m.CreateInfrastructure(ctx) +} +func (m *Model) CreateInfrastructure(ctx *Context) error { client := m.Service.client root := object.NewRootFolder(client) @@ -492,7 +495,7 @@ func (m *Model) Create() error { // 1 NIC per VM, backed by a DVPG if Model.Portgroup > 0 vmnet := esx.EthernetCard.Backing - // addHost adds a cluster host or a stanalone host. + // addHost adds a cluster host or a standalone host. addHost := func(name string, f func(types.HostConnectSpec) (*object.Task, error)) (*object.HostSystem, error) { spec := types.HostConnectSpec{ HostName: name, @@ -855,7 +858,7 @@ func (m *Model) Remove() { Map.m.Lock() for _, obj := range Map.objects { if vm, ok := obj.(*VirtualMachine); ok { - vm.run.remove(vm) + vm.svm.remove(SpoofContext()) } } Map.m.Unlock() @@ -876,9 +879,10 @@ func (m *Model) Run(f func(context.Context, *vim25.Client) error) error { if err != nil { return err } + // Only force TLS if the provided model didn't have any Service. + m.Service.TLS = new(tls.Config) } - m.Service.TLS = new(tls.Config) m.Service.RegisterEndpoints = true s := m.Service.NewServer() diff --git a/vendor/github.com/vmware/govmomi/simulator/option_manager.go b/vendor/github.com/vmware/govmomi/simulator/option_manager.go index efcdee2153..1dd1688cd6 100644 --- a/vendor/github.com/vmware/govmomi/simulator/option_manager.go +++ b/vendor/github.com/vmware/govmomi/simulator/option_manager.go @@ -28,19 +28,45 @@ import ( "github.com/vmware/govmomi/vim25/types" ) +// OptionManager is used in at least two locations for ESX: +// 1. ServiceContent.setting - this is empty on ESX and //TODO on VC +// 2. 
ConfigManager.advancedOption - this is where the bulk of the ESX settings are found type OptionManager struct { mo.OptionManager + + // mirror is an array to keep in sync with OptionManager.Settings. Necessary because we use append. + // uni-directional - changes made to the mirrored array are not reflected back to Settings + mirror *[]types.BaseOptionValue +} + +func asOptionManager(ctx *Context, obj mo.Reference) (*OptionManager, bool) { + om, ok := ctx.Map.Get(obj.Reference()).(*OptionManager) + return om, ok } -func NewOptionManager(ref *types.ManagedObjectReference, setting []types.BaseOptionValue) object.Reference { +// NewOptionManager constructs the type. If mirror is non-nil it takes precedence over settings, and settings is ignored. +// Args: +// - ref - used to set OptionManager.Self if non-nil +// - setting - initial options, may be nil. +// - mirror - options array to keep updated with the OptionManager.Settings, may be nil. +func NewOptionManager(ref *types.ManagedObjectReference, setting []types.BaseOptionValue, mirror *[]types.BaseOptionValue) object.Reference { s := &OptionManager{} + + s.Setting = setting + if mirror != nil { + s.mirror = mirror + s.Setting = *mirror + } + if ref != nil { s.Self = *ref } - s.Setting = setting + return s } +// init constructs the OptionManager for ServiceContent.setting from the template directories. +// This does _not_ construct the OptionManager for ConfigManager.advancedOption. func (m *OptionManager) init(r *Registry) { if len(m.Setting) == 0 { if r.IsVPX() { @@ -103,6 +129,9 @@ func (m *OptionManager) UpdateOptions(req *types.UpdateOptions) soap.HasFault { } m.Setting = append(m.Setting, change) + if m.mirror != nil { + *m.mirror = m.Setting + } } body.Res = new(types.UpdateOptionsResponse) diff --git a/vendor/github.com/vmware/govmomi/simulator/registry.go b/vendor/github.com/vmware/govmomi/simulator/registry.go index c91af59e2a..4b3e330607 100644 --- a/vendor/github.com/vmware/govmomi/simulator/registry.go +++ b/vendor/github.com/vmware/govmomi/simulator/registry.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017-2021 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -577,8 +577,8 @@ func (r *Registry) MarshalJSON() ([]byte, error) { defer r.m.Unlock() vars := struct { - Objects int - Locks int + Objects int `json:"objects"` + Locks int `json:"locks"` }{ len(r.objects), len(r.locks), diff --git a/vendor/github.com/vmware/govmomi/simulator/simulator.go b/vendor/github.com/vmware/govmomi/simulator/simulator.go index 673031fc95..c5359217bc 100644 --- a/vendor/github.com/vmware/govmomi/simulator/simulator.go +++ b/vendor/github.com/vmware/govmomi/simulator/simulator.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -338,8 +338,8 @@ func (r *response) MarshalXML(e *xml.Encoder, start xml.StartElement) error { // About generates some info about the simulator. func (s *Service) About(w http.ResponseWriter, r *http.Request) { var about struct { - Methods []string - Types []string + Methods []string `json:"methods"` + Types []string `json:"types"` } seen := make(map[string]bool) @@ -421,7 +421,9 @@ func (s *Service) HandleFunc(pattern string, handler func(http.ResponseWriter, * // RegisterSDK adds an HTTP handler for the Registry's Path and Namespace. // If r.Path is already registered, r's objects are added to the existing Registry. -func (s *Service) RegisterSDK(r *Registry) { +// An optional set of aliases can be provided to register the same handler for +// multiple paths. +func (s *Service) RegisterSDK(r *Registry, alias ...string) { if existing, ok := s.sdk[r.Path]; ok { for id, obj := range r.objects { existing.objects[id] = obj @@ -435,6 +437,11 @@ func (s *Service) RegisterSDK(r *Registry) { s.sdk[r.Path] = r s.ServeMux.HandleFunc(r.Path, s.ServeSDK) + + for _, p := range alias { + s.sdk[p] = r + s.ServeMux.HandleFunc(p, s.ServeSDK) + } } // StatusSDK can be used to simulate an /sdk HTTP response code other than 200. @@ -654,12 +661,9 @@ func defaultIP(addr *net.TCPAddr) string { // NewServer returns an http Server instance for the given service func (s *Service) NewServer() *Server { - s.RegisterSDK(Map) + s.RegisterSDK(Map, Map.Path+"/vimService") mux := s.ServeMux - vim := Map.Path + "/vimService" - s.sdk[vim] = s.sdk[vim25.Path] - mux.HandleFunc(vim, s.ServeSDK) mux.HandleFunc(Map.Path+"/vimServiceVersions.xml", s.ServiceVersions) mux.HandleFunc(folderPrefix, s.ServeDatastore) mux.HandleFunc(guestPrefix, ServeGuest) diff --git a/vendor/github.com/vmware/govmomi/simulator/snapshot.go b/vendor/github.com/vmware/govmomi/simulator/snapshot.go index bb1b66f6ff..55d43658d5 100644 --- a/vendor/github.com/vmware/govmomi/simulator/snapshot.go +++ b/vendor/github.com/vmware/govmomi/simulator/snapshot.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
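The json tags added to Model above make it possible to describe a vcsim topology as a JSON document rather than a set of flags. A minimal sketch follows, assuming the VPX defaults as a starting point; the field names come straight from the new tags (datacenter, cluster, clusterHost, machine).

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"github.com/vmware/govmomi/simulator"
	"github.com/vmware/govmomi/vim25"
)

func main() {
	// Start from the VPX defaults and override entity counts from JSON,
	// relying on the json tags now present on Model.
	model := simulator.VPX()
	cfg := []byte(`{"datacenter": 2, "cluster": 1, "clusterHost": 3, "machine": 2}`)
	if err := json.Unmarshal(cfg, model); err != nil {
		log.Fatal(err)
	}

	err := model.Run(func(ctx context.Context, c *vim25.Client) error {
		fmt.Println("connected to simulator at", c.URL().Host)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```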
@@ -30,6 +30,7 @@ import ( type VirtualMachineSnapshot struct { mo.VirtualMachineSnapshot + DataSets map[string]*DataSet } func (v *VirtualMachineSnapshot) createSnapshotFiles() types.BaseMethodFault { @@ -158,6 +159,7 @@ func (v *VirtualMachineSnapshot) RevertToSnapshotTask(ctx *Context, req *types.R vm := ctx.Map.Get(v.Vm).(*VirtualMachine) ctx.WithLock(vm, func() { + vm.DataSets = copyDataSetsForVmClone(v.DataSets) ctx.Map.Update(vm, []types.PropertyChange{ {Name: "snapshot.currentSnapshot", Val: v.Self}, }) diff --git a/vendor/github.com/vmware/govmomi/simulator/task.go b/vendor/github.com/vmware/govmomi/simulator/task.go index bd43f7420b..73c52b919c 100644 --- a/vendor/github.com/vmware/govmomi/simulator/task.go +++ b/vendor/github.com/vmware/govmomi/simulator/task.go @@ -110,10 +110,21 @@ func (t *Task) Run(ctx *Context) types.ManagedObjectReference { // in most cases, the caller already holds this lock, and we would like // the lock to be held across the "hand off" to the async goroutine. - unlock := vimMap.AcquireLock(ctx, tr) - + // however, with a TaskDelay, PropertyCollector (for example) cannot read + // any object properties while the lock is held. + handoff := true + if v, ok := TaskDelay.MethodDelay["LockHandoff"]; ok { + handoff = v != 0 + } + var unlock func() + if handoff { + unlock = vimMap.AcquireLock(ctx, tr) + } go func() { TaskDelay.delay(t.Info.Name) + if !handoff { + unlock = vimMap.AcquireLock(ctx, tr) + } res, err := t.Execute(t) unlock() diff --git a/vendor/github.com/vmware/govmomi/simulator/view_manager.go b/vendor/github.com/vmware/govmomi/simulator/view_manager.go index 1deb9ec164..842ff53d67 100644 --- a/vendor/github.com/vmware/govmomi/simulator/view_manager.go +++ b/vendor/github.com/vmware/govmomi/simulator/view_manager.go @@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -227,7 +227,7 @@ func (m *ViewManager) CreateListView(ctx *Context, req *types.CreateListView) so body := new(methods.CreateListViewBody) list := new(ListView) - if err := list.add(req.Obj); err != nil { + if err := list.add(ctx, req.Obj); err != nil { body.Fault_ = Fault("", err) return body } @@ -245,13 +245,13 @@ type ListView struct { mo.ListView } -func (v *ListView) update() { - Map.Update(v, []types.PropertyChange{{Name: "view", Val: v.View}}) +func (v *ListView) update(ctx *Context) { + ctx.Map.Update(v, []types.PropertyChange{{Name: "view", Val: v.View}}) } -func (v *ListView) add(refs []types.ManagedObjectReference) *types.ManagedObjectNotFound { +func (v *ListView) add(ctx *Context, refs []types.ManagedObjectReference) *types.ManagedObjectNotFound { for _, ref := range refs { - obj := Map.Get(ref) + obj := ctx.Session.Get(ref) if obj == nil { return &types.ManagedObjectNotFound{Obj: ref} } @@ -265,14 +265,14 @@ func (v *ListView) DestroyView(ctx *Context, c *types.DestroyView) soap.HasFault return destroyView(c.This) } -func (v *ListView) ModifyListView(req *types.ModifyListView) soap.HasFault { +func (v *ListView) ModifyListView(ctx *Context, req *types.ModifyListView) soap.HasFault { body := new(methods.ModifyListViewBody) for _, ref := range req.Remove { RemoveReference(&v.View, ref) } - if err := v.add(req.Add); err != nil { + if err := v.add(ctx, req.Add); err != nil { body.Fault_ = Fault("", err) return body } @@ -280,25 +280,25 @@ func (v *ListView) ModifyListView(req *types.ModifyListView) soap.HasFault { body.Res = new(types.ModifyListViewResponse) if len(req.Remove) != 0 || len(req.Add) != 0 { - v.update() + v.update(ctx) } return body } -func (v *ListView) ResetListView(req *types.ResetListView) soap.HasFault { +func (v *ListView) ResetListView(ctx *Context, req *types.ResetListView) soap.HasFault { body := new(methods.ResetListViewBody) v.View = nil - if err := v.add(req.Obj); err != nil { + if err := v.add(ctx, req.Obj); err != nil { body.Fault_ = Fault("", err) return body } body.Res = new(types.ResetListViewResponse) - v.update() + v.update(ctx) return body } diff --git a/vendor/github.com/vmware/govmomi/simulator/virtual_machine.go b/vendor/github.com/vmware/govmomi/simulator/virtual_machine.go index 8deeceda7a..fb6f6f1f6e 100644 --- a/vendor/github.com/vmware/govmomi/simulator/virtual_machine.go +++ b/vendor/github.com/vmware/govmomi/simulator/virtual_machine.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -43,10 +43,11 @@ import ( type VirtualMachine struct { mo.VirtualMachine + DataSets map[string]*DataSet log string sid int32 - run container + svm *simVM uid uuid.UUID imc *types.CustomizationSpec } @@ -165,6 +166,7 @@ func NewVirtualMachine(ctx *Context, parent types.ManagedObjectReference, spec * vm.Summary.QuickStats.GuestHeartbeatStatus = types.ManagedEntityStatusGray vm.Summary.OverallStatus = types.ManagedEntityStatusGreen vm.ConfigStatus = types.ManagedEntityStatusGreen + vm.DataSets = make(map[string]*DataSet) // put vm in the folder only if no errors occurred f, _ := asFolderMO(folder) @@ -394,13 +396,46 @@ func extraConfigKey(key string) string { return key } -func (vm *VirtualMachine) applyExtraConfig(spec *types.VirtualMachineConfigSpec) { +func (vm *VirtualMachine) applyExtraConfig(ctx *Context, spec *types.VirtualMachineConfigSpec) types.BaseMethodFault { + var removedContainerBacking bool var changes []types.PropertyChange for _, c := range spec.ExtraConfig { val := c.GetOptionValue() key := strings.TrimPrefix(extraConfigKey(val.Key), "SET.") if key == val.Key { - vm.Config.ExtraConfig = append(vm.Config.ExtraConfig, c) + keyIndex := -1 + for i := range vm.Config.ExtraConfig { + bov := vm.Config.ExtraConfig[i] + if bov == nil { + continue + } + ov := bov.GetOptionValue() + if ov == nil { + continue + } + if ov.Key == key { + keyIndex = i + break + } + } + if keyIndex < 0 { + vm.Config.ExtraConfig = append(vm.Config.ExtraConfig, c) + } else { + if s, ok := val.Value.(string); ok && s == "" { + if key == ContainerBackingOptionKey { + removedContainerBacking = true + } + // Remove existing element + l := len(vm.Config.ExtraConfig) + vm.Config.ExtraConfig[keyIndex] = vm.Config.ExtraConfig[l-1] + vm.Config.ExtraConfig[l-1] = nil + vm.Config.ExtraConfig = vm.Config.ExtraConfig[:l-1] + } else { + // Update existing element + vm.Config.ExtraConfig[keyIndex].GetOptionValue().Value = val.Value + } + } + continue } changes = append(changes, types.PropertyChange{Name: key, Val: val.Value}) @@ -421,9 +456,52 @@ func (vm *VirtualMachine) applyExtraConfig(spec *types.VirtualMachineConfigSpec) ) } } + + // create the container backing before we publish the updates so the simVM is available before handlers + // get triggered + var fault types.BaseMethodFault + if vm.svm == nil { + vm.svm = createSimulationVM(vm) + + // check to see if the VM is already powered on - if so we need to retroactively hit that path here + if vm.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOn { + err := vm.svm.start(ctx) + if err != nil { + // don't attempt to undo the changes already made - just return an error + // we'll retry the svm.start operation on pause/restart calls + fault = &types.VAppConfigFault{ + VimFault: types.VimFault{ + MethodFault: types.MethodFault{ + FaultCause: &types.LocalizedMethodFault{ + Fault: &types.SystemErrorFault{Reason: err.Error()}, + LocalizedMessage: err.Error()}}}} + } + } + } else if removedContainerBacking { + err := vm.svm.remove(ctx) + if err == nil { + // remove link from container to VM so callbacks no longer reflect state + vm.svm.vm = nil + // nil container backing reference to return this to a pure in-mem simulated VM + vm.svm = nil + + } else { + // don't attempt to undo the changes already made 
- just return an error + // we'll retry the svm.start operation on pause/restart calls + fault = &types.VAppConfigFault{ + VimFault: types.VimFault{ + MethodFault: types.MethodFault{ + FaultCause: &types.LocalizedMethodFault{ + Fault: &types.SystemErrorFault{Reason: err.Error()}, + LocalizedMessage: err.Error()}}}} + } + } + if len(changes) != 0 { Map.Update(vm, changes) } + + return fault } func validateGuestID(id string) types.BaseMethodFault { @@ -1022,8 +1100,9 @@ var vmwOUI = net.HardwareAddr([]byte{0x0, 0xc, 0x29}) // From http://pubs.vmware.com/vsphere-60/index.jsp?topic=%2Fcom.vmware.vsphere.networking.doc%2FGUID-DC7478FF-DC44-4625-9AD7-38208C56A552.html // "The host generates generateMAC addresses that consists of the VMware OUI 00:0C:29 and the last three octets in hexadecimal -// format of the virtual machine UUID. The virtual machine UUID is based on a hash calculated by using the UUID of the -// ESXi physical machine and the path to the configuration file (.vmx) of the virtual machine." +// +// format of the virtual machine UUID. The virtual machine UUID is based on a hash calculated by using the UUID of the +// ESXi physical machine and the path to the configuration file (.vmx) of the virtual machine." func (vm *VirtualMachine) generateMAC(unit int32) string { id := []byte(vm.Config.Uuid) @@ -1465,6 +1544,7 @@ func (vm *VirtualMachine) genVmdkPath(p object.DatastorePath) (string, types.Bas func (vm *VirtualMachine) configureDevices(ctx *Context, spec *types.VirtualMachineConfigSpec) types.BaseMethodFault { devices := object.VirtualDeviceList(vm.Config.Hardware.Device) + var err types.BaseMethodFault for i, change := range spec.DeviceChange { dspec := change.GetVirtualDeviceConfigSpec() device := dspec.Device.GetVirtualDevice() @@ -1501,7 +1581,7 @@ func (vm *VirtualMachine) configureDevices(ctx *Context, spec *types.VirtualMach } key := device.Key - err := vm.configureDevice(ctx, devices, dspec, nil) + err = vm.configureDevice(ctx, devices, dspec, nil) if err != nil { return err } @@ -1528,7 +1608,7 @@ func (vm *VirtualMachine) configureDevices(ctx *Context, spec *types.VirtualMach device.DeviceInfo.GetDescription().Summary = "" // regenerate summary } - err := vm.configureDevice(ctx, devices, dspec, oldDevice) + err = vm.configureDevice(ctx, devices, dspec, oldDevice) if err != nil { return err } @@ -1543,9 +1623,16 @@ func (vm *VirtualMachine) configureDevices(ctx *Context, spec *types.VirtualMach {Name: "config.hardware.device", Val: []types.BaseVirtualDevice(devices)}, }) - vm.updateDiskLayouts() + err = vm.updateDiskLayouts() + if err != nil { + return err + } - vm.applyExtraConfig(spec) // Do this after device config, as some may apply to the devices themselves (e.g. ethernet -> guest.net) + // Do this after device config, as some may apply to the devices themselves (e.g. 
ethernet -> guest.net) + err = vm.applyExtraConfig(ctx, spec) + if err != nil { + return err + } return nil } @@ -1580,14 +1667,23 @@ func (c *powerVMTask) Run(task *Task) (types.AnyType, types.BaseMethodFault) { return nil, new(types.InvalidState) } - c.run.start(c.ctx, c.VirtualMachine) + err := c.svm.start(c.ctx) + if err != nil { + return nil, &types.MissingPowerOnConfiguration{ + VAppConfigFault: types.VAppConfigFault{ + VimFault: types.VimFault{ + MethodFault: types.MethodFault{ + FaultCause: &types.LocalizedMethodFault{ + Fault: &types.SystemErrorFault{Reason: err.Error()}, + LocalizedMessage: err.Error()}}}}} + } c.ctx.postEvent( &types.VmStartingEvent{VmEvent: event}, &types.VmPoweredOnEvent{VmEvent: event}, ) c.customize(c.ctx) case types.VirtualMachinePowerStatePoweredOff: - c.run.stop(c.ctx, c.VirtualMachine) + c.svm.stop(c.ctx) c.ctx.postEvent( &types.VmStoppingEvent{VmEvent: event}, &types.VmPoweredOffEvent{VmEvent: event}, @@ -1600,7 +1696,7 @@ func (c *powerVMTask) Run(task *Task) (types.AnyType, types.BaseMethodFault) { } } - c.run.pause(c.ctx, c.VirtualMachine) + c.svm.pause(c.ctx) c.ctx.postEvent( &types.VmSuspendingEvent{VmEvent: event}, &types.VmSuspendedEvent{VmEvent: event}, @@ -1707,7 +1803,7 @@ func (vm *VirtualMachine) RebootGuest(ctx *Context, req *types.RebootGuest) soap } if vm.Guest.ToolsRunningStatus == string(types.VirtualMachineToolsRunningStatusGuestToolsRunning) { - vm.run.restart(ctx, vm) + vm.svm.restart(ctx) body.Res = new(types.RebootGuestResponse) } else { body.Fault_ = Fault("", new(types.ToolsUnavailable)) @@ -1771,6 +1867,7 @@ func (vm *VirtualMachine) DestroyTask(ctx *Context, req *types.Destroy_Task) soa task := CreateTask(vm, "destroy", func(t *Task) (types.AnyType, types.BaseMethodFault) { if dc == nil { return nil, &types.ManagedObjectNotFound{Obj: vm.Self} // If our Parent was destroyed, so were we. + // TODO: should this also trigger container removal? 
} r := vm.UnregisterVM(ctx, &types.UnregisterVM{ @@ -1795,7 +1892,14 @@ func (vm *VirtualMachine) DestroyTask(ctx *Context, req *types.Destroy_Task) soa Datacenter: &dc.Self, }) - vm.run.remove(vm) + err := vm.svm.remove(ctx) + if err != nil { + return nil, &types.RuntimeFault{ + MethodFault: types.MethodFault{ + FaultCause: &types.LocalizedMethodFault{ + Fault: &types.SystemErrorFault{Reason: err.Error()}, + LocalizedMessage: err.Error()}}} + } return nil, nil }) @@ -1909,6 +2013,7 @@ func (vm *VirtualMachine) CloneVMTask(ctx *Context, req *types.CloneVM_Task) soa } config := types.VirtualMachineConfigSpec{ Name: req.Name, + Version: vm.Config.Version, GuestId: vm.Config.GuestId, Files: &types.VirtualMachineFileInfo{ VmPathName: vmx.String(), @@ -1973,6 +2078,7 @@ func (vm *VirtualMachine) CloneVMTask(ctx *Context, req *types.CloneVM_Task) soa if req.Spec.Config != nil && req.Spec.Config.DeviceChange != nil { clone.configureDevices(ctx, &types.VirtualMachineConfigSpec{DeviceChange: req.Spec.Config.DeviceChange}) } + clone.DataSets = copyDataSetsForVmClone(vm.DataSets) if req.Spec.Template { _ = clone.MarkAsTemplate(&types.MarkAsTemplate{This: clone.Self}) @@ -2178,6 +2284,7 @@ func (vm *VirtualMachine) CreateSnapshotTask(ctx *Context, req *types.CreateSnap snapshot := &VirtualMachineSnapshot{} snapshot.Vm = vm.Reference() snapshot.Config = *vm.Config + snapshot.DataSets = copyDataSetsForVmClone(vm.DataSets) ctx.Map.Put(snapshot) @@ -2235,8 +2342,10 @@ func (vm *VirtualMachine) RevertToCurrentSnapshotTask(ctx *Context, req *types.R return body } + snapshot := ctx.Map.Get(*vm.Snapshot.CurrentSnapshot).(*VirtualMachineSnapshot) task := CreateTask(vm, "revertSnapshot", func(t *Task) (types.AnyType, types.BaseMethodFault) { + vm.DataSets = copyDataSetsForVmClone(snapshot.DataSets) return nil, nil }) @@ -2277,8 +2386,8 @@ func (vm *VirtualMachine) RemoveAllSnapshotsTask(ctx *Context, req *types.Remove func (vm *VirtualMachine) ShutdownGuest(ctx *Context, c *types.ShutdownGuest) soap.HasFault { r := &methods.ShutdownGuestBody{} - // should be poweron - if vm.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOff { + + if vm.Runtime.PowerState != types.VirtualMachinePowerStatePoweredOn { r.Fault_ = Fault("", &types.InvalidPowerState{ RequestedState: types.VirtualMachinePowerStatePoweredOn, ExistingState: vm.Runtime.PowerState, @@ -2286,27 +2395,61 @@ func (vm *VirtualMachine) ShutdownGuest(ctx *Context, c *types.ShutdownGuest) so return r } - // change state - vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOff - vm.Summary.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOff event := vm.event() - ctx.postEvent( - &types.VmGuestShutdownEvent{VmEvent: event}, - &types.VmPoweredOffEvent{VmEvent: event}, - ) - vm.run.stop(ctx, vm) + ctx.postEvent(&types.VmGuestShutdownEvent{VmEvent: event}) - ctx.Map.Update(vm, []types.PropertyChange{ - {Name: "runtime.powerState", Val: types.VirtualMachinePowerStatePoweredOff}, - {Name: "summary.runtime.powerState", Val: types.VirtualMachinePowerStatePoweredOff}, - }) + _ = CreateTask(vm, "shutdownGuest", func(*Task) (types.AnyType, types.BaseMethodFault) { + vm.svm.stop(ctx) + + ctx.Map.Update(vm, []types.PropertyChange{ + {Name: "runtime.powerState", Val: types.VirtualMachinePowerStatePoweredOff}, + {Name: "summary.runtime.powerState", Val: types.VirtualMachinePowerStatePoweredOff}, + }) + + ctx.postEvent(&types.VmPoweredOffEvent{VmEvent: event}) + + return nil, nil + }).Run(ctx) r.Res = new(types.ShutdownGuestResponse) return r } 
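With ShutdownGuest now flipping runtime.powerState from an asynchronous task instead of inline, a client should wait for the property change rather than assume the VM is already off when the call returns. A minimal sketch, assuming `vm` is an `*object.VirtualMachine` for a powered-on VM:

```go
package main

import (
	"context"

	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25/types"
)

// shutdownAndWait issues a guest shutdown and then blocks until the VM's
// runtime.powerState actually reports poweredOff.
func shutdownAndWait(ctx context.Context, vm *object.VirtualMachine) error {
	if err := vm.ShutdownGuest(ctx); err != nil {
		return err
	}
	// The simulator now updates the power state from a task, so poll/wait for
	// the property change instead of assuming an immediate transition.
	return vm.WaitForPowerState(ctx, types.VirtualMachinePowerStatePoweredOff)
}
```

The same pattern applies to the new StandbyGuest handler added below, waiting for `types.VirtualMachinePowerStateSuspended` instead.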
+func (vm *VirtualMachine) StandbyGuest(ctx *Context, c *types.StandbyGuest) soap.HasFault { + r := &methods.StandbyGuestBody{} + + if vm.Runtime.PowerState != types.VirtualMachinePowerStatePoweredOn { + r.Fault_ = Fault("", &types.InvalidPowerState{ + RequestedState: types.VirtualMachinePowerStatePoweredOn, + ExistingState: vm.Runtime.PowerState, + }) + + return r + } + + event := vm.event() + ctx.postEvent(&types.VmGuestStandbyEvent{VmEvent: event}) + + _ = CreateTask(vm, "standbyGuest", func(*Task) (types.AnyType, types.BaseMethodFault) { + vm.svm.pause(ctx) + + ctx.Map.Update(vm, []types.PropertyChange{ + {Name: "runtime.powerState", Val: types.VirtualMachinePowerStateSuspended}, + {Name: "summary.runtime.powerState", Val: types.VirtualMachinePowerStateSuspended}, + }) + + ctx.postEvent(&types.VmSuspendedEvent{VmEvent: event}) + + return nil, nil + }).Run(ctx) + + r.Res = new(types.StandbyGuestResponse) + + return r +} + func (vm *VirtualMachine) MarkAsTemplate(req *types.MarkAsTemplate) soap.HasFault { r := &methods.MarkAsTemplateBody{} diff --git a/vendor/github.com/vmware/govmomi/simulator/vpx/performance_manager_data.go b/vendor/github.com/vmware/govmomi/simulator/vpx/performance_manager_data.go index 3c42c5350e..8fc34960ff 100644 --- a/vendor/github.com/vmware/govmomi/simulator/vpx/performance_manager_data.go +++ b/vendor/github.com/vmware/govmomi/simulator/vpx/performance_manager_data.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2018-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -49,10 +49,10 @@ var VmMetricData = map[int32][]int64{ 185, 189, 134, 130, 160, 122, 84, 113, 153, 95, 110, 141, 91, 108, 130, 3372, 1942, 151, 102, 158, 162, 100, 143, 122, 109, 211, 229, 173, 187, 237, 200, 205, 241, 184, 204, 217, 182, 195, 219, 213, 211, 214, 189, 182, 245, 2671, 612, 1055, 595, 644, 747, 611, 336, 244, 118, 113, 128, 93, 94, 130, 359, 131, 151, 94, 137, 149, 106, 109, 127, 124}, - 70: []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + 70: []int64{91, 585, 246, 114, 553, 348, 824, 848, 827, 882, 632, 500, 647, 805, 425, 971, 789, 1001, 910, 1013, 338, 713, 496, 168, 201, 886, 124, 968, 768, 736, + 612, 859, 973, 64, 312, 449, 38, 839, 807, 571, 83, 862, 1015, 333, 818, 173, 396, 520, 171, 678, 160, 203, 991, 549, 776, 524, 390, 228, 576, 307, + 1005, 93, 893, 475, 451, 141, 98, 439, 95, 104, 739, 630, 275, 701, 722, 16, 207, 468, 310, 387, 217, 377, 684, 969, 396, 1010, 866, 914, 181, 621, 995, + 831, 278, 530, 465, 745, 704, 762, 545, 544}, 473: []int64{30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 27, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 
30, 30, 30, @@ -113,10 +113,13 @@ var VmMetricData = map[int32][]int64{ 20, 20, 20, 25, 20, 20, 20, 20, 20, 20, 21, 20, 20, 20, 20, 20, 21, 21, 20, 20, 20, 20, 21, 20, 20, 20, 20, 20, 21, 20, 20, 20, 20, 24, 20, 21, 20, 20, 20, 20, 20, 21, 20, 21, 20, 20, 20, 21, 20, 20, 20, 20, 20, 23, 20, 20, 20, 20, 20, 20, 21, 20, 20, 20, 20, 20, 21, 20, 20, 20}, - 410: []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + 410: []int64{976, 899, 657, 904, 171, 606, 607, 707, 823, 331, 255, 421, 230, 1001, + 937, 467, 738, 287, 904, 962, 518, 391, 593, 593, 59, 874, 364, 873, 728, 727, 533, + 328, 957, 637, 973, 1014, 259, 160, 698, 589, 933, 283, 385, 393, 129, 414, 16, 800, + 105, 150, 905, 278, 131, 115, 678, 738, 444, 411, 388, 402, 541, 428, 970, 260, 56, + 794, 975, 480, 644, 110, 702, 93, 240, 322, 651, 370, 261, 589, 72, 259, 405, 965, + 927, 519, 210, 291, 688, 758, 942, 301, 253, 605, 677, 995, 509, 478, 646, 3, 472, 1007, + }, 505: []int64{1100, 1100, 1100, 1100, 1100, 1000, 1000, 1000, 1000, 1000, 1100, 1100, 1200, 1200, 1200, 1200, 1200, 1200, 1200, 1200, 1200, 1200, 1300, 1300, 1300, 1100, 1200, 1100, 1100, 1100, 1100, 1100, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1300, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1300, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1200, 1400, 1600, 1800, 2000, 2300, 2500, 2600, 2600, 2600, 2600, 2600, 2600, 2600, 2600, @@ -181,10 +184,7 @@ var VmMetricData = map[int32][]int64{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - 94: []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + 94: []int64{341, 881, 851, 214, 263, 802, 777, 58, 661, 231, 255, 494, 192, 302, 90, 371, 709, 164, 58, 1, 511, 711, 1005, 556, 457, 869, 708, 994, 668, 826, 112, 633, 901, 345, 317, 199, 199, 168, 981, 665, 29, 436, 225, 426, 309, 333, 757, 696, 840, 210, 500, 343, 651, 717, 803, 869, 445, 907, 928, 268, 437, 583, 160, 478, 891, 471, 72, 448, 457, 499, 348, 527, 409, 731, 849, 572, 378, 33, 254, 414, 781, 322, 153, 755, 301, 583, 823, 55, 637, 233, 259, 6, 448, 217, 842, 921, 971, 419, 246, 289}, 400: []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -305,10 +305,7 @@ var VmMetricData = map[int32][]int64{ 800, 900, 1000, 1000, 1300, 1400, 1300, 900, 800, 900, 1000, 1000, 1000, 800, 800, 2400, 3900, 4100, 2600, 1400, 1200, 1200, 900, 800, 800, 1000, 1100, 1000, 900, 800, 900, 900, 1000, 900, 1200, 1200, 1200, 900, 800, 900, 1100, 1100, 1100, 800, 900, 2000, 2900, 4100, 3800, 4200, 4500, 4700, 3900, 2400, 1400, 1000, 1100, 1100, 900, 800, 1000, 1100, 1200, 900, 
1200, 1300, 1400, 1100, 1000, 1000}, - 74: []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + 74: []int64{961, 1016, 502, 571, 475, 122, 219, 937, 428, 657, 356, 987, 332, 469, 465, 216, 708, 984, 519, 696, 420, 994, 454, 7, 223, 559, 320, 521, 632, 18, 280, 144, 48, 994, 584, 555, 665, 944, 831, 135, 40, 851, 210, 440, 679, 569, 908, 745, 552, 125, 783, 877, 317, 895, 458, 999, 50, 288, 600, 729, 716, 441, 713, 800, 378, 440, 225, 226, 384, 588, 982, 393, 736, 817, 453, 644, 255, 92, 671, 81, 586, 1019, 286, 247, 781, 524, 765, 762, 217, 941, 595, 478, 597, 294, 648, 327, 1019, 706, 826, 813}, 85: []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -357,10 +354,7 @@ var VmMetricData = map[int32][]int64{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - 406: []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + 406: []int64{942, 615, 308, 1003, 67, 638, 951, 442, 100, 350, 477, 964, 469, 905, 622, 523, 679, 130, 457, 208, 710, 905, 781, 740, 608, 254, 286, 483, 205, 929, 88, 936, 730, 832, 144, 658, 558, 306, 19, 920, 254, 804, 458, 370, 328, 655, 43, 165, 653, 310, 369, 705, 188, 238, 170, 948, 535, 209, 293, 971, 787, 245, 377, 767, 807, 324, 896, 109, 178, 928, 954, 312, 26, 831, 816, 646, 159, 232, 997, 820, 387, 128, 28, 582, 1010, 705, 662, 815, 830, 946, 750, 637, 600, 847, 732, 566, 562, 406, 311, 609}, 513: []int64{1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1800, 1800, 1900, 1900, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 1900, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 1900, 1900, 1900, 1900, 1900, 1800, 1700, 1800, 1700, 1700, 1700, 1700, 1700, 1700, 1700, 1700, 1700, 1700, 1700, 1700, 2200, 2500, 3600, 3700, 3800, 3900, 3900, 4000, 4000, 4000, 4000, 4000, 4000, 4000, 4000, @@ -373,10 +367,12 @@ var VmMetricData = map[int32][]int64{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - 10: []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + 10: []int64{740, 967, 600, 858, 667, 588, 488, 323, 834, 600, 49, 486, 867, 163, 219, 532, + 224, 115, 377, 80, 671, 327, 77, 8, 995, 831, 594, 326, 595, 
182, 152, 195, 897, 924, 995, + 393, 126, 296, 678, 494, 752, 198, 199, 184, 412, 600, 19, 454, 605, 481, 456, 54, 487, + 395, 24, 859, 670, 710, 339, 232, 300, 941, 187, 190, 779, 127, 252, 304, 580, 823, 30, + 43, 3, 30, 523, 670, 499, 474, 962, 588, 300, 978, 338, 772, 212, 435, 920, 958, 533, 650, + 39, 668, 185, 124, 851, 226, 356, 594, 247, 194}, 12: []int64{137, 101, 116, 123, 114, 107, 96, 96, 103, 124, 111, 109, 108, 106, 106, 124, 122, 117, 149, 137, 141, 109, 110, 154, 128, 88, 107, 119, 102, 108, 125, 91, 95, 116, 97, 108, 100, 103, 119, 122, 97, 99, 108, 94, 97, 106, 105, 106, 107, 98, 111, 105, 97, 98, 106, 111, 94, 90, 95, 100, 131, 100, 103, 125, 108, 112, 97, 91, 107, 106, 98, 102, 116, 96, 112, 257, 186, 182, 259, 271, 232, 196, 123, 107, 122, 112, 114, 118, 104, 116, @@ -684,11 +680,12 @@ var HostMetricData = map[int32][]int64{ 135, 102, 74, 158, 108, 74, 121, 144, 74, 132, 124, 82, 211, 154, 127, 142, 84, 76, 115, 96, 76, 128, 84, 72, 116, 157, 96, 126, 94, 107, 136, 132, 106, 158, 164, 96, 112, 137, 90, 142, }, - 410: []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 410: []int64{976, 899, 657, 904, 171, 606, 607, 707, 823, 331, 255, 421, 230, 1001, + 937, 467, 738, 287, 904, 962, 518, 391, 593, 593, 59, 874, 364, 873, 728, 727, 533, + 328, 957, 637, 973, 1014, 259, 160, 698, 589, 933, 283, 385, 393, 129, 414, 16, 800, + 105, 150, 905, 278, 131, 115, 678, 738, 444, 411, 388, 402, 541, 428, 970, 260, 56, + 794, 975, 480, 644, 110, 702, 93, 240, 322, 651, 370, 261, 589, 72, 259, 405, 965, + 927, 519, 210, 291, 688, 758, 942, 301, 253, 605, 677, 995, 509, 478, 646, 3, 472, 1007, }, 470: []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -708,11 +705,12 @@ var HostMetricData = map[int32][]int64{ 58386204, 58386216, 58386220, 58386220, 58388612, 58386220, 58386220, 58386220, 58386220, 58386208, 58386248, 58386248, 58386248, 58386236, 58386248, 58386248, 58386248, 58386248, 58386248, 58386248, 58386248, 58386248, 58386236, 58386248, 58388640, 58386248, 58386240, 58386248, 58386248, 58386248, 58386248, 58388640, 58386248, 58386248, 58386248, 58386236, 58386248, 58386248, 58386248, 58386236, }, - 90: []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 90: []int64{6, 644, 90, 376, 809, 98, 902, 998, 526, 633, 973, 1019, 423, 410, 219, + 879, 566, 390, 109, 450, 489, 341, 61, 465, 29, 893, 134, 1022, 703, 73, 477, 976, + 172, 175, 65, 696, 410, 566, 430, 187, 300, 542, 305, 751, 606, 567, 905, 70, 369, + 524, 913, 829, 351, 456, 295, 29, 539, 694, 620, 1010, 441, 904, 706, 954, 777, 221, + 497, 586, 456, 694, 183, 631, 302, 391, 857, 864, 610, 880, 906, 299, 839, 399, 49, + 713, 220, 903, 788, 228, 256, 119, 562, 395, 991, 543, 205, 584, 130, 804, 70, 99, }, 406: []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -1012,16 +1010,14 @@ var ResourcePoolMetricData = map[int32][]int64{ 1211531, 1206346, 
1206131, 1206061, 1205836, 1205722, 1205495, 1205681, 1205711, 1204664, 1204509, 1205405, 1205132, 1204909, 1204796, 1204754, 1204743, 1204720, 1204484, 1204448, 1204636, 1204476, 1204354, 1204331, 1204079, 1203968, 1203899, 1203865, 1203811, 1203734, 1203865, 1203628, 1203251, 1203212, 1203098, 1203057, 1203938, 1203697, 1203608, 1203573, 1203545, 1203465, 1202291, 1201669, 1201782, 1201631, 1201743, 1201529, 1201474, 1201407, 1199122, 1198521, 1198570}, - 70: []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - 90: []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + 70: []int64{91, 585, 246, 114, 553, 348, 824, 848, 827, 882, 632, 500, 647, 805, 425, 971, 789, 1001, 910, 1013, 338, 713, 496, 168, 201, 886, 124, 968, 768, 736, 612, 859, 973, 64, 312, 449, 38, 839, 807, 571, 83, 862, 1015, 333, 818, 173, 396, 520, 171, 678, 160, 203, 991, 549, 776, 524, 390, 228, 576, 307, 1005, 93, 893, 475, 451, 141, 98, 439, 95, 104, 739, 630, 275, 701, 722, 16, 207, 468, 310, 387, 217, 377, 684, 969, 396, 1010, 866, 914, 181, 621, 995, 831, 278, 530, 465, 745, 704, 762, 545, 544}, + 90: []int64{6, 644, 90, 376, 809, 98, 902, 998, 526, 633, 973, 1019, 423, 410, 219, + 879, 566, 390, 109, 450, 489, 341, 61, 465, 29, 893, 134, 1022, 703, 73, 477, 976, + 172, 175, 65, 696, 410, 566, 430, 187, 300, 542, 305, 751, 606, 567, 905, 70, 369, + 524, 913, 829, 351, 456, 295, 29, 539, 694, 620, 1010, 441, 904, 706, 954, 777, 221, + 497, 586, 456, 694, 183, 631, 302, 391, 857, 864, 610, 880, 906, 299, 839, 399, 49, + 713, 220, 903, 788, 228, 256, 119, 562, 395, 991, 543, 205, 584, 130, 804, 70, 99, + }, 7: []int64{1406, 1419, 1426, 1412, 1413, 1408, 1472, 1426, 1462, 1424, 1447, 1403, 1433, 1429, 1420, 1395, 1447, 1396, 1406, 1413, 1432, 1420, 1425, 1411, 1432, 1437, 1444, 1407, 1448, 1450, 1477, 1431, 1451, 1437, 1403, 1459, 1478, 1452, 1447, 1446, 1410, 1441, 1445, 1415, 1433, 1435, 1458, 1419, 1441, 1476, 1310, 1482, 1451, 1458, 1455, 1428, 1446, 1443, 1436, 1449, diff --git a/vendor/github.com/vmware/govmomi/simulator/vpx/service_content.go b/vendor/github.com/vmware/govmomi/simulator/vpx/service_content.go index 3018d01ed2..c1874c5df6 100644 --- a/vendor/github.com/vmware/govmomi/simulator/vpx/service_content.go +++ b/vendor/github.com/vmware/govmomi/simulator/vpx/service_content.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -24,7 +24,7 @@ import ( // ServiceContent is the default template for the ServiceInstance content property. 
// Capture method: -// govc object.collect -s -dump - content +// govc object.collect -s -dump - content var ServiceContent = types.ServiceContent{ RootFolder: types.ManagedObjectReference{Type: "Folder", Value: "group-d1"}, PropertyCollector: types.ManagedObjectReference{Type: "PropertyCollector", Value: "propertyCollector"}, diff --git a/vendor/github.com/vmware/govmomi/simulator/vpx/setting.go b/vendor/github.com/vmware/govmomi/simulator/vpx/setting.go index 7bbf0c02d6..7625824da6 100644 --- a/vendor/github.com/vmware/govmomi/simulator/vpx/setting.go +++ b/vendor/github.com/vmware/govmomi/simulator/vpx/setting.go @@ -18,6 +18,8 @@ package vpx import "github.com/vmware/govmomi/vim25/types" +// TODO: figure out whether this is Setting or AdvancedOptions - see esx/setting.go for the difference + // Setting is captured from VC's ServiceContent.OptionManager.setting var Setting = []types.BaseOptionValue{ // This list is currently pruned to include sso options only with sso.enabled set to false diff --git a/vendor/github.com/vmware/govmomi/simulator/vpx/task_manager.go b/vendor/github.com/vmware/govmomi/simulator/vpx/task_manager.go index dee36ff71a..a3fcc302d8 100644 --- a/vendor/github.com/vmware/govmomi/simulator/vpx/task_manager.go +++ b/vendor/github.com/vmware/govmomi/simulator/vpx/task_manager.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2018-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,7 @@ import "github.com/vmware/govmomi/vim25/types" // Description is the default template for the TaskManager description property. // Capture method: -// govc object.collect -s -dump TaskManager:TaskManager description +// govc object.collect -s -dump TaskManager:TaskManager description var Description = types.TaskDescription{ MethodInfo: []types.BaseElementDescription{ &types.ElementDescription{ diff --git a/vendor/github.com/vmware/govmomi/sts/client.go b/vendor/github.com/vmware/govmomi/sts/client.go index d98c560954..c51ec2a00a 100644 --- a/vendor/github.com/vmware/govmomi/sts/client.go +++ b/vendor/github.com/vmware/govmomi/sts/client.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2018-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,9 +20,11 @@ import ( "context" "crypto/tls" "errors" + "fmt" "net/url" "time" + internalhelpers "github.com/vmware/govmomi/internal" "github.com/vmware/govmomi/lookup" "github.com/vmware/govmomi/lookup/types" "github.com/vmware/govmomi/sts/internal" @@ -31,8 +33,10 @@ import ( ) const ( - Namespace = "oasis:names:tc:SAML:2.0:assertion" - Path = "/sts/STSService" + Namespace = "oasis:names:tc:SAML:2.0:assertion" + basePath = "/sts" + Path = basePath + "/STSService" + SystemPath = basePath + "/system-STSService/sdk" ) // Client is a soap.Client targeting the STS (Secure Token Service) API endpoint. @@ -42,11 +46,16 @@ type Client struct { RoundTripper soap.RoundTripper } -// NewClient returns a client targeting the STS API endpoint. 
-// The Client.URL will be set to that of the Lookup Service's endpoint registration, -// as the SSO endpoint can be external to vCenter. If the Lookup Service is not available, -// URL defaults to Path on the vim25.Client.URL.Host. -func NewClient(ctx context.Context, c *vim25.Client) (*Client, error) { +func getEndpointURL(ctx context.Context, c *vim25.Client) string { + // Services running on vCenter can bypass lookup service using the + // system-STSService path. This avoids the need to lookup the system domain. + if usingSidecar := internalhelpers.UsingEnvoySidecar(c); usingSidecar { + return fmt.Sprintf("http://%s%s", c.URL().Host, SystemPath) + } + return getEndpointURLFromLookupService(ctx, c) +} + +func getEndpointURLFromLookupService(ctx context.Context, c *vim25.Client) string { filter := &types.LookupServiceRegistrationFilter{ ServiceType: &types.LookupServiceRegistrationServiceType{ Product: "com.vmware.cis", @@ -58,7 +67,16 @@ func NewClient(ctx context.Context, c *vim25.Client) (*Client, error) { }, } - url := lookup.EndpointURL(ctx, c, Path, filter) + return lookup.EndpointURL(ctx, c, Path, filter) +} + +// NewClient returns a client targeting the STS API endpoint. +// The Client.URL will be set to that of the Lookup Service's endpoint registration, +// as the SSO endpoint can be external to vCenter. If the Lookup Service is not available, +// URL defaults to Path on the vim25.Client.URL.Host. +func NewClient(ctx context.Context, c *vim25.Client) (*Client, error) { + + url := getEndpointURL(ctx, c) sc := c.Client.NewServiceClient(url, Namespace) return &Client{sc, sc}, nil diff --git a/vendor/github.com/vmware/govmomi/task/wait.go b/vendor/github.com/vmware/govmomi/task/wait.go index b78f5110d9..d52458e667 100644 --- a/vendor/github.com/vmware/govmomi/task/wait.go +++ b/vendor/github.com/vmware/govmomi/task/wait.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2015 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -113,7 +113,6 @@ func (t *taskCallback) fn(pc []types.PropertyChange) bool { // The detail for the progress update is set to an empty string. If the task // finishes in the error state, the error instance is passed through as well. // Note that this error is the same error that is returned by this function. -// func Wait(ctx context.Context, ref types.ManagedObjectReference, pc *property.Collector, s progress.Sinker) (*types.TaskInfo, error) { cb := &taskCallback{} diff --git a/vendor/github.com/vmware/govmomi/toolbox/hgfs/server.go b/vendor/github.com/vmware/govmomi/toolbox/hgfs/server.go index efc3faf212..1c887e9748 100644 --- a/vendor/github.com/vmware/govmomi/toolbox/hgfs/server.go +++ b/vendor/github.com/vmware/govmomi/toolbox/hgfs/server.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -198,9 +198,9 @@ func (s *Server) OpenFile(name string, mode int32) (File, error) { // Note that callers on the VMX side that reach this path are only concerned with: // - does the file exist? // - size: -// + used for UI progress with desktop Drag-N-Drop operations, which toolbox does not support. -// + sent to as Content-Length header in response to GET of FileTransferInformation.Url, -// if the first ReadV3 size is > HGFS_LARGE_PACKET_MAX +// + used for UI progress with desktop Drag-N-Drop operations, which toolbox does not support. +// + sent to as Content-Length header in response to GET of FileTransferInformation.Url, +// if the first ReadV3 size is > HGFS_LARGE_PACKET_MAX func (s *Server) Stat(name string) (os.FileInfo, error) { u := urlParse(name) diff --git a/vendor/github.com/vmware/govmomi/vapi/internal/internal.go b/vendor/github.com/vmware/govmomi/vapi/internal/internal.go index f6584c569b..2872f38030 100644 --- a/vendor/github.com/vmware/govmomi/vapi/internal/internal.go +++ b/vendor/github.com/vmware/govmomi/vapi/internal/internal.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2018-2022 VMware, Inc. All Rights Reserved. +Copyright (c) 2018-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -43,7 +43,6 @@ const ( TrustedCertificatesPath = "/api/content/trusted-certificates" VCenterOVFLibraryItem = "/com/vmware/vcenter/ovf/library-item" VCenterVMTXLibraryItem = "/vcenter/vm-template/library-items" - VCenterVM = "/vcenter/vm" SessionCookieName = "vmware-api-session-id" UseHeaderAuthn = "vmware-use-header-authn" DebugEcho = "/vc-sim/debug/echo" @@ -59,7 +58,10 @@ type AssociatedObject struct { // Reference implements mo.Reference func (o AssociatedObject) Reference() types.ManagedObjectReference { - return types.ManagedObjectReference(o) + return types.ManagedObjectReference{ + Type: o.Type, + Value: o.Value, + } } // Association for tag-association requests. @@ -69,9 +71,11 @@ type Association struct { // NewAssociation returns an Association, converting ref to an AssociatedObject. 
func NewAssociation(ref mo.Reference) Association { - obj := AssociatedObject(ref.Reference()) return Association{ - ObjectID: &obj, + ObjectID: &AssociatedObject{ + Type: ref.Reference().Type, + Value: ref.Reference().Value, + }, } } diff --git a/vendor/github.com/vmware/govmomi/vapi/tags/tag_association.go b/vendor/github.com/vmware/govmomi/vapi/tags/tag_association.go index 33b2209366..ca8818191b 100644 --- a/vendor/github.com/vmware/govmomi/vapi/tags/tag_association.go +++ b/vendor/github.com/vmware/govmomi/vapi/tags/tag_association.go @@ -81,7 +81,11 @@ func (c *Manager) AttachTagToMultipleObjects(ctx context.Context, tagID string, var ids []internal.AssociatedObject for i := range refs { - ids = append(ids, internal.AssociatedObject(refs[i].Reference())) + ref := refs[i].Reference() + ids = append(ids, internal.AssociatedObject{ + Type: ref.Type, + Value: ref.Value, + }) } spec := struct { @@ -116,7 +120,10 @@ func (c *Manager) AttachMultipleTagsToObject(ctx context.Context, tagIDs []strin } } - obj := internal.AssociatedObject(ref.Reference()) + obj := internal.AssociatedObject{ + Type: ref.Reference().Type, + Value: ref.Reference().Value, + } spec := struct { ObjectID internal.AssociatedObject `json:"object_id"` TagIDs []string `json:"tag_ids"` @@ -166,7 +173,10 @@ func (c *Manager) DetachMultipleTagsFromObject(ctx context.Context, tagIDs []str } } - obj := internal.AssociatedObject(ref.Reference()) + obj := internal.AssociatedObject{ + Type: ref.Reference().Type, + Value: ref.Reference().Value, + } spec := struct { ObjectID internal.AssociatedObject `json:"object_id"` TagIDs []string `json:"tag_ids"` @@ -337,7 +347,11 @@ func (t *AttachedTags) UnmarshalJSON(b []byte) error { func (c *Manager) ListAttachedTagsOnObjects(ctx context.Context, objectID []mo.Reference) ([]AttachedTags, error) { var ids []internal.AssociatedObject for i := range objectID { - ids = append(ids, internal.AssociatedObject(objectID[i].Reference())) + ref := objectID[i].Reference() + ids = append(ids, internal.AssociatedObject{ + Type: ref.Type, + Value: ref.Value, + }) } spec := struct { diff --git a/vendor/github.com/vmware/govmomi/vapi/vm/dataset/dataset.go b/vendor/github.com/vmware/govmomi/vapi/vm/dataset/dataset.go new file mode 100644 index 0000000000..cb178d6fbf --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vapi/vm/dataset/dataset.go @@ -0,0 +1,200 @@ +/* +Copyright (c) 2023-2023 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dataset + +import ( + "context" + "net/http" + "net/url" + "path" + "strconv" + + "github.com/vmware/govmomi/vapi/rest" + "github.com/vmware/govmomi/vapi/vm/internal" +) + +// Manager extends rest.Client, adding data set related methods. +// +// Data sets functionality was introduced in vSphere 8.0, +// and requires the VM to have virtual hardware version 20 or newer. +// +// See the VMware Guest SDK Programming Guide for details on using data sets +// from within the guest OS of a VM. 
+// +// See https://developer.vmware.com/apis/vsphere-automation/latest/vcenter/vm/data_sets/ +type Manager struct { + *rest.Client +} + +// NewManager creates a new Manager instance with the given client. +func NewManager(client *rest.Client) *Manager { + return &Manager{ + Client: client, + } +} + +// Access permission to the entries of a data set. +type Access string + +const ( + AccessNone = Access("NONE") + AccessReadOnly = Access("READ_ONLY") + AccessReadWrite = Access("READ_WRITE") +) + +// Describes a data set to be created. +type CreateSpec struct { + // Name should take the form "com.company.project" to avoid conflict with other uses. + // Must not be empty. + Name string `json:"name"` + + // Description of the data set. + Description string `json:"description"` + + // Host controls access to the data set entries by the ESXi host and the vCenter. + // For example, if the host access is set to NONE, the entries of this data set + // will not be accessible through the vCenter API. + // Must not be empty. + Host Access `json:"host"` + + // Guest controls access to the data set entries by the guest OS of the VM (i.e. in-guest APIs). + // For example, if the guest access is set to READ_ONLY, it will be forbidden + // to create, delete, and update entries in this data set via the VMware Guest SDK. + // Must not be empty. + Guest Access `json:"guest"` + + // OmitFromSnapshotAndClone controls whether the data set is included in snapshots and clones of the VM. + // When a VM is reverted to a snapshot, any data set with OmitFromSnapshotAndClone=true will be destroyed. + // Default is false. + OmitFromSnapshotAndClone *bool `json:"omit_from_snapshot_and_clone,omitempty"` +} + +// Describes modifications to a data set. +type UpdateSpec struct { + Description *string `json:"description,omitempty"` + Host *Access `json:"host,omitempty"` + Guest *Access `json:"guest,omitempty"` + OmitFromSnapshotAndClone *bool `json:"omit_from_snapshot_and_clone,omitempty"` +} + +// Data set information. +type Info struct { + Name string `json:"name"` + Description string `json:"description"` + Host Access `json:"host"` + Guest Access `json:"guest"` + Used int `json:"used"` + OmitFromSnapshotAndClone bool `json:"omit_from_snapshot_and_clone"` +} + +// Brief data set information. +type Summary struct { + DataSet string `json:"data_set"` + Name string `json:"name"` + Description string `json:"description"` +} + +const dataSetsPathField = "data-sets" + +func dataSetPath(vm string, dataSet string) string { + return path.Join(internal.VCenterVMPath, url.PathEscape(vm), dataSetsPathField, url.PathEscape(dataSet)) +} + +func dataSetsPath(vm string) string { + return path.Join(internal.VCenterVMPath, url.PathEscape(vm), dataSetsPathField) +} + +const entriesPathField = "entries" + +func entryPath(vm string, dataSet string, key string) string { + return path.Join(internal.VCenterVMPath, url.PathEscape(vm), dataSetsPathField, url.PathEscape(dataSet), entriesPathField, url.PathEscape(key)) +} + +func entriesPath(vm string, dataSet string) string { + return path.Join(internal.VCenterVMPath, url.PathEscape(vm), dataSetsPathField, url.PathEscape(dataSet), entriesPathField) +} + +// CreateDataSet creates a data set associated with the given virtual machine. 
+func (c *Manager) CreateDataSet(ctx context.Context, vm string, spec *CreateSpec) (string, error) { + url := c.Resource(dataSetsPath(vm)) + var res string + err := c.Do(ctx, url.Request(http.MethodPost, spec), &res) + return res, err +} + +// DeleteDataSet deletes an existing data set from the given virtual machine. +// The operation will fail if the data set is not empty. +// Set the force flag to delete a non-empty data set. +func (c *Manager) DeleteDataSet(ctx context.Context, vm string, dataSet string, force bool) error { + url := c.Resource(dataSetPath(vm, dataSet)) + if force { + url.WithParam("force", strconv.FormatBool(force)) + } + return c.Do(ctx, url.Request(http.MethodDelete), nil) +} + +// GetDataSet retrieves information about the given data set. +func (c *Manager) GetDataSet(ctx context.Context, vm string, dataSet string) (*Info, error) { + url := c.Resource(dataSetPath(vm, dataSet)) + var res Info + err := c.Do(ctx, url.Request(http.MethodGet), &res) + return &res, err +} + +// UpdateDataSet modifies the given data set. +func (c *Manager) UpdateDataSet(ctx context.Context, vm string, dataSet string, spec *UpdateSpec) error { + url := c.Resource(dataSetPath(vm, dataSet)) + return c.Do(ctx, url.Request(http.MethodPatch, spec), nil) +} + +// ListDataSets returns a list of brief descriptions of the data sets on with the given virtual machine. +func (c *Manager) ListDataSets(ctx context.Context, vm string) ([]Summary, error) { + url := c.Resource(dataSetsPath(vm)) + var res []Summary + err := c.Do(ctx, url.Request(http.MethodGet), &res) + return res, err +} + +// SetEntry creates or updates an entry in the given data set. +// If an entry with the given key already exists, it will be overwritten. +// The key can be at most 4096 bytes. The value can be at most 1MB. +func (c *Manager) SetEntry(ctx context.Context, vm string, dataSet string, key string, value string) error { + url := c.Resource(entryPath(vm, dataSet, key)) + return c.Do(ctx, url.Request(http.MethodPut, value), nil) +} + +// GetEntry returns the value of the data set entry with the given key. +func (c *Manager) GetEntry(ctx context.Context, vm string, dataSet string, key string) (string, error) { + url := c.Resource(entryPath(vm, dataSet, key)) + var res string + err := c.Do(ctx, url.Request(http.MethodGet), &res) + return res, err +} + +// DeleteEntry removes an existing entry from the given data set. +func (c *Manager) DeleteEntry(ctx context.Context, vm string, dataSet string, key string) error { + url := c.Resource(entryPath(vm, dataSet, key)) + return c.Do(ctx, url.Request(http.MethodDelete), nil) +} + +// ListEntries returns a list of all entry keys in the given data set. 
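The data set Manager added here is a thin REST wrapper; everything needed to drive it is an authenticated vAPI `rest.Client` and the VM's managed object ID. A minimal usage sketch follows (illustrative only: the package name `example`, the helper `createAndUseDataSet`, the data set name `com.example.demo`, and the VM ID are hypothetical; the Manager methods, CreateSpec fields, and Access values are the ones declared in this file):

```go
package example

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/vapi/rest"
	"github.com/vmware/govmomi/vapi/vm/dataset"
)

// createAndUseDataSet creates a data set on the given VM, writes one entry,
// and reads it back. rc must be an authenticated vAPI REST client and vmID
// the VM's managed object ID; the VM needs virtual hardware version 20+.
func createAndUseDataSet(ctx context.Context, rc *rest.Client, vmID string) error {
	m := dataset.NewManager(rc)

	// Create a data set that both the host side (vCenter/ESXi APIs) and the
	// guest OS are allowed to read and write.
	id, err := m.CreateDataSet(ctx, vmID, &dataset.CreateSpec{
		Name:        "com.example.demo",
		Description: "example data set",
		Host:        dataset.AccessReadWrite,
		Guest:       dataset.AccessReadWrite,
	})
	if err != nil {
		return err
	}

	// Store a single key/value entry in the new data set and read it back.
	if err := m.SetEntry(ctx, vmID, id, "greeting", "hello"); err != nil {
		return err
	}
	value, err := m.GetEntry(ctx, vmID, id, "greeting")
	if err != nil {
		return err
	}
	fmt.Println(value)
	return nil
}
```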
+func (c *Manager) ListEntries(ctx context.Context, vm string, dataSet string) ([]string, error) { + url := c.Resource(entriesPath(vm, dataSet)) + var res []string + err := c.Do(ctx, url.Request(http.MethodGet), &res) + return res, err +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1/doc.go b/vendor/github.com/vmware/govmomi/vapi/vm/internal/internal.go similarity index 55% rename from vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1/doc.go rename to vendor/github.com/vmware/govmomi/vapi/vm/internal/internal.go index d8a1f5dad2..b016caa1a7 100644 --- a/vendor/github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1/doc.go +++ b/vendor/github.com/vmware/govmomi/vapi/vm/internal/internal.go @@ -1,11 +1,11 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright (c) 2023-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package -// +groupName=snapshot.storage.k8s.io +package internal -package v1beta1 +const ( + // VCenterVMPath is the REST endpoint for the virtual machine API + VCenterVMPath = "/api/vcenter/vm" + // LegacyVCenterVMPath is the legacy REST endpoint for the virtual machine API, relative to the "/rest" base path + LegacyVCenterVMPath = "/vcenter/vm" +) diff --git a/vendor/github.com/vmware/govmomi/vim25/client.go b/vendor/github.com/vmware/govmomi/vim25/client.go index b14cea8520..6101330953 100644 --- a/vendor/github.com/vmware/govmomi/vim25/client.go +++ b/vendor/github.com/vmware/govmomi/vim25/client.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2015-2016 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -32,7 +32,7 @@ import ( const ( Namespace = "vim25" - Version = "7.0" + Version = "8.0.2.0" Path = "/sdk" ) diff --git a/vendor/github.com/vmware/govmomi/vim25/json/LICENSE b/vendor/github.com/vmware/govmomi/vim25/json/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/json/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vmware/govmomi/vim25/json/README.md b/vendor/github.com/vmware/govmomi/vim25/json/README.md new file mode 100644 index 0000000000..6cbe80349a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/json/README.md @@ -0,0 +1,9 @@ +# JSON with Discriminators + +The source code in this directory was copied from Go 1.17.13's `encoding/json` package in order to add support for JSON discriminators. Please use the following command to review the diff: + +```shell +C1="$(git log --pretty=format:'%h' --no-patch --grep='Vendor Go 1.17.13 encoding/json')" && \ +C2="$(git log --pretty=format:'%h' --no-patch --grep='JSON Encoding w Discriminator Support')" && \ +git diff "${C1}".."${C2}" +``` diff --git a/vendor/github.com/vmware/govmomi/vim25/json/decode.go b/vendor/github.com/vmware/govmomi/vim25/json/decode.go new file mode 100644 index 0000000000..6a92a2410b --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/json/decode.go @@ -0,0 +1,1319 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "encoding" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. If v is nil or not a pointer, +// Unmarshal returns an InvalidUnmarshalError. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalJSON method, including +// when the input is a JSON null. +// Otherwise, if the value implements encoding.TextUnmarshaler +// and the input is a JSON quoted string, Unmarshal calls that value's +// UnmarshalText method with the unquoted form of the string. 
+// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. By +// default, object keys which don't have a corresponding struct field are +// ignored (see Decoder.DisallowUnknownFields for an alternative). +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a map, Unmarshal first establishes a map to +// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores +// key-value pairs from the JSON object into the map. The map's key type must +// either be any string type, an integer, implement json.Unmarshaler, or +// implement encoding.TextUnmarshaler. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. In any +// case, it's not guaranteed that all the remaining fields following +// the problematic one will be unmarshaled into the target object. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +// +// By convention, to approximate the behavior of Unmarshal itself, +// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. 
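The null-as-no-op convention described above is easy to miss; a minimal sketch of a custom Unmarshaler that follows it (the `Duration` wrapper is purely illustrative and uses the standard `encoding/json` for the inner string decode):

```go
package example

import (
	"encoding/json"
	"time"
)

// Duration wraps time.Duration so it can be decoded from a quoted JSON
// string such as "150ms".
type Duration struct {
	time.Duration
}

// UnmarshalJSON treats a JSON null as a no-op, per the convention above,
// and otherwise parses the quoted duration string.
func (d *Duration) UnmarshalJSON(b []byte) error {
	if string(b) == "null" {
		return nil // leave the existing value unchanged
	}
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	v, err := time.ParseDuration(s)
	if err != nil {
		return err
	}
	d.Duration = v
	return nil
}
```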
+type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes + Struct string // name of the struct type containing the field + Field string // the full path from root node to the field +} + +func (e *UnmarshalTypeError) Error() string { + if e.Struct != "" || e.Field != "" { + return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String() + } + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + d.scanWhile(scanSkipSpace) + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + err := d.value(rv) + if err != nil { + return d.addErrorContext(err) + } + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// An errorContext provides context for type errors during decoding. +type errorContext struct { + Struct reflect.Type + FieldStack []string +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // next read offset in data + opcode int // last read result + scan scanner + errorContext *errorContext + savedError error + useNumber bool + disallowUnknownFields bool + + discriminatorTypeFieldName string + discriminatorValueFieldName string + discriminatorToTypeFn DiscriminatorToTypeFunc +} + +// readIndex returns the position of the last byte read. +func (d *decodeState) readIndex() int { + return d.off - 1 +} + +// phasePanicMsg is used as a panic message when we end up with something that +// shouldn't happen. 
It can indicate a bug in the JSON decoder, or that +// something is editing the data slice while the decoder executes. +const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?" + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + if d.errorContext != nil { + d.errorContext.Struct = nil + // Reuse the allocated space for the FieldStack slice. + d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + } + return d +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = d.addErrorContext(err) + } +} + +// addErrorContext returns a new error enhanced with information from d.errorContext +func (d *decodeState) addErrorContext(err error) error { + if d.errorContext != nil && (d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0) { + switch err := err.(type) { + case *UnmarshalTypeError: + err.Struct = d.errorContext.Struct.Name() + err.Field = strings.Join(d.errorContext.FieldStack, ".") + } + } + return err +} + +// skip scans to the end of what was started. +func (d *decodeState) skip() { + s, data, i := &d.scan, d.data, d.off + depth := len(s.parseState) + for { + op := s.step(s, data[i]) + i++ + if len(s.parseState) < depth { + d.off = i + d.opcode = op + return + } + } +} + +// scanNext processes the byte at d.data[d.off]. +func (d *decodeState) scanNext() { + if d.off < len(d.data) { + d.opcode = d.scan.step(&d.scan, d.data[d.off]) + d.off++ + } else { + d.opcode = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +func (d *decodeState) scanWhile(op int) { + s, data, i := &d.scan, d.data, d.off + for i < len(data) { + newOp := s.step(s, data[i]) + i++ + if newOp != op { + d.opcode = newOp + d.off = i + return + } + } + + d.off = len(data) + 1 // mark processed EOF with len+1 + d.opcode = d.scan.eof() +} + +// rescanLiteral is similar to scanWhile(scanContinue), but it specialises the +// common case where we're decoding a literal. The decoder scans the input +// twice, once for syntax errors and to check the length of the value, and the +// second to perform the decoding. +// +// Only in the second step do we use decodeState to tokenize literals, so we +// know there aren't any syntax errors. We can take advantage of that knowledge, +// and scan a literal's bytes much more quickly. +func (d *decodeState) rescanLiteral() { + data, i := d.data, d.off +Switch: + switch data[i-1] { + case '"': // string + for ; i < len(data); i++ { + switch data[i] { + case '\\': + i++ // escaped char + case '"': + i++ // tokenize the closing quote too + break Switch + } + } + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number + for ; i < len(data); i++ { + switch data[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + '.', 'e', 'E', '+', '-': + default: + break Switch + } + } + case 't': // true + i += len("rue") + case 'f': // false + i += len("alse") + case 'n': // null + i += len("ull") + } + if i < len(data) { + d.opcode = stateEndValue(&d.scan, data[i]) + } else { + d.opcode = scanEnd + } + d.off = i + 1 +} + +// value consumes a JSON value from d.data[d.off-1:], decoding into v, and +// reads the following byte ahead. If v is invalid, the value is discarded. +// The first byte of the value has been read already. 
+func (d *decodeState) value(v reflect.Value) error { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray: + if v.IsValid() { + if err := d.array(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginObject: + if v.IsValid() { + if err := d.object(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginLiteral: + // All bytes inside literal return scanContinue op code. + start := d.readIndex() + d.rescanLiteral() + + if v.IsValid() { + if err := d.literalStore(d.data[start:d.readIndex()], v, false); err != nil { + return err + } + } + } + return nil +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray, scanBeginObject: + d.skip() + d.scanNext() + + case scanBeginLiteral: + v := d.literalInterface() + switch v.(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// If it encounters an Unmarshaler, indirect stops and returns that. +// If decodingNull is true, indirect stops at the first settable pointer so it +// can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // Issue #24153 indicates that it is generally not a guaranteed property + // that you may round-trip a reflect.Value by calling Value.Addr().Elem() + // and expect the value to still be settable for values derived from + // unexported embedded struct fields. + // + // The logic below effectively does this when it first addresses the value + // (to satisfy possible pointer methods) and continues to dereference + // subsequent pointers as necessary. + // + // After the first round-trip, we set v back to the original value to + // preserve the original RW flags contained in reflect.Value. + v0 := v + haveAddr := false + + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + haveAddr = true + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. 
+ if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + haveAddr = false + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if decodingNull && v.CanSet() { + break + } + + // Prevent infinite loop if v is an interface pointing to its own address: + // var v interface{} + // v = &v + if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v { + v = v.Elem() + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 && v.CanInterface() { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if !decodingNull { + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + } + + if haveAddr { + v = v0 // restore original value after round-trip Value.Addr().Elem() + haveAddr = false + } else { + v = v.Elem() + } + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into v. +// The first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) error { + // Check for unmarshaler. + u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + ai := d.arrayInterface() + v.Set(reflect.ValueOf(ai)) + return nil + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + case reflect.Array, reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + if err := d.value(v.Index(i)); err != nil { + return err + } + } else { + // Ran out of fixed array: skip. + if err := d.value(reflect.Value{}); err != nil { + return err + } + } + i++ + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } + return nil +} + +var nullLiteral = []byte("null") +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + +// object consumes an object from d.data[d.off-1:], decoding into v. +// The first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) error { + // Check for unmarshaler. 
+ u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + t := v.Type() + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 && !d.isDiscriminatorSet() { + oi := d.objectInterface() + v.Set(reflect.ValueOf(oi)) + return nil + } + + var fields structFields + + // Check type of target: + // struct or + // map[T1]T2 where T1 is string, an integer type, + // or an encoding.TextUnmarshaler + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind, have an integer kind, + // or be an encoding.TextUnmarshaler. + switch t.Key().Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + default: + if !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + fields = cachedTypeFields(t) + // ok + default: + if d.isDiscriminatorSet() { + return d.discriminatorInterfaceDecode(t, v) + } + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + + var mapElem reflect.Value + var origErrorContext errorContext + if d.errorContext != nil { + origErrorContext = *d.errorContext + } + + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read key. + start := d.readIndex() + d.rescanLiteral() + item := d.data[start:d.readIndex()] + key, ok := unquoteBytes(item) + if !ok { + panic(phasePanicMsg) + } + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := t.Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + if i, ok := fields.nameIndex[string(key)]; ok { + // Found an exact name match. + f = &fields.list[i] + } else { + // Fall back to the expensive case-insensitive + // linear search. + for i := range fields.list { + ff := &fields.list[i] + if ff.equalFold(ff.nameBytes, key) { + f = ff + break + } + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + // If a struct embeds a pointer to an unexported type, + // it is not possible to set a newly allocated value + // since the field is unexported. + // + // See https://golang.org/issue/21357 + if !subv.CanSet() { + d.saveError(fmt.Errorf("json: cannot set embedded pointer to unexported struct: %v", subv.Type().Elem())) + // Invalidate subv to ensure d.value(subv) skips over + // the JSON value without assigning it to subv. 
+ subv = reflect.Value{} + destring = false + break + } + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) + d.errorContext.Struct = t + } else if d.disallowUnknownFields { + d.saveError(fmt.Errorf("json: unknown field %q", key)) + } + } + + // Read : before value. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + if err := d.literalStore(nullLiteral, subv, false); err != nil { + return err + } + case string: + if err := d.literalStore([]byte(qv), subv, true); err != nil { + return err + } + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + if err := d.value(subv); err != nil { + return err + } + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kt := t.Key() + var kv reflect.Value + switch { + case reflect.PtrTo(kt).Implements(textUnmarshalerType): + kv = reflect.New(kt) + if err := d.literalStore(item, kv, true); err != nil { + return err + } + kv = kv.Elem() + case kt.Kind() == reflect.String: + kv = reflect.ValueOf(key).Convert(kt) + default: + switch kt.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s := string(key) + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || reflect.Zero(kt).OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.ValueOf(n).Convert(kt) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s := string(key) + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || reflect.Zero(kt).OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.ValueOf(n).Convert(kt) + default: + panic("json: Unexpected key type") // should never occur + } + } + if kv.IsValid() { + if !d.isDiscriminatorSet() || kv.String() != d.discriminatorTypeFieldName { + v.SetMapIndex(kv, subv) + } + } + } + + // Next token must be , or }. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.errorContext != nil { + // Reset errorContext to its original state. + // Keep the same underlying array for FieldStack, to reuse the + // space and avoid unnecessary allocs. + d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)] + d.errorContext.Struct = origErrorContext.Struct + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + return nil +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. +func (d *decodeState) convertNumber(s string) (interface{}, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. 
+// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error { + // Check for unmarshaler. + if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + isNull := item[0] == 'n' // null + u, ut, pv := indirect(v, isNull) + if u != nil { + return u.UnmarshalJSON(item) + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + val := "number" + switch item[0] { + case 'n': + val = "null" + case 't', 'f': + val = "bool" + } + d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())}) + return nil + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + return ut.UnmarshalText(s) + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. + if fromQuoted && string(item) != "null" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := item[0] == 't' + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. 
+ if fromQuoted && string(item) != "true" && string(item) != "false" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + if v.Type() == numberType && !isValidNumber(string(s)) { + return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item) + } + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + // s must be a valid number, because it's + // already been tokenized. 
+ v.SetString(s) + break + } + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetFloat(n) + } + } + return nil +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() (val interface{}) { + switch d.opcode { + default: + panic(phasePanicMsg) + case scanBeginArray: + val = d.arrayInterface() + d.scanNext() + case scanBeginObject: + val = d.objectInterface() + d.scanNext() + case scanBeginLiteral: + val = d.literalInterface() + } + return +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read string key. + start := d.readIndex() + d.rescanLiteral() + item := d.data[start:d.readIndex()] + key, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + + // Read : before value. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. 
+ if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + return m +} + +// literalInterface consumes and returns a literal from d.data[d.off-1:] and +// it reads the following byte ahead. The first byte of the literal has been +// read already (that's how the caller knows it's a literal). +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.readIndex() + d.rescanLiteral() + + item := d.data[start:d.readIndex()] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + panic(phasePanicMsg) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var r rune + for _, c := range s[2:6] { + switch { + case '0' <= c && c <= '9': + c = c - '0' + case 'a' <= c && c <= 'f': + c = c - 'a' + 10 + case 'A' <= c && c <= 'F': + c = c - 'A' + 10 + default: + return -1 + } + r = r*16 + rune(c) + } + return r +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. 
+ default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/github.com/vmware/govmomi/vim25/json/discriminator.go b/vendor/github.com/vmware/govmomi/vim25/json/discriminator.go new file mode 100644 index 0000000000..ce315dd527 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/json/discriminator.go @@ -0,0 +1,568 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "sync" +) + +// DiscriminatorToTypeFunc is used to get a reflect.Type from its +// discriminator. +type DiscriminatorToTypeFunc func(discriminator string) (reflect.Type, bool) + +// TypeToDiscriminatorFunc is used to get a discriminator string from a +// reflect.Type. Empty return value suppresses discriminator rendering. +type TypeToDiscriminatorFunc func(reflect.Type) (discriminator string) + +// DefaultDiscriminatorFunc is shorthand for the ShortName func and is used when +// no other discriminator func is set explicitly +var DefaultDiscriminatorFunc = ShortName + +// ShortName returns the type name in golang without the package name +func ShortName(t reflect.Type) (discriminator string) { + tn := t.Name() + if tn == "" { + return t.String() + } + return tn +} + +// FullName return the name of the type prefixed with the package name as +// appropriate +func FullName(t reflect.Type) (discriminator string) { + tn := t.Name() + if tn == "" { + return t.String() + } + if pp := t.PkgPath(); pp != "" { + return fmt.Sprintf("%s.%s", pp, tn) + } + return tn +} + +// DiscriminatorEncodeMode is a mask that describes the different encode +// options. +type DiscriminatorEncodeMode uint8 + +const ( + // DiscriminatorEncodeTypeNameRootValue causes the type name to be encoded + // for the root value. + DiscriminatorEncodeTypeNameRootValue DiscriminatorEncodeMode = 1 << iota + + // DiscriminatorEncodeTypeNameAllObjects causes the type name to be encoded + // for all struct and map values. Please note this specifically does not + // apply to the root value. + DiscriminatorEncodeTypeNameAllObjects + + // DiscriminatorEncodeTypeNameIfRequired is the default behavior when + // the discriminator is set, and the type name is only encoded if required. + DiscriminatorEncodeTypeNameIfRequired DiscriminatorEncodeMode = 0 +) + +func (m DiscriminatorEncodeMode) root() bool { + return m&DiscriminatorEncodeTypeNameRootValue > 0 +} + +func (m DiscriminatorEncodeMode) all() bool { + return m&DiscriminatorEncodeTypeNameAllObjects > 0 +} + +func (d *decodeState) isDiscriminatorSet() bool { + return d.discriminatorTypeFieldName != "" && + d.discriminatorValueFieldName != "" +} + +// discriminatorOpType describes the current operation related to +// discriminators when reading a JSON object's fields. +type discriminatorOpType uint8 + +const ( + // discriminatorOpTypeNameField indicates the discriminator type name + // field was discovered. + discriminatorOpTypeNameField = iota + 1 + + // discriminatorOpValueField indicates the discriminator value field + // was discovered. + discriminatorOpValueField +) + +func (d *decodeState) discriminatorGetValue() (reflect.Value, error) { + // Record the current offset so we know where the data starts. 
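(Aside, not part of the patch.) ShortName, FullName, and the DiscriminatorEncodeMode flags defined above are exported by this package. A small sketch of how they behave, assuming the package is imported from its upstream module path; net/url.URL is just a stand-in type:

```go
package main

import (
	"fmt"
	"net/url"
	"reflect"

	vimjson "github.com/vmware/govmomi/vim25/json"
)

func main() {
	t := reflect.TypeOf(url.URL{})
	fmt.Println(vimjson.ShortName(t)) // "URL"
	fmt.Println(vimjson.FullName(t))  // "net/url.URL"

	// The encode mode is a bit mask, so the flags can be combined.
	mode := vimjson.DiscriminatorEncodeTypeNameRootValue |
		vimjson.DiscriminatorEncodeTypeNameAllObjects
	fmt.Println(mode&vimjson.DiscriminatorEncodeTypeNameAllObjects != 0) // true
}
```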
+ offset := d.readIndex() + + // Create a temporary decodeState used to inspect the current object + // and determine its discriminator type and decode its value. + dd := &decodeState{ + disallowUnknownFields: d.disallowUnknownFields, + useNumber: d.useNumber, + discriminatorToTypeFn: d.discriminatorToTypeFn, + discriminatorTypeFieldName: d.discriminatorTypeFieldName, + discriminatorValueFieldName: d.discriminatorValueFieldName, + } + dd.init(append([]byte{}, d.data[offset:]...)) + defer freeScanner(&dd.scan) + dd.scan.reset() + + var ( + t reflect.Type // the instance of the type + valueOff = -1 // the offset of a possible discriminator value + ) + + dd.scanWhile(scanSkipSpace) + if dd.opcode != scanBeginObject { + panic(phasePanicMsg) + } + + for { + dd.scanWhile(scanSkipSpace) + if dd.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if dd.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read key. + start := dd.readIndex() + dd.rescanLiteral() + item := dd.data[start:dd.readIndex()] + key, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + + // Check to see if the key is related to the discriminator. + var discriminatorOp discriminatorOpType + switch key { + case d.discriminatorTypeFieldName: + discriminatorOp = discriminatorOpTypeNameField + case d.discriminatorValueFieldName: + discriminatorOp = discriminatorOpValueField + } + + // Read : before value. + if dd.opcode == scanSkipSpace { + dd.scanWhile(scanSkipSpace) + } + + if dd.opcode != scanObjectKey { + panic(phasePanicMsg) + } + dd.scanWhile(scanSkipSpace) + + // Read value. + valOff := dd.readIndex() + val := dd.valueInterface() + + switch discriminatorOp { + case discriminatorOpTypeNameField: + tn, ok := val.(string) + if !ok { + return reflect.Value{}, fmt.Errorf( + "json: discriminator type at offset %d is not string", + offset+valOff) + } + if tn == "" { + return reflect.Value{}, fmt.Errorf( + "json: discriminator type at offset %d is empty", + offset+valOff) + } + + // Parse the type name into a type instance. + ti, err := discriminatorParseTypeName(tn, d.discriminatorToTypeFn) + if err != nil { + return reflect.Value{}, err + } + + // Assign the type instance to the outer variable, t. + t = ti + + // Primitive types and types with Unmarshaler are wrapped in a + // structure with type and value fields. Structures and Maps not + // implementing Unmarshaler use discriminator embedded within their + // content. + if useNestedDiscriminator(t) { + // If the type is a map or a struct not implementing Unmarshaler + // then it is not necessary to continue walking over the current + // JSON object since it will be completely re-scanned to decode + // its value into the discovered type. + dd.opcode = scanEndObject + } else { + // Otherwise if the value offset has been discovered then it is + // safe to stop walking over the current JSON object as well. + if valueOff > -1 { + dd.opcode = scanEndObject + } + } + case discriminatorOpValueField: + valueOff = valOff + + // If the type has been discovered then it is safe to stop walking + // over the current JSON object. + if t != nil { + dd.opcode = scanEndObject + } + } + + // Next token must be , or }. + if dd.opcode == scanSkipSpace { + dd.scanWhile(scanSkipSpace) + } + if dd.opcode == scanEndObject { + break + } + if dd.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + + // If there is not a type discriminator then return early. 
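(Aside, not part of the patch.) The comment above distinguishes two wire shapes that discriminatorGetValue has to handle. A hedged illustration using the hypothetical field names "_typeName" and "_value" and a made-up "ClusterSpec" type; the real names are whatever discriminatorTypeFieldName and discriminatorValueFieldName are configured to be:

```go
package main

import "fmt"

func main() {
	// A struct or map that does not implement Unmarshaler carries the
	// discriminator inline with its own fields ("nested"), so the whole
	// object is re-decoded into the discovered type:
	nested := `{"_typeName":"ClusterSpec","replicas":3}`

	// A primitive, slice, or Unmarshaler-implementing type is wrapped in an
	// envelope whose value field holds the actual payload, so only that
	// field is decoded into the discovered type:
	wrapped := `{"_typeName":"int32","_value":3}`

	fmt.Println(nested)
	fmt.Println(wrapped)
}
```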
+ if t == nil { + return reflect.Value{}, fmt.Errorf("json: missing discriminator") + } + + // Instantiate a new instance of the discriminated type. + var v reflect.Value + switch t.Kind() { + case reflect.Slice: + // MakeSlice returns a value that is not addressable. + // Instead, use MakeSlice to get the type, then use + // reflect.New to create an addressable value. + v = reflect.New(reflect.MakeSlice(t, 0, 0).Type()).Elem() + case reflect.Map: + // MakeMap returns a value that is not addressable. + // Instead, use MakeMap to get the type, then use + // reflect.New to create an addressable value. + v = reflect.New(reflect.MakeMap(t).Type()).Elem() + case reflect.Complex64, reflect.Complex128: + return reflect.Value{}, fmt.Errorf("json: unsupported discriminator type: %s", t.Kind()) + default: + v = reflect.New(t) + } + + // Reset the decode state to prepare for decoding the data. + dd.scan.reset() + + if useNestedDiscriminator(t) { + // Set the offset to zero since the entire object will be decoded + // into v. + dd.off = 0 + } else { + // Set the offset to what it was before the discriminator value was + // read so only the value field is decoded into v. + dd.off = valueOff + } + // This will initialize the correct scan step and op code. + dd.scanWhile(scanSkipSpace) + + // Decode the data into the value. + if err := dd.value(v); err != nil { + return reflect.Value{}, err + } + + // Check the saved error as well since the decoder.value function does not + // always return an error. If the reflected value is still zero, then it is + // likely the decoder was unable to decode the value. + if err := dd.savedError; err != nil { + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + v = v.Elem() + } + if v.IsZero() { + return reflect.Value{}, err + } + } + + return v, nil +} + +func (d *decodeState) discriminatorInterfaceDecode(t reflect.Type, v reflect.Value) error { + + defer func() { + // Advance the decode state, throwing away the value. 
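(Aside, not part of the patch.) The MakeSlice/MakeMap comments above hinge on addressability; a quick stdlib illustration of why the code goes through reflect.New(...).Elem() rather than using the made value directly:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	t := reflect.TypeOf([]string(nil))

	direct := reflect.MakeSlice(t, 0, 0) // not addressable, cannot be decoded into
	viaNew := reflect.New(t).Elem()      // addressable and settable

	fmt.Println(direct.CanAddr(), direct.CanSet()) // false false
	fmt.Println(viaNew.CanAddr(), viaNew.CanSet()) // true true
}
```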
+ _ = d.objectInterface() + }() + + dv, err := d.discriminatorGetValue() + if err != nil { + return err + } + + switch dv.Kind() { + case reflect.Map, reflect.Slice: + if dv.Type().AssignableTo(t) { + v.Set(dv) + return nil + } + if pdv := dv.Addr(); pdv.Type().AssignableTo(t) { + v.Set(pdv) + return nil + } + case reflect.Ptr: + if dve := dv.Elem(); dve.Type().AssignableTo(t) { + v.Set(dve) + return nil + } + if dv.Type().AssignableTo(t) { + v.Set(dv) + return nil + } + } + + return fmt.Errorf("json: unsupported discriminator kind: %s", dv.Kind()) +} + +func (o encOpts) isDiscriminatorSet() bool { + return o.discriminatorTypeFieldName != "" && + o.discriminatorValueFieldName != "" +} + +func discriminatorInterfaceEncode(e *encodeState, v reflect.Value, opts encOpts) { + v = v.Elem() + + if v.Type().Implements(marshalerType) { + discriminatorValue := opts.discriminatorValueFn(v.Type()) + if discriminatorValue == "" { + marshalerEncoder(e, v, opts) + } + e.WriteString(`{"`) + e.WriteString(opts.discriminatorTypeFieldName) + e.WriteString(`":"`) + e.WriteString(discriminatorValue) + e.WriteString(`","`) + e.WriteString(opts.discriminatorValueFieldName) + e.WriteString(`":`) + marshalerEncoder(e, v, opts) + e.WriteByte('}') + return + } + + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Invalid: + e.error(&UnsupportedValueError{v, fmt.Sprintf("invalid kind: %s", v.Kind())}) + case reflect.Map: + e.discriminatorEncodeTypeName = true + newMapEncoder(v.Type())(e, v, opts) + case reflect.Struct: + e.discriminatorEncodeTypeName = true + newStructEncoder(v.Type())(e, v, opts) + case reflect.Ptr: + discriminatorInterfaceEncode(e, v, opts) + default: + discriminatorValue := opts.discriminatorValueFn(v.Type()) + if discriminatorValue == "" { + e.reflectValue(v, opts) + return + } + e.WriteString(`{"`) + e.WriteString(opts.discriminatorTypeFieldName) + e.WriteString(`":"`) + e.WriteString(discriminatorValue) + e.WriteString(`","`) + e.WriteString(opts.discriminatorValueFieldName) + e.WriteString(`":`) + e.reflectValue(v, opts) + e.WriteByte('}') + } +} + +func discriminatorMapEncode(e *encodeState, v reflect.Value, opts encOpts) { + if !e.discriminatorEncodeTypeName && !opts.discriminatorEncodeMode.all() { + return + } + discriminatorValue := opts.discriminatorValueFn(v.Type()) + if discriminatorValue == "" { + return + } + e.WriteByte('"') + e.WriteString(opts.discriminatorTypeFieldName) + e.WriteString(`":"`) + e.WriteString(discriminatorValue) + e.WriteByte('"') + if v.Len() > 0 { + e.WriteByte(',') + } + e.discriminatorEncodeTypeName = false +} + +func discriminatorStructEncode(e *encodeState, v reflect.Value, opts encOpts) byte { + if !e.discriminatorEncodeTypeName && !opts.discriminatorEncodeMode.all() { + return '{' + } + discriminatorValue := opts.discriminatorValueFn(v.Type()) + if discriminatorValue == "" { + return '{' + } + e.WriteString(`{"`) + e.WriteString(opts.discriminatorTypeFieldName) + e.WriteString(`":"`) + e.WriteString(discriminatorValue) + e.WriteByte('"') + e.discriminatorEncodeTypeName = false + return ',' +} + +var unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + +// Discriminator is nested in map and struct unless they implement Unmarshaler. 
+func useNestedDiscriminator(t reflect.Type) bool { + if t.Implements(unmarshalerType) || reflect.PtrTo(t).Implements(unmarshalerType) { + return false + } + kind := t.Kind() + if kind == reflect.Struct || kind == reflect.Map { + return true + } + return false +} + +var discriminatorTypeRegistry = map[string]reflect.Type{ + "uint": reflect.TypeOf(uint(0)), + "uint8": reflect.TypeOf(uint8(0)), + "uint16": reflect.TypeOf(uint16(0)), + "uint32": reflect.TypeOf(uint32(0)), + "uint64": reflect.TypeOf(uint64(0)), + "uintptr": reflect.TypeOf(uintptr(0)), + "int": reflect.TypeOf(int(0)), + "int8": reflect.TypeOf(int8(0)), + "int16": reflect.TypeOf(int16(0)), + "int32": reflect.TypeOf(int32(0)), + "int64": reflect.TypeOf(int64(0)), + "float32": reflect.TypeOf(float32(0)), + "float64": reflect.TypeOf(float64(0)), + "bool": reflect.TypeOf(true), + "string": reflect.TypeOf(""), + "any": reflect.TypeOf((*interface{})(nil)).Elem(), + "interface{}": reflect.TypeOf((*interface{})(nil)).Elem(), + "interface {}": reflect.TypeOf((*interface{})(nil)).Elem(), + + // Not supported, but here to prevent the decoder from panicing + // if encountered. + "complex64": reflect.TypeOf(complex64(0)), + "complex128": reflect.TypeOf(complex128(0)), +} + +// discriminatorPointerTypeCache caches the pointer type for another type. +// For example, a key that was the int type would have a value that is the +// *int type. +var discriminatorPointerTypeCache sync.Map // map[reflect.Type]reflect.Type + +// cachedPointerType returns the pointer type for another and avoids repeated +// work by using a cache. +func cachedPointerType(t reflect.Type) reflect.Type { + if value, ok := discriminatorPointerTypeCache.Load(t); ok { + return value.(reflect.Type) + } + pt := reflect.New(t).Type() + value, _ := discriminatorPointerTypeCache.LoadOrStore(t, pt) + return value.(reflect.Type) +} + +var ( + mapPatt = regexp.MustCompile(`^\*?map\[([^\]]+)\](.+)$`) + arrayPatt = regexp.MustCompile(`^\*?\[(\d+)\](.+)$`) + slicePatt = regexp.MustCompile(`^\*?\[\](.+)$`) +) + +// discriminatorParseTypeName returns a reflect.Type for the given type name. +func discriminatorParseTypeName( + typeName string, + typeFn DiscriminatorToTypeFunc) (reflect.Type, error) { + + // Check to see if the type is an array, map, or slice. + var ( + aln = -1 // array length + etn string // map or slice element type name + ktn string // map key type name + ) + if m := arrayPatt.FindStringSubmatch(typeName); len(m) > 0 { + i, err := strconv.Atoi(m[1]) + if err != nil { + return nil, err + } + aln = i + etn = m[2] + } else if m := slicePatt.FindStringSubmatch(typeName); len(m) > 0 { + etn = m[1] + } else if m := mapPatt.FindStringSubmatch(typeName); len(m) > 0 { + ktn = m[1] + etn = m[2] + } + + // indirectTypeName checks to see if the type name begins with a + // "*" characters. If it does, then the type name sans the "*" + // character is returned along with a true value indicating the + // type is a pointer. Otherwise the original type name is returned + // along with a false value. + indirectTypeName := func(tn string) (string, bool) { + if len(tn) > 1 && tn[0] == '*' { + return tn[1:], true + } + return tn, false + } + + lookupType := func(tn string) (reflect.Type, bool) { + // Get the actual type name and a flag indicating whether the + // type is a pointer. + n, p := indirectTypeName(tn) + + var t reflect.Type + ok := false + // look up the type in the external registry to allow name override. 
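(Aside, not part of the patch.) discriminatorParseTypeName relies on the arrayPatt, slicePatt, and mapPatt expressions defined above to split composite type names into their element and key parts before looking each part up. The same expressions are repeated standalone below to show what they capture; the type names fed to them are arbitrary examples:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same expressions as mapPatt, arrayPatt, and slicePatt above.
	mapPatt := regexp.MustCompile(`^\*?map\[([^\]]+)\](.+)$`)
	arrayPatt := regexp.MustCompile(`^\*?\[(\d+)\](.+)$`)
	slicePatt := regexp.MustCompile(`^\*?\[\](.+)$`)

	fmt.Println(mapPatt.FindStringSubmatch("map[string]int32"))   // [map[string]int32 string int32]
	fmt.Println(arrayPatt.FindStringSubmatch("[3]*float64"))      // [[3]*float64 3 *float64]
	fmt.Println(slicePatt.FindStringSubmatch("*[]VirtualDevice")) // [*[]VirtualDevice VirtualDevice]
}
```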
+ if typeFn != nil { + t, ok = typeFn(n) + } + if !ok { + // Use the built-in registry if the external registry fails + if t, ok = discriminatorTypeRegistry[n]; !ok { + return nil, false + } + } + // If the type was a pointer then get the type's pointer type. + if p { + t = cachedPointerType(t) + } + return t, true + } + + var t reflect.Type + + if ktn == "" && etn != "" { + et, ok := lookupType(etn) + if !ok { + return nil, fmt.Errorf("json: invalid array/slice element type: %s", etn) + } + if aln > -1 { + // Array + t = reflect.ArrayOf(aln, et) + } else { + // Slice + t = reflect.SliceOf(et) + } + } else if ktn != "" && etn != "" { + // Map + kt, ok := lookupType(ktn) + if !ok { + return nil, fmt.Errorf("json: invalid map key type: %s", ktn) + } + et, ok := lookupType(etn) + if !ok { + return nil, fmt.Errorf("json: invalid map element type: %s", etn) + } + t = reflect.MapOf(kt, et) + } else { + var ok bool + if t, ok = lookupType(typeName); !ok { + return nil, fmt.Errorf("json: invalid discriminator type: %s", typeName) + } + } + + return t, nil +} diff --git a/vendor/github.com/vmware/govmomi/vim25/json/encode.go b/vendor/github.com/vmware/govmomi/vim25/json/encode.go new file mode 100644 index 0000000000..0c8aa202fe --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/json/encode.go @@ -0,0 +1,1453 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON as defined in +// RFC 7159. The mapping between JSON and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method and encodes the result as a JSON string. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// So that the JSON will be safe to embed inside HTML