Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Bump velero to 1.9.2 #84

Closed
wants to merge 6 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 7 additions & 7 deletions hack/config.sh
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,12 @@
#See the License for the specific language governing permissions and
#limitations under the License.

# KUBEVIRT variables have to be set before common.sh is sourced
KUBEVIRT_MEMORY_SIZE=${KUBEVIRT_MEMORY_SIZE:-9216M}
KUBEVIRT_PROVIDER=${KUBEVIRT_PROVIDER:-k8s-1.23}
KUBEVIRT_DEPLOY_CDI=true
KUBEVIRT_VERSION=${KUBEVIRT_VERSION:-v0.57.0}
KUBEVIRT_DEPLOYMENT_TIMEOUT=${KUBEVIRT_DEPLOYMENT_TIMEOUT:-480}

if [ -f cluster-up/hack/common.sh ]; then
source cluster-up/hack/common.sh
Expand Down Expand Up @@ -45,13 +50,8 @@ _ssh=${KUBEVIRTCI_PATH}ssh.sh
DEPLOYMENT_TIMEOUT=600
USE_CSI=${USE_CSI:-1}
USE_RESTIC=${USE_RESTIC:-0}
CSI_PLUGIN=${CSI_PLUGIN:-velero/velero-plugin-for-csi:v0.2.0}

KUBEVIRT_VERSION=${KUBEVIRT_VERSION:-v0.57.0}
KUBEVIRT_PROVIDER=${KUBEVIRT_PROVIDER:-k8s-1.23}
KUBEVIRT_DEPLOYMENT_TIMEOUT=${KUBEVIRT_DEPLOYMENT_TIMEOUT:-480}
KUBEVIRT_DEPLOY_CDI=true
VELERO_VERSION=${VELERO_VERSION:-v1.8.1}
CSI_PLUGIN=${CSI_PLUGIN:-velero/velero-plugin-for-csi:v0.3.1}
VELERO_VERSION=${VELERO_VERSION:-v1.9.2}
VELERO_DIR=_output/velero/bin

source cluster-up/hack/config.sh
Expand Down
2 changes: 1 addition & 1 deletion hack/velero/deploy-velero.sh
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ fi

kvp::fetch_velero

PLUGINS=velero/velero-plugin-for-aws:v1.3.0
PLUGINS=velero/velero-plugin-for-aws:v1.5.1
FEATURES=""

if [[ "${USE_CSI}" == "1" ]]; then
Expand Down
3 changes: 2 additions & 1 deletion tests/framework/backup.go
Original file line number Diff line number Diff line change
Expand Up @@ -131,9 +131,10 @@ func CreateBackupForSelector(ctx context.Context, backupName, selector, snapshot
return nil
}

func CreateBackupForResources(ctx context.Context, backupName, resources, snapshotLocation string, backupNamespace string, wait bool) error {
func CreateBackupForResources(ctx context.Context, backupName, resources, includedNamespace, snapshotLocation string, backupNamespace string, wait bool) error {
args := []string{
"create", "backup", backupName,
"--include-namespaces", includedNamespace,
"--include-resources", resources,
"--namespace", backupNamespace,
}
Expand Down
45 changes: 29 additions & 16 deletions tests/framework/framework.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,26 +5,29 @@ import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"sort"
"strconv"
"time"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt-velero-plugin/pkg/util"
"os"
"path/filepath"
"sort"
"strconv"
"time"
)

const (
veleroEntityUriTemplate = "apis/velero.io/v1/namespaces/%s/%s/"
veleroBackup = "backups"
veleroRestore = "restores"
backupNamespaceEnv = "KVP_BACKUP_NS"
regionEnv = "KVP_REGION"
storageClassEnv = "KVP_STORAGE_CLASS"
veleroEntityUriTemplate = "apis/velero.io/v1/namespaces/%s/%s/"
volumeSnapshotEntityUriTemplate = "apis/snapshot.storage.k8s.io/v1/namespaces/%s/%s/"
volumeSnapshotEntityClusterUriTemplate = "apis/snapshot.storage.k8s.io/v1/%s/"
veleroBackup = "backups"
veleroRestore = "restores"
backupNamespaceEnv = "KVP_BACKUP_NS"
regionEnv = "KVP_REGION"
storageClassEnv = "KVP_STORAGE_CLASS"

defaultRegionName = "minio"
defaultBackupNamespace = "velero"
Expand Down Expand Up @@ -142,6 +145,8 @@ func (r *KubernetesReporter) Dump(duration time.Duration) {

r.logRestores(kubeCli)
r.logBackups(kubeCli)
r.logVolumeSnapshots(kubeCli)
r.logVolumeSnapshotContents(kubeCli)

r.logLogs(kubeCli, since)
}
Expand All @@ -168,9 +173,7 @@ func (r *KubernetesReporter) logObjects(elements interface{}, name string) {
fmt.Fprintln(f, string(j))
}

func (r *KubernetesReporter) dumpK8sEntityToFile(kubeCli kubernetes.Interface, entityName string, namespace string, entityURITemplate string) {
requestURI := fmt.Sprintf(entityURITemplate, namespace, entityName)

func (r *KubernetesReporter) dumpK8sEntityToFile(kubeCli kubernetes.Interface, entityName string, requestURI string) {
f, err := os.OpenFile(filepath.Join(r.artifactsDir, fmt.Sprintf("%d_%s.log", r.FailureCount, entityName)),
os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
Expand Down Expand Up @@ -341,9 +344,19 @@ func (r *KubernetesReporter) logVMs(kvClient kubecli.KubevirtClient) {
}

// logBackups dumps every velero Backup resource (all namespaces) to an
// artifact file so failed test runs can be inspected afterwards.
func (r *KubernetesReporter) logBackups(kubeCli kubernetes.Interface) {
	requestURI := fmt.Sprintf(veleroEntityUriTemplate, v1.NamespaceAll, veleroBackup)
	r.dumpK8sEntityToFile(kubeCli, veleroBackup, requestURI)
}

// logRestores dumps every velero Restore resource (all namespaces) to an
// artifact file so failed test runs can be inspected afterwards.
func (r *KubernetesReporter) logRestores(kubeCli kubernetes.Interface) {
	// Fix: the URI was previously built with veleroBackup, which made this
	// dump the backups list a second time instead of the restores.
	r.dumpK8sEntityToFile(kubeCli, veleroRestore, fmt.Sprintf(veleroEntityUriTemplate, v1.NamespaceAll, veleroRestore))
}

// logVolumeSnapshots dumps every namespaced VolumeSnapshot resource to an
// artifact file so failed test runs can be inspected afterwards.
func (r *KubernetesReporter) logVolumeSnapshots(kubeCli kubernetes.Interface) {
	const entityName = "volumesnapshots"
	requestURI := fmt.Sprintf(volumeSnapshotEntityUriTemplate, v1.NamespaceAll, entityName)
	r.dumpK8sEntityToFile(kubeCli, entityName, requestURI)
}

// logVolumeSnapshotContents dumps every cluster-scoped VolumeSnapshotContent
// resource to an artifact file so failed test runs can be inspected afterwards.
func (r *KubernetesReporter) logVolumeSnapshotContents(kubeCli kubernetes.Interface) {
	const entityName = "volumesnapshotcontents"
	requestURI := fmt.Sprintf(volumeSnapshotEntityClusterUriTemplate, entityName)
	r.dumpK8sEntityToFile(kubeCli, entityName, requestURI)
}
6 changes: 3 additions & 3 deletions tests/framework/storage.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ func FindDataVolume(kvClient kubecli.KubevirtClient, namespace string, dataVolum

// WaitForDataVolumePhase waits for DV's phase to be in a particular phase (Pending, Bound, or Lost)
func WaitForDataVolumePhase(kvClient kubecli.KubevirtClient, namespace string, phase cdiv1.DataVolumePhase, dataVolumeName string) error {
ginkgo.By(fmt.Sprintf("INFO: Waiting for status %s\n", phase))
ginkgo.By(fmt.Sprintf("INFO: Waiting for status %s", phase))
var lastPhase cdiv1.DataVolumePhase

err := wait.PollImmediate(pollInterval, waitTime, func() (bool, error) {
Expand All @@ -106,12 +106,12 @@ func WaitForDataVolumePhase(kvClient kubecli.KubevirtClient, namespace string, p
if dataVolume.Status.Phase != phase {
if dataVolume.Status.Phase != lastPhase {
lastPhase = dataVolume.Status.Phase
ginkgo.By(fmt.Sprintf("\nINFO: Waiting for status %s, got %s", phase, dataVolume.Status.Phase))
ginkgo.By(fmt.Sprintf("INFO: Waiting for status %s, got %s", phase, dataVolume.Status.Phase))
}
return false, err
}

ginkgo.By(fmt.Sprintf("\nINFO: Waiting for status %s, got %s\n", phase, dataVolume.Status.Phase))
ginkgo.By(fmt.Sprintf("INFO: Waiting for status %s, got %s", phase, dataVolume.Status.Phase))
return true, nil
})
if err != nil {
Expand Down
10 changes: 5 additions & 5 deletions tests/framework/vm.go
Original file line number Diff line number Diff line change
Expand Up @@ -278,7 +278,7 @@ func DeleteVirtualMachineInstance(client kubecli.KubevirtClient, namespace, name
}

func WaitForVirtualMachineInstanceCondition(client kubecli.KubevirtClient, namespace, name string, conditionType v1.VirtualMachineInstanceConditionType) (bool, error) {
ginkgo.By(fmt.Sprintf("Waiting for %s condition\n", conditionType))
ginkgo.By(fmt.Sprintf("Waiting for %s condition", conditionType))
var result bool

err := wait.PollImmediate(pollInterval, waitTime, func() (bool, error) {
Expand All @@ -290,7 +290,7 @@ func WaitForVirtualMachineInstanceCondition(client kubecli.KubevirtClient, names
if condition.Type == conditionType && condition.Status == k8sv1.ConditionTrue {
result = true

ginkgo.By(fmt.Sprintf(" got %s\n", conditionType))
ginkgo.By(fmt.Sprintf(" got %s", conditionType))
return true, nil
}
}
Expand All @@ -311,15 +311,15 @@ func WaitForVirtualMachineInstancePhase(client kubecli.KubevirtClient, namespace
return false, err
}

ginkgo.By(fmt.Sprintf("INFO: Waiting for status %s, got %s\n", phase, vmi.Status.Phase))
ginkgo.By(fmt.Sprintf("INFO: Waiting for status %s, got %s", phase, vmi.Status.Phase))
return vmi.Status.Phase == phase, nil
})

return err
}

func WaitForVirtualMachineStatus(client kubecli.KubevirtClient, namespace, name string, statuses ...v1.VirtualMachinePrintableStatus) error {
ginkgo.By(fmt.Sprintf("Waiting for any of %s statuses\n", statuses))
ginkgo.By(fmt.Sprintf("Waiting for any of %s statuses", statuses))

err := wait.PollImmediate(pollInterval, waitTime, func() (bool, error) {
vm, err := client.VirtualMachine(namespace).Get(name, &metav1.GetOptions{})
Expand All @@ -332,7 +332,7 @@ func WaitForVirtualMachineStatus(client kubecli.KubevirtClient, namespace, name

for _, status := range statuses {
if vm.Status.PrintableStatus == status {
ginkgo.By(fmt.Sprintf(" got %s\n", status))
ginkgo.By(fmt.Sprintf(" got %s", status))

return true, nil
}
Expand Down
Loading