Enable the E2E test on Github Action #3912

Merged: 1 commit merged on Jul 1, 2021
113 changes: 113 additions & 0 deletions .github/workflows/e2e-test-kind.yaml
@@ -0,0 +1,113 @@
name: "Run the E2E test on kind"
on:
push:
pull_request:
# Do not run when the change only includes these directories.
paths-ignore:
- "site/**"
- "design/**"
jobs:
# Build the Velero CLI and image once for all Kubernetes versions, and cache it so the fan-out workers can get it.
build:
runs-on: ubuntu-latest
steps:
# Look for a CLI that's made for this PR
- name: Fetch built CLI
id: cli-cache
uses: actions/cache@v2
with:
path: ./_output/bin/linux/amd64/velero
# The cache key a combination of the current PR number and the commit SHA
key: velero-cli-${{ github.event.pull_request.number }}-${{ github.sha }}
- name: Fetch built image
id: image-cache
uses: actions/cache@v2
with:
path: ./velero.tar
# The cache key a combination of the current PR number and the commit SHA
key: velero-image-${{ github.event.pull_request.number }}-${{ github.sha }}
- name: Fetch cached go modules
uses: actions/cache@v2
if: steps.cli-cache.outputs.cache-hit != 'true'
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Check out the code
uses: actions/checkout@v2
if: steps.cli-cache.outputs.cache-hit != 'true' || steps.image-cache.outputs.cache-hit != 'true'
# If no binaries were built for this PR, build it now.
- name: Build Velero CLI
if: steps.cli-cache.outputs.cache-hit != 'true'
run: |
make local
# If no image were built for this PR, build it now.
- name: Build Velero Image
if: steps.image-cache.outputs.cache-hit != 'true'
run: |
IMAGE=velero VERSION=pr-test make container
docker save velero:pr-test -o ./velero.tar
# Run E2E test against all kubernetes versions on kind
run-e2e-test:
needs: build
runs-on: ubuntu-latest
strategy:
matrix:
k8s:
# doesn't cover 1.15 as 1.15 doesn't support "apiextensions.k8s.io/v1" that is needed for the case
#- 1.15.12
- 1.16.15
- 1.17.17
- 1.18.15
- 1.19.7
- 1.20.2
- 1.21.1
fail-fast: false
dsu-igeek marked this conversation as resolved.
Show resolved Hide resolved
steps:
- name: Check out the code
uses: actions/checkout@v2
- name: Install MinIO
run:
docker run -d --rm -p 9000:9000 -e "MINIO_ACCESS_KEY=minio" -e "MINIO_SECRET_KEY=minio123" -e "MINIO_DEFAULT_BUCKETS=bucket,additional-bucket" bitnami/minio:2021.6.17-debian-10-r7
- uses: engineerd/setup-kind@v0.5.0
with:
version: "v0.11.1"
image: "kindest/node:v${{ matrix.k8s }}"
- name: Fetch built CLI
id: cli-cache
uses: actions/cache@v2
with:
path: ./_output/bin/linux/amd64/velero
key: velero-cli-${{ github.event.pull_request.number }}-${{ github.sha }}
- name: Fetch built Image
id: image-cache
uses: actions/cache@v2
with:
path: ./velero.tar
key: velero-image-${{ github.event.pull_request.number }}-${{ github.sha }}
- name: Load Velero Image
run:
kind load image-archive velero.tar
# always try to fetch the cached go modules as the e2e test needs it either
- name: Fetch cached go modules
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Run E2E test
run: |
cat << EOF > /tmp/credential
[default]
aws_access_key_id=minio
aws_secret_access_key=minio123
EOF
GOPATH=~/go CLOUD_PROVIDER=kind \
OBJECT_STORE_PROVIDER=aws BSL_CONFIG=region=minio,s3ForcePathStyle="true",s3Url=http://$(hostname -i):9000 \
CREDS_FILE=/tmp/credential BSL_BUCKET=bucket \
ADDITIONAL_OBJECT_STORE_PROVIDER=aws ADDITIONAL_BSL_CONFIG=region=minio,s3ForcePathStyle="true",s3Url=http://$(hostname -i):9000 \
ADDITIONAL_CREDS_FILE=/tmp/credential ADDITIONAL_BSL_BUCKET=additional-bucket \
VELERO_IMAGE=velero:pr-test \
make -C test/e2e run
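
The final step hands every object-store setting to the Go E2E suite through environment variables; BSL_CONFIG in particular is a comma-separated key=value string pointing the backup storage location at the local MinIO container. As a rough illustration of how such a string can be consumed, here is a standalone sketch; parseBSLConfig is hypothetical and is not Velero's actual parsing code:

package main

import (
    "fmt"
    "os"
    "strings"
)

// parseBSLConfig splits a comma-separated key=value string such as
// `region=minio,s3ForcePathStyle="true",s3Url=http://10.0.0.5:9000`
// into a map. Illustrative only; the real e2e harness has its own handling.
func parseBSLConfig(raw string) map[string]string {
    cfg := map[string]string{}
    for _, pair := range strings.Split(raw, ",") {
        kv := strings.SplitN(pair, "=", 2)
        if len(kv) != 2 {
            continue
        }
        cfg[kv[0]] = strings.Trim(kv[1], `"`)
    }
    return cfg
}

func main() {
    cfg := parseBSLConfig(os.Getenv("BSL_CONFIG"))
    fmt.Printf("object store region=%q s3Url=%q\n", cfg["region"], cfg["s3Url"])
}
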
1 change: 1 addition & 0 deletions changelogs/unreleased/3912-ywk253100
@@ -0,0 +1 @@
Run the E2E test with kind (provisioning various versions of Kubernetes clusters) and MinIO on GitHub Actions
27 changes: 10 additions & 17 deletions test/e2e/kibishii_tests.go
@@ -18,7 +18,6 @@ package e2e

import (
"fmt"
"os"
"os/exec"
"strconv"
"time"
@@ -40,29 +39,23 @@ func installKibishii(ctx context.Context, namespace string, cloudPlatform string
// We use kustomize to generate YAML for Kibishii from the checked-in yaml directories
kibishiiInstallCmd := exec.CommandContext(ctx, "kubectl", "apply", "-n", namespace, "-k",
"github.com/vmware-tanzu-experiments/distributed-data-generator/kubernetes/yaml/"+cloudPlatform)

_, _, err := veleroexec.RunCommand(kibishiiInstallCmd)
_, stderr, err := veleroexec.RunCommand(kibishiiInstallCmd)
if err != nil {
return errors.Wrap(err, "failed to install kibishii")
return errors.Wrapf(err, "failed to install kibishii, stderr=%s", stderr)
}

kibishiiSetWaitCmd := exec.CommandContext(ctx, "kubectl", "rollout", "status", "statefulset.apps/kibishii-deployment",
"-n", namespace, "-w", "--timeout=30m")
kibishiiSetWaitCmd.Stdout = os.Stdout
kibishiiSetWaitCmd.Stderr = os.Stderr
_, _, err = veleroexec.RunCommand(kibishiiSetWaitCmd)

_, stderr, err = veleroexec.RunCommand(kibishiiSetWaitCmd)
if err != nil {
return err
return errors.Wrapf(err, "failed to rollout, stderr=%s", stderr)
}

fmt.Printf("Waiting for kibishii jump-pad pod to be ready\n")
jumpPadWaitCmd := exec.CommandContext(ctx, "kubectl", "wait", "--for=condition=ready", "-n", namespace, "pod/jump-pad")
jumpPadWaitCmd.Stdout = os.Stdout
jumpPadWaitCmd.Stderr = os.Stderr
_, _, err = veleroexec.RunCommand(jumpPadWaitCmd)
_, stderr, err = veleroexec.RunCommand(jumpPadWaitCmd)
if err != nil {
return errors.Wrapf(err, "Failed to wait for ready status of pod %s/%s", namespace, jumpPadPod)
return errors.Wrapf(err, "Failed to wait for ready status of pod %s/%s, stderr=%s", namespace, jumpPadPod, stderr)
}

return err
@@ -75,9 +68,9 @@ func generateData(ctx context.Context, namespace string, levels int, filesPerLev
strconv.Itoa(blockSize), strconv.Itoa(passNum), strconv.Itoa(expectedNodes))
fmt.Printf("kibishiiGenerateCmd cmd =%v\n", kibishiiGenerateCmd)

_, _, err := veleroexec.RunCommand(kibishiiGenerateCmd)
_, stderr, err := veleroexec.RunCommand(kibishiiGenerateCmd)
if err != nil {
return errors.Wrap(err, "failed to generate")
return errors.Wrapf(err, "failed to generate, stderr=%s", stderr)
}

return nil
@@ -90,9 +83,9 @@ func verifyData(ctx context.Context, namespace string, levels int, filesPerLevel
strconv.Itoa(blockSize), strconv.Itoa(passNum), strconv.Itoa(expectedNodes))
fmt.Printf("kibishiiVerifyCmd cmd =%v\n", kibishiiVerifyCmd)

_, _, err := veleroexec.RunCommand(kibishiiVerifyCmd)
_, stderr, err := veleroexec.RunCommand(kibishiiVerifyCmd)
if err != nil {
return errors.Wrap(err, "failed to verify")
return errors.Wrapf(err, "failed to verify, stderr=%s", stderr)
}
return nil
}
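
The kibishii_tests.go changes all follow one pattern: instead of streaming subprocess output to the test's own stdout/stderr, capture stderr from veleroexec.RunCommand and fold it into the wrapped error, so a CI failure log shows why the command failed. A minimal standalone sketch of the same idea, using github.com/pkg/errors as the file above does; runAndWrap is a hypothetical helper, not part of Velero:

package main

import (
    "bytes"
    "fmt"
    "os/exec"

    "github.com/pkg/errors"
)

// runAndWrap runs a command, captures its stderr, and includes that output in
// the returned error so the caller's log explains what actually went wrong.
func runAndWrap(cmd *exec.Cmd, action string) error {
    var stderr bytes.Buffer
    cmd.Stderr = &stderr
    if err := cmd.Run(); err != nil {
        return errors.Wrapf(err, "failed to %s, stderr=%s", action, stderr.String())
    }
    return nil
}

func main() {
    cmd := exec.Command("kubectl", "rollout", "status", "statefulset/kibishii-deployment")
    if err := runAndWrap(cmd, "wait for rollout"); err != nil {
        fmt.Println(err)
    }
}
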
2 changes: 1 addition & 1 deletion test/e2e/velero_utils.go
@@ -426,7 +426,7 @@ func veleroAddPluginsForProvider(ctx context.Context, veleroCLI string, veleroNa

installPluginCmd := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "plugin", "add", plugin)
installPluginCmd.Stdout = stdoutBuf
installPluginCmd.Stderr = stdoutBuf
installPluginCmd.Stderr = stderrBuf

err := installPluginCmd.Run()

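
The velero_utils.go change is a one-line fix: the plugin-install command's stderr was being written into the stdout buffer, mixing the two streams. A minimal standalone sketch of the corrected wiring; the buffer names mirror the diff, but everything else here is illustrative rather than the surrounding Velero function:

package main

import (
    "bytes"
    "fmt"
    "os/exec"
)

func main() {
    stdoutBuf := new(bytes.Buffer)
    stderrBuf := new(bytes.Buffer)

    cmd := exec.Command("echo", "hello")
    cmd.Stdout = stdoutBuf
    cmd.Stderr = stderrBuf // previously this pointed at stdoutBuf, merging the streams

    if err := cmd.Run(); err != nil {
        fmt.Printf("command failed: %v, stderr=%s\n", err, stderrBuf.String())
        return
    }
    fmt.Printf("stdout=%q stderr=%q\n", stdoutBuf.String(), stderrBuf.String())
}
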