diff --git a/hack/ci-e2e.sh b/hack/ci-e2e.sh index 9bd9ac2789..9733301c45 100755 --- a/hack/ci-e2e.sh +++ b/hack/ci-e2e.sh @@ -11,6 +11,8 @@ set -eux REPO_ROOT=$(realpath "$(dirname "${BASH_SOURCE[0]}")/..") +BMO_IMAGE_NAME="quay.io/metal3-io/baremetal-operator" + cd "${REPO_ROOT}" || exit 1 @@ -25,7 +27,7 @@ export PATH="${PATH}:/usr/local/go/bin" "${REPO_ROOT}/hack/e2e/ensure_kubectl.sh" # Build the container image with e2e tag (used in tests) -IMG=quay.io/metal3-io/baremetal-operator:e2e make docker +IMG="${BMO_IMAGE_NAME}:e2e" make docker # Set up minikube minikube start --driver=kvm2 @@ -41,8 +43,8 @@ virsh -c qemu:///system attach-interface --domain minikube --mac="52:54:00:6c:3c minikube stop minikube start -# Load the BMO e2e image into it -minikube image load quay.io/metal3-io/baremetal-operator:e2e +# Load the local BMO images into it +minikube image load "${BMO_IMAGE_NAME}:e2e" # Create libvirt domain VM_NAME="bmo-e2e-0" @@ -122,6 +124,24 @@ docker run --name image-server-e2e -d \ -p 80:8080 \ -v "${IMAGE_DIR}:/usr/share/nginx/html" nginxinc/nginx-unprivileged +BMO_RELEASE_BRANCH="${BMO_RELEASE_BRANCH:-release-0.4}" +BMO_RELEASE_ROOT="/tmp/bmo-${BMO_RELEASE_BRANCH}" +BMO_ORIGIN_ADDRESS="https://github.com/metal3-io/baremetal-operator.git" +export BMO_UPGRADE_FROM_IMAGE="${BMO_IMAGE_NAME}:${BMO_RELEASE_BRANCH}" +BMO_E2E_KUSTOMIZATION="${REPO_ROOT}/config/overlays/e2e" + +if [[ ! -d "${BMO_RELEASE_ROOT}" ]]; then + git clone --branch "${BMO_RELEASE_BRANCH}" --single-branch "${BMO_ORIGIN_ADDRESS}" "${BMO_RELEASE_ROOT}" +fi + +export BMO_UPGRADE_FROM_KUSTOMIZATION="${BMO_RELEASE_ROOT}/config/overlays/e2e" + +if [[ ! 
-d "${BMO_UPGRADE_FROM_KUSTOMIZATION}" ]]; then + mkdir -p "${BMO_UPGRADE_FROM_KUSTOMIZATION}" + cp -r "${BMO_E2E_KUSTOMIZATION}/"* "${BMO_UPGRADE_FROM_KUSTOMIZATION}/" + sed -i "s/newTag: e2e/newTag: ${BMO_RELEASE_BRANCH}/" "${BMO_UPGRADE_FROM_KUSTOMIZATION}/kustomization.yaml" +fi + # We need to gather artifacts/logs before exiting also if there are errors set +e diff --git a/test/e2e/common.go index 234c431379..eb9c418962 100644 --- a/test/e2e/common.go +++ b/test/e2e/common.go @@ -14,6 +14,7 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metal3api "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1" @@ -221,3 +222,20 @@ func buildKustomizeManifest(source string) ([]byte, error) { } return resources.AsYaml() } + +func DeploymentRolledOut(ctx context.Context, clusterProxy framework.ClusterProxy, name string, namespace string, desiredGeneration int64) bool { + clientSet := clusterProxy.GetClientSet() + deploy, err := clientSet.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + if deploy != nil { + // When the number of replicas is equal to the number of available and updated + // replicas, we know that only "new" pods are running. When we also + // have the desired number of replicas and a new enough generation, we + // know that the rollout is complete. 
+ return (deploy.Status.UpdatedReplicas == *deploy.Spec.Replicas) && + (deploy.Status.AvailableReplicas == *deploy.Spec.Replicas) && + (deploy.Status.Replicas == *deploy.Spec.Replicas) && + (deploy.Status.ObservedGeneration >= desiredGeneration) + } + return false +} diff --git a/test/e2e/config/fixture.yaml b/test/e2e/config/fixture.yaml index 8d5d65d8cf..3ae9c5500c 100644 --- a/test/e2e/config/fixture.yaml +++ b/test/e2e/config/fixture.yaml @@ -25,6 +25,7 @@ variables: IMAGE_URL: "http://192.168.222.1/cirros-0.6.2-x86_64-disk.img" IMAGE_CHECKSUM: "c8fc807773e5354afe61636071771906" CERT_MANAGER_VERSION: "v1.13.0" + RUN_UPGRADE_TESTS: "false" intervals: inspection/wait-unmanaged: ["1m", "10ms"] diff --git a/test/e2e/config/ironic.yaml b/test/e2e/config/ironic.yaml index 7c930b76f0..137d198c0e 100644 --- a/test/e2e/config/ironic.yaml +++ b/test/e2e/config/ironic.yaml @@ -32,6 +32,7 @@ variables: IMAGE_URL: "http://192.168.222.1/cirros-0.6.2-x86_64-disk.img" IMAGE_CHECKSUM: "c8fc807773e5354afe61636071771906" CERT_MANAGER_VERSION: "v1.13.0" + RUN_UPGRADE_TESTS: "true" intervals: inspection/wait-unmanaged: ["1m", "5s"] diff --git a/test/e2e/upgrade_test.go b/test/e2e/upgrade_test.go new file mode 100644 index 0000000000..31410ce220 --- /dev/null +++ b/test/e2e/upgrade_test.go @@ -0,0 +1,110 @@ +package e2e + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "path/filepath" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/cluster-api/test/framework/bootstrap" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + "sigs.k8s.io/cluster-api/test/framework" +) + +var _ = Describe("BMO Upgrade", func() { + var ( + bmoNamespace string + bmoDeployName string + upgradeClusterProvider bootstrap.ClusterProvider + upgradeClusterProxy framework.ClusterProxy + ) + BeforeEach(func() { + if e2eConfig.GetVariable("RUN_UPGRADE_TESTS") != "true" { + Skip("Upgrade tests are skipped") + } + bmoNamespace = "baremetal-operator-system" + bmoDeployName = "baremetal-operator-controller-manager" + bmoUpgradeFromImageName := e2eConfig.GetVariable("BMO_UPGRADE_FROM_IMAGE") + bmoUpgradeFromImage := clusterctl.ContainerImage{ + Name: bmoUpgradeFromImageName, + LoadBehavior: clusterctl.MustLoadImage, + } + + By("Creating a separate cluster for upgrade tests") + upgradeClusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(ctx, bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{ + Name: "bmo-e2e-upgrade", + Images: append(e2eConfig.Images, bmoUpgradeFromImage), + }) + Expect(upgradeClusterProvider).ToNot(BeNil(), "Failed to create a cluster") + kubeconfigPath := upgradeClusterProvider.GetKubeconfigPath() + scheme := runtime.NewScheme() + framework.TryAddDefaultSchemes(scheme) + upgradeClusterProxy = framework.NewClusterProxy("bmo-e2e-upgrade", kubeconfigPath, scheme) + DeferCleanup(func() { + upgradeClusterProxy.Dispose(ctx) + upgradeClusterProvider.Dispose(ctx) + }) + By("Installing cert-manager on the upgrade cluster") + cmVersion := e2eConfig.GetVariable("CERT_MANAGER_VERSION") + err := installCertManager(ctx, upgradeClusterProxy, cmVersion) + Expect(err).NotTo(HaveOccurred()) + By("Waiting for cert-manager webhook") + Eventually(func() error { + return checkCertManagerWebhook(ctx, upgradeClusterProxy) + }, 
e2eConfig.GetIntervals("default", "wait-available")...).Should(Succeed()) + err = checkCertManagerAPI(upgradeClusterProxy) + Expect(err).NotTo(HaveOccurred()) + + By("Installing BMO on the upgrade cluster") + kustomization := e2eConfig.GetVariable("BMO_UPGRADE_FROM_KUSTOMIZATION") + manifest, err := buildKustomizeManifest(kustomization) + Expect(err).NotTo(HaveOccurred()) + err = upgradeClusterProxy.Apply(ctx, manifest) + Expect(err).NotTo(HaveOccurred()) + + bmoDeployment := &v1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: bmoDeployName, + Namespace: bmoNamespace, + }, + } + // Wait for it to become available + framework.WaitForDeploymentsAvailable(ctx, framework.WaitForDeploymentsAvailableInput{ + Getter: upgradeClusterProxy.GetClient(), + Deployment: bmoDeployment, + }, e2eConfig.GetIntervals("default", "wait-deployment")...) + // Set up log watcher + framework.WatchDeploymentLogsByName(ctx, framework.WatchDeploymentLogsByNameInput{ + GetLister: upgradeClusterProxy.GetClient(), + Cache: upgradeClusterProxy.GetCache(ctx), + ClientSet: upgradeClusterProxy.GetClientSet(), + Deployment: bmoDeployment, + LogPath: filepath.Join(artifactFolder, "logs", bmoDeployment.GetNamespace()), + }) + }) + + It("Should upgrade BMO to latest version", func() { + By("Upgrading BMO deployment") + bmoKustomization := e2eConfig.GetVariable("BMO_KUSTOMIZATION") + clientSet := upgradeClusterProxy.GetClientSet() + deploy, err := clientSet.AppsV1().Deployments(bmoNamespace).Get(ctx, bmoDeployName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + manifest, err := buildKustomizeManifest(bmoKustomization) + Expect(err).NotTo(HaveOccurred()) + err = upgradeClusterProxy.Apply(ctx, manifest) + Expect(err).NotTo(HaveOccurred()) + By("Waiting for BMO update to rollout") + Eventually(func() bool { + return DeploymentRolledOut(ctx, upgradeClusterProxy, bmoDeployName, bmoNamespace, deploy.Status.ObservedGeneration+1) + }, + 
e2eConfig.GetIntervals("default", "wait-deployment")..., + ).Should(BeTrue()) + }) + + AfterEach(func() { + // Cleanup is handled by the DeferCleanup registered in BeforeEach; + // disposing the proxy and provider here as well would run twice. + }) + +})