e2e/lifecycle: decentralized settings
Test settings should be defined in the test source code itself,
because conceptually the framework is a separate entity that not all
test authors can modify.

For the sake of backwards compatibility, the names of the command-line
flags are not changed.
pohly committed Sep 11, 2018
1 parent c053067 commit 91a90b6
Showing 2 changed files with 26 additions and 24 deletions.
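
The pattern the commit message describes can be seen in miniature below — a self-contained toy program, not Kubernetes code, with illustrative names: the test source registers its own flag with the standard flag package instead of extending the framework's shared TestContext struct.

    package main

    import (
        "flag"
        "fmt"
    )

    // The setting lives next to the code that uses it, so the framework
    // (conceptually a separate entity) never has to change when a test
    // gains a new option. Reusing the old flag name keeps existing
    // invocations working.
    var upgradeTarget = flag.String("upgrade-target", "ci/latest",
        "Version to upgrade to if doing an upgrade test.")

    func main() {
        flag.Parse() // in the e2e suite, flags are parsed during test setup
        fmt.Println("upgrade target:", *upgradeTarget) // dereference only after parsing
    }

Running, say, `go run main.go -upgrade-target=release/stable` prints the parsed value; and because the flag names are unchanged, callers that already pass -upgrade-target and -upgrade-image need no updates.
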
4 changes: 0 additions & 4 deletions test/e2e/framework/test_context.go
@@ -91,11 +91,9 @@ type TestContextType struct {
     MinStartupPods int
     // Timeout for waiting for system pods to be running
     SystemPodsStartupTimeout time.Duration
-    UpgradeTarget string
     EtcdUpgradeStorage string
     EtcdUpgradeVersion string
     IngressUpgradeImage string
-    UpgradeImage string
     GCEUpgradeScript string
     ContainerRuntime string
     ContainerRuntimeEndpoint string
@@ -278,10 +276,8 @@ func RegisterClusterFlags() {
     flag.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
     flag.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.")
     flag.DurationVar(&TestContext.SystemDaemonsetStartupTimeout, "system-daemonsets-startup-timeout", 5*time.Minute, "Timeout for waiting for all system daemonsets to be ready.")
-    flag.StringVar(&TestContext.UpgradeTarget, "upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
     flag.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
     flag.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
-    flag.StringVar(&TestContext.UpgradeImage, "upgrade-image", "", "Image to upgrade to (e.g. 'container_vm' or 'gci') if doing an upgrade test.")
     flag.StringVar(&TestContext.IngressUpgradeImage, "ingress-upgrade-image", "", "Image to upgrade to if doing an upgrade test for ingress.")
     flag.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.")
     flag.BoolVar(&TestContext.CleanStart, "clean-start", false, "If true, purge all namespaces except default and system before running tests. This serves to Cleanup test namespaces from failed/interrupted e2e runs in a long-lived cluster.")
46 changes: 26 additions & 20 deletions test/e2e/lifecycle/cluster_upgrade.go
@@ -18,6 +18,7 @@ package lifecycle
 
 import (
     "encoding/xml"
+    "flag"
     "fmt"
     "os"
     "path/filepath"
@@ -39,6 +40,11 @@ import (
     . "github.com/onsi/ginkgo"
 )
 
+var (
+    upgradeTarget = flag.String("upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
+    upgradeImage  = flag.String("upgrade-image", "", "Image to upgrade to (e.g. 'container_vm' or 'gci') if doing an upgrade test.")
+)
+
 var upgradeTests = []upgrades.Test{
     &upgrades.ServiceUpgradeTest{},
     &upgrades.SecretUpgradeTest{},
@@ -89,7 +95,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
     testFrameworks := createUpgradeFrameworks(upgradeTests)
     Describe("master upgrade", func() {
         It("should maintain a functioning cluster [Feature:MasterUpgrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
 
             testSuite := &junit.TestSuite{Name: "Master upgrade"}
@@ -112,7 +118,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
 
     Describe("node upgrade", func() {
         It("should maintain a functioning cluster [Feature:NodeUpgrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
 
             testSuite := &junit.TestSuite{Name: "Node upgrade"}
@@ -125,7 +131,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
                 start := time.Now()
                 defer finalizeUpgradeTest(start, nodeUpgradeTest)
                 target := upgCtx.Versions[1].Version.String()
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
             }
             runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgCtx, upgrades.NodeUpgrade, upgradeFunc)
@@ -134,7 +140,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
 
     Describe("cluster upgrade", func() {
         It("should maintain a functioning cluster [Feature:ClusterUpgrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
 
             testSuite := &junit.TestSuite{Name: "Cluster upgrade"}
@@ -146,7 +152,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
                 target := upgCtx.Versions[1].Version.String()
                 framework.ExpectNoError(framework.MasterUpgrade(target))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
             }
             runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
@@ -163,7 +169,7 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() {
 
     Describe("cluster downgrade", func() {
         It("should maintain a functioning cluster [Feature:ClusterDowngrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
 
             testSuite := &junit.TestSuite{Name: "Cluster downgrade"}
@@ -175,7 +181,7 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() {
                 defer finalizeUpgradeTest(start, clusterDowngradeTest)
                 // Yes this really is a downgrade. And nodes must downgrade first.
                 target := upgCtx.Versions[1].Version.String()
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
                 framework.ExpectNoError(framework.MasterUpgrade(target))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
@@ -268,7 +274,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
     testFrameworks := createUpgradeFrameworks(gpuUpgradeTests)
     Describe("master upgrade", func() {
         It("should NOT disrupt gpu pod [Feature:GPUMasterUpgrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
 
             testSuite := &junit.TestSuite{Name: "GPU master upgrade"}
@@ -286,7 +292,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
     })
     Describe("cluster upgrade", func() {
         It("should be able to run gpu pod after upgrade [Feature:GPUClusterUpgrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
 
             testSuite := &junit.TestSuite{Name: "GPU cluster upgrade"}
@@ -298,15 +304,15 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
                 target := upgCtx.Versions[1].Version.String()
                 framework.ExpectNoError(framework.MasterUpgrade(target))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
             }
             runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
         })
     })
     Describe("cluster downgrade", func() {
         It("should be able to run gpu pod after downgrade [Feature:GPUClusterDowngrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
 
             testSuite := &junit.TestSuite{Name: "GPU cluster downgrade"}
@@ -316,7 +322,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
                 start := time.Now()
                 defer finalizeUpgradeTest(start, gpuDowngradeTest)
                 target := upgCtx.Versions[1].Version.String()
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
                 framework.ExpectNoError(framework.MasterUpgrade(target))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
@@ -334,7 +340,7 @@ var _ = Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", func()
     testFrameworks := createUpgradeFrameworks(statefulsetUpgradeTests)
     framework.KubeDescribe("stateful upgrade", func() {
         It("should maintain a functioning cluster", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
 
             testSuite := &junit.TestSuite{Name: "Stateful upgrade"}
@@ -346,7 +352,7 @@ var _ = Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", func()
                 target := upgCtx.Versions[1].Version.String()
                 framework.ExpectNoError(framework.MasterUpgrade(target))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
             }
             runUpgradeSuite(f, statefulsetUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
@@ -365,7 +371,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
     testFrameworks := createUpgradeFrameworks(kubeProxyUpgradeTests)
 
     It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade]", func() {
-        upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+        upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
         framework.ExpectNoError(err)
 
         testSuite := &junit.TestSuite{Name: "kube-proxy upgrade"}
@@ -381,7 +387,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
             target := upgCtx.Versions[1].Version.String()
             framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, true))
             framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
-            framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, framework.TestContext.UpgradeImage, true))
+            framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, true))
             framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
         }
         runUpgradeSuite(f, kubeProxyUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
@@ -392,7 +398,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
     testFrameworks := createUpgradeFrameworks(kubeProxyDowngradeTests)
 
     It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade]", func() {
-        upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+        upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
         framework.ExpectNoError(err)
 
         testSuite := &junit.TestSuite{Name: "kube-proxy downgrade"}
@@ -407,7 +413,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
             defer finalizeUpgradeTest(start, kubeProxyDowngradeTest)
             // Yes this really is a downgrade. And nodes must downgrade first.
             target := upgCtx.Versions[1].Version.String()
-            framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, framework.TestContext.UpgradeImage, false))
+            framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, false))
             framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
             framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, false))
             framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
@@ -496,7 +502,7 @@ func runUpgradeSuite(
     upgradeType upgrades.UpgradeType,
     upgradeFunc func(),
 ) {
-    upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+    upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
     framework.ExpectNoError(err)
 
     cm := chaosmonkey.New(upgradeFunc)
@@ -569,7 +575,7 @@ func getUpgradeContext(c discovery.DiscoveryInterface, upgradeTarget string) (*u
 
     upgCtx.Versions = append(upgCtx.Versions, upgrades.VersionContext{
         Version:   *nextVer,
-        NodeImage: framework.TestContext.UpgradeImage,
+        NodeImage: *upgradeImage,
     })
 
     return upgCtx, nil
