Skip to content

Commit

Permalink
Fix Gomega instance being created from the parent test context
Browse files Browse the repository at this point in the history
  • Loading branch information
muraee committed Sep 8, 2023
1 parent 32f3ced commit 922ef19
Show file tree
Hide file tree
Showing 7 changed files with 14 additions and 18 deletions.
3 changes: 1 addition & 2 deletions test/e2e/autoscaling_test.go
Expand Up @@ -21,14 +21,13 @@ import (

func TestAutoscaling(t *testing.T) {
t.Parallel()
g := NewWithT(t)

ctx, cancel := context.WithCancel(testContext)
defer cancel()

clusterOpts := globalOpts.DefaultClusterOptions(t)

e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) {
e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, g Gomega, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) {
// Get the newly created NodePool
nodepools := &hyperv1.NodePoolList{}
if err := mgtClient.List(ctx, nodepools, crclient.InNamespace(hostedCluster.Namespace)); err != nil {
Expand Down
2 changes: 1 addition & 1 deletion test/e2e/chaos_test.go
Expand Up @@ -38,7 +38,7 @@ func TestHAEtcdChaos(t *testing.T) {
clusterOpts.ControlPlaneAvailabilityPolicy = string(hyperv1.HighlyAvailable)
clusterOpts.NodePoolReplicas = 0

e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) {
e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, g Gomega, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) {
t.Run("SingleMemberRecovery", testSingleMemberRecovery(ctx, mgtClient, hostedCluster))
t.Run("KillRandomMembers", testKillRandomMembers(ctx, mgtClient, hostedCluster))
t.Run("KillAllMembers", testKillAllMembers(ctx, mgtClient, hostedCluster))
Expand Down
3 changes: 1 addition & 2 deletions test/e2e/control_plane_upgrade_test.go
Expand Up @@ -15,7 +15,6 @@ import (

func TestUpgradeControlPlane(t *testing.T) {
t.Parallel()
g := NewWithT(t)

ctx, cancel := context.WithCancel(testContext)
defer cancel()
Expand All @@ -26,7 +25,7 @@ func TestUpgradeControlPlane(t *testing.T) {
clusterOpts.ReleaseImage = globalOpts.PreviousReleaseImage
clusterOpts.ControlPlaneAvailabilityPolicy = string(hyperv1.HighlyAvailable)

e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) {
e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, g Gomega, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) {
// Sanity check the cluster by waiting for the nodes to report ready
t.Logf("Waiting for guest client to become available")
guestClient := e2eutil.WaitForGuestClient(t, ctx, mgtClient, hostedCluster)
Expand Down
13 changes: 7 additions & 6 deletions test/e2e/create_cluster_test.go
Expand Up @@ -48,7 +48,6 @@ func TestCreateClusterCustomConfig(t *testing.T) {
t.Skip("test only supported on platform AWS")
}
t.Parallel()
g := NewWithT(t)

ctx, cancel := context.WithCancel(testContext)
defer cancel()
Expand All @@ -57,12 +56,14 @@ func TestCreateClusterCustomConfig(t *testing.T) {

// find kms key ARN using alias
kmsKeyArn, err := e2eutil.GetKMSKeyArn(clusterOpts.AWSPlatform.AWSCredentialsFile, clusterOpts.AWSPlatform.Region, globalOpts.configurableClusterOptions.AWSKmsKeyAlias)
g.Expect(err).NotTo(HaveOccurred(), "failed to retrieve kms key arn")
g.Expect(kmsKeyArn).NotTo(BeNil(), "failed to retrieve kms key arn")
if err != nil || kmsKeyArn == nil {
t.Fatal("failed to retrieve kms key arn")
}

clusterOpts.AWSPlatform.EtcdKMSKeyARN = *kmsKeyArn

e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) {
e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, g Gomega, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) {

g.Expect(hostedCluster.Spec.SecretEncryption.KMS.AWS.ActiveKey.ARN).To(Equal(*kmsKeyArn))
g.Expect(hostedCluster.Spec.SecretEncryption.KMS.AWS.Auth.AWSKMSRoleARN).ToNot(BeEmpty())

Expand All @@ -81,7 +82,7 @@ func TestNoneCreateCluster(t *testing.T) {

clusterOpts := globalOpts.DefaultClusterOptions(t)

e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) {
e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, g Gomega, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) {
// Wait for the rollout to be reported complete
t.Logf("Waiting for cluster rollout. Image: %s", globalOpts.LatestReleaseImage)
// Since the None platform has no workers, CVO will not have expectations set,
Expand Down Expand Up @@ -121,7 +122,7 @@ func TestCreateClusterPrivate(t *testing.T) {
clusterOpts.ControlPlaneAvailabilityPolicy = string(hyperv1.SingleReplica)
clusterOpts.AWSPlatform.EndpointAccess = string(hyperv1.Private)

e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) {
e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, g Gomega, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) {
// Private -> publicAndPrivate
t.Run("SwitchFromPrivateToPublic", testSwitchFromPrivateToPublic(ctx, mgtClient, hostedCluster, &clusterOpts))
// publicAndPrivate -> Private
Expand Down
3 changes: 1 addition & 2 deletions test/e2e/nodepool_test.go
Expand Up @@ -34,7 +34,6 @@ type NodePoolTestCase struct {

func TestNodePool(t *testing.T) {
t.Parallel()
g := NewWithT(t)

ctx, cancel := context.WithCancel(testContext)
defer cancel()
Expand All @@ -44,7 +43,7 @@ func TestNodePool(t *testing.T) {
// We set replicas to 0 in order to allow the inner tests to
// create their own NodePools with the proper replicas
clusterOpts.NodePoolReplicas = 0
e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) {
e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, g Gomega, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) {
hostedClusterClient := e2eutil.WaitForGuestClient(t, ctx, mgtClient, hostedCluster)

// Get the newly created default NodePool
Expand Down
4 changes: 1 addition & 3 deletions test/e2e/olm_test.go
Expand Up @@ -38,14 +38,12 @@ func TestOLM(t *testing.T) {
t.SkipNow()
t.Parallel()

g := NewWithT(t)

ctx, cancel := context.WithCancel(testContext)
defer cancel()

// Create a cluster
clusterOpts := globalOpts.DefaultClusterOptions(t)
e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) {
e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, g Gomega, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) {
// Get guest client
t.Logf("Waiting for guest client to become available")
guestClient := e2eutil.WaitForGuestClient(t, ctx, mgtClient, hostedCluster)
Expand Down
4 changes: 2 additions & 2 deletions test/e2e/util/hypershift_framework.go
Expand Up @@ -18,7 +18,7 @@ import (
. "github.com/onsi/gomega"
)

type hypershiftTestFunc func(t *testing.T, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster)
type hypershiftTestFunc func(t *testing.T, g Gomega, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster)
type hypershiftTest struct {
*testing.T
ctx context.Context
Expand Down Expand Up @@ -72,7 +72,7 @@ func (h *hypershiftTest) Execute(opts *core.CreateOptions, platform hyperv1.Plat

if h.test != nil && !h.Failed() {
h.Run("Main", func(t *testing.T) {
h.test(t, h.client, hostedCluster)
h.test(t, NewWithT(t), h.client, hostedCluster)
})
}
}
Expand Down

0 comments on commit 922ef19

Please sign in to comment.