rearrange parallelism in AKS machine pool e2e tests #4874

Merged: 1 commit, May 24, 2024
123 changes: 53 additions & 70 deletions in test/e2e/aks_machinepools.go

Contributor Author (inline comment on the diff): Most of the changes in this file are indentation, so I'd suggest ticking the "hide whitespace" checkbox.

```diff
@@ -47,109 +47,92 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSpecInput) {
 	input := inputGetter()
 	var wg sync.WaitGroup
 
-	originalReplicas := map[types.NamespacedName]int32{}
-	for _, mp := range input.MachinePools {
-		originalReplicas[client.ObjectKeyFromObject(mp)] = ptr.Deref(mp.Spec.Replicas, 0)
-	}
-
-	By("Scaling the machine pools out")
 	for _, mp := range input.MachinePools {
 		wg.Add(1)
 		go func(mp *expv1.MachinePool) {
 			defer GinkgoRecover()
 			defer wg.Done()
 
+			originalReplicas := ptr.Deref(mp.Spec.Replicas, 0)
+
+			Byf("Scaling machine pool %s out", mp.Name)
 			framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{
 				ClusterProxy:              bootstrapClusterProxy,
 				Cluster:                   input.Cluster,
 				Replicas:                  ptr.Deref(mp.Spec.Replicas, 0) + 1,
 				MachinePools:              []*expv1.MachinePool{mp},
 				WaitForMachinePoolToScale: input.WaitIntervals,
 			})
-		}(mp)
-	}
-	wg.Wait()
 
-	By("Scaling the machine pools in")
-	for _, mp := range input.MachinePools {
-		wg.Add(1)
-		go func(mp *expv1.MachinePool) {
-			defer GinkgoRecover()
-			defer wg.Done()
+			Byf("Scaling machine pool %s in", mp.Name)
 			framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{
 				ClusterProxy:              bootstrapClusterProxy,
 				Cluster:                   input.Cluster,
 				Replicas:                  ptr.Deref(mp.Spec.Replicas, 0) - 1,
 				MachinePools:              []*expv1.MachinePool{mp},
 				WaitForMachinePoolToScale: input.WaitIntervals,
 			})
-		}(mp)
-	}
-	wg.Wait()
 
-	By("Scaling the machine pools to zero")
-	// System node pools cannot be scaled to 0, so only include user node pools.
-	var machinePoolsToScale []*expv1.MachinePool
-	for _, mp := range input.MachinePools {
-		switch mp.Spec.Template.Spec.InfrastructureRef.Kind {
-		case infrav1.AzureManagedMachinePoolKind:
-			ammp := &infrav1.AzureManagedMachinePool{}
-			err := bootstrapClusterProxy.GetClient().Get(ctx, types.NamespacedName{
-				Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
-				Name:      mp.Spec.Template.Spec.InfrastructureRef.Name,
-			}, ammp)
-			Expect(err).NotTo(HaveOccurred())
-
-			if ammp.Spec.Mode != string(infrav1.NodePoolModeSystem) {
-				machinePoolsToScale = append(machinePoolsToScale, mp)
-			}
-		case infrav1exp.AzureASOManagedMachinePoolKind:
-			ammp := &infrav1exp.AzureASOManagedMachinePool{}
-			err := bootstrapClusterProxy.GetClient().Get(ctx, types.NamespacedName{
-				Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
-				Name:      mp.Spec.Template.Spec.InfrastructureRef.Name,
-			}, ammp)
-			Expect(err).NotTo(HaveOccurred())
-
-			resources, err := mutators.ToUnstructured(ctx, ammp.Spec.Resources)
-			Expect(err).NotTo(HaveOccurred())
-			for _, resource := range resources {
-				if resource.GetKind() != "ManagedClustersAgentPool" {
-					continue
-				}
-				// mode may not be set in spec. Get the ASO object and check in status.
-				resource.SetNamespace(ammp.Namespace)
-				agentPool := &asocontainerservicev1.ManagedClustersAgentPool{}
-				Expect(bootstrapClusterProxy.GetClient().Get(ctx, client.ObjectKeyFromObject(resource), agentPool)).To(Succeed())
-				if ptr.Deref(agentPool.Status.Mode, "") != asocontainerservicev1.AgentPoolMode_STATUS_System {
-					machinePoolsToScale = append(machinePoolsToScale, mp)
-				}
-				break
-			}
-		}
-	}
-
-	framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{
-		ClusterProxy:              bootstrapClusterProxy,
-		Cluster:                   input.Cluster,
-		Replicas:                  0,
-		MachinePools:              machinePoolsToScale,
-		WaitForMachinePoolToScale: input.WaitIntervals,
-	})
-
-	By("Restoring initial replica count")
-	for _, mp := range input.MachinePools {
-		wg.Add(1)
-		go func(mp *expv1.MachinePool) {
-			defer GinkgoRecover()
-			defer wg.Done()
+			// System node pools cannot be scaled to 0, so only include user node pools.
+			isUserPool := false
+			switch mp.Spec.Template.Spec.InfrastructureRef.Kind {
+			case infrav1.AzureManagedMachinePoolKind:
+				ammp := &infrav1.AzureManagedMachinePool{}
+				err := bootstrapClusterProxy.GetClient().Get(ctx, types.NamespacedName{
+					Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
+					Name:      mp.Spec.Template.Spec.InfrastructureRef.Name,
+				}, ammp)
+				Expect(err).NotTo(HaveOccurred())
+
+				if ammp.Spec.Mode != string(infrav1.NodePoolModeSystem) {
+					isUserPool = true
+				}
+			case infrav1exp.AzureASOManagedMachinePoolKind:
+				ammp := &infrav1exp.AzureASOManagedMachinePool{}
+				err := bootstrapClusterProxy.GetClient().Get(ctx, types.NamespacedName{
+					Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
+					Name:      mp.Spec.Template.Spec.InfrastructureRef.Name,
+				}, ammp)
+				Expect(err).NotTo(HaveOccurred())
+
+				resources, err := mutators.ToUnstructured(ctx, ammp.Spec.Resources)
+				Expect(err).NotTo(HaveOccurred())
+				for _, resource := range resources {
+					if resource.GetKind() != "ManagedClustersAgentPool" {
+						continue
+					}
+					// mode may not be set in spec. Get the ASO object and check in status.
+					resource.SetNamespace(ammp.Namespace)
+					agentPool := &asocontainerservicev1.ManagedClustersAgentPool{}
+					Expect(bootstrapClusterProxy.GetClient().Get(ctx, client.ObjectKeyFromObject(resource), agentPool)).To(Succeed())
+					if ptr.Deref(agentPool.Status.Mode, "") != asocontainerservicev1.AgentPoolMode_STATUS_System {
+						isUserPool = true
+					}
+					break
+				}
+			}
+
+			if isUserPool {
+				Byf("Scaling the machine pool %s to zero", mp.Name)
+				framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{
+					ClusterProxy:              bootstrapClusterProxy,
+					Cluster:                   input.Cluster,
+					Replicas:                  0,
+					MachinePools:              []*expv1.MachinePool{mp},
+					WaitForMachinePoolToScale: input.WaitIntervals,
+				})
+			}
+
+			Byf("Restoring initial replica count for machine pool %s", mp.Name)
 			framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{
 				ClusterProxy:              bootstrapClusterProxy,
 				Cluster:                   input.Cluster,
-				Replicas:                  originalReplicas[client.ObjectKeyFromObject(mp)],
+				Replicas:                  originalReplicas,
 				MachinePools:              []*expv1.MachinePool{mp},
 				WaitForMachinePoolToScale: input.WaitIntervals,
 			})
 		}(mp)
 	}
+
 	wg.Wait()
 }
```
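For review context, here is the shape of the change in miniature. Before, the test fanned out one batch of goroutines per step and called wg.Wait() between steps, so every pool had to finish scaling out before any pool could begin scaling in. After, each pool gets a single goroutine that runs all of its steps in order, and the test joins once at the end. Below is a minimal, runnable sketch of that pattern; the pool type and scaleTo helper are hypothetical stand-ins for expv1.MachinePool and framework.ScaleMachinePoolAndWait:

```go
package main

import (
	"fmt"
	"sync"
)

// pool is a hypothetical stand-in for an expv1.MachinePool.
type pool struct {
	name     string
	replicas int32
}

// scaleTo stands in for framework.ScaleMachinePoolAndWait: the real call
// blocks until the pool reaches the requested replica count.
func scaleTo(p *pool, n int32) {
	p.replicas = n
	fmt.Printf("%s scaled to %d\n", p.name, n)
}

func main() {
	pools := []*pool{
		{name: "user-pool", replicas: 2},
		{name: "system-pool", replicas: 1},
	}

	var wg sync.WaitGroup
	for _, p := range pools {
		wg.Add(1)
		// Pass p as an argument so each goroutine gets its own copy of the
		// loop variable, mirroring the go func(mp *expv1.MachinePool) pattern.
		go func(p *pool) {
			defer wg.Done()

			// Each goroutine remembers its own pool's starting count,
			// which replaces the shared originalReplicas map.
			original := p.replicas

			scaleTo(p, original+1) // scale out
			scaleTo(p, original)   // scale back in
			scaleTo(p, 0)          // scale to zero (the real test skips system node pools here)
			scaleTo(p, original)   // restore the initial replica count
		}(p)
	}
	wg.Wait() // a single join at the end, instead of one wg.Wait() per step
}
```

Each pool still sees its own steps in order, but the slowest pool no longer gates every other pool at each step boundary, and the per-goroutine original local removes the need for the shared originalReplicas map.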