fix: correct removal of preferred topology spread constraint (#2547)
tzneal committed Sep 23, 2022
1 parent 227850a commit c43ce8c
Showing 2 changed files with 29 additions and 1 deletion.
pkg/controllers/provisioning/scheduling/preferences.go (1 addition, 1 deletion)
@@ -93,7 +93,7 @@ func (p *Preferences) removeTopologySpreadScheduleAnyway(pod *v1.Pod) *string {
 		if tsc.WhenUnsatisfiable == v1.ScheduleAnyway {
 			msg := fmt.Sprintf("removing: spec.topologySpreadConstraints = %s", pretty.Concise(tsc))
 			pod.Spec.TopologySpreadConstraints[i] = pod.Spec.TopologySpreadConstraints[len(pod.Spec.TopologySpreadConstraints)-1]
-			pod.Spec.TopologySpreadConstraints = pod.Spec.TopologySpreadConstraints[1:]
+			pod.Spec.TopologySpreadConstraints = pod.Spec.TopologySpreadConstraints[:len(pod.Spec.TopologySpreadConstraints)-1]
 			return ptr.String(msg)
 		}
 	}
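The bug fixed above is in the swap-and-truncate idiom for removing a slice element in place: after the element at index i is overwritten with the last element, the slice must be shortened from the tail with [:len(s)-1]. The old [1:] slice instead dropped the first element and kept the duplicated last one, so the wrong topology spread constraint could be removed. A minimal standalone sketch of the two behaviors (removeAt and removeAtBuggy are illustrative names, not code from this repository):

package main

import "fmt"

// removeAt deletes the element at index i with the swap-and-truncate idiom:
// move the last element into slot i, then shrink the slice by one from the end.
func removeAt(s []string, i int) []string {
	s[i] = s[len(s)-1]
	return s[:len(s)-1] // correct: drops the now-duplicated last element
}

// removeAtBuggy mirrors the pre-fix behavior: after the swap it slices off
// the FIRST element, so element 0 is lost and the duplicate survives.
func removeAtBuggy(s []string, i int) []string {
	s[i] = s[len(s)-1]
	return s[1:]
}

func main() {
	fmt.Println(removeAt([]string{"a", "b", "c"}, 1))      // [a c]
	fmt.Println(removeAtBuggy([]string{"a", "b", "c"}, 1)) // [c c]
}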
pkg/controllers/provisioning/scheduling/suite_test.go (28 additions, 0 deletions)
@@ -1514,6 +1514,34 @@ var _ = Describe("Topology", func() {
 		})
 	})
 
+	Context("Combined Hostname and Zonal Topology", func() {
+		It("should spread pods while respecting both constraints", func() {
+			topology := []v1.TopologySpreadConstraint{{
+				TopologyKey:       v1.LabelTopologyZone,
+				WhenUnsatisfiable: v1.DoNotSchedule,
+				LabelSelector:     &metav1.LabelSelector{MatchLabels: labels},
+				MaxSkew:           1,
+			}, {
+				TopologyKey:       v1.LabelHostname,
+				WhenUnsatisfiable: v1.ScheduleAnyway,
+				LabelSelector:     &metav1.LabelSelector{MatchLabels: labels},
+				MaxSkew:           1,
+			}}
+			provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
+				{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2"}}}
+
+			ExpectApplied(ctx, env.Client, provisioner)
+			ExpectProvisioned(ctx, env.Client, controller,
+				MakePods(2, test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology})...,
+			)
+
+			// should get one pod per zone, can't schedule to test-zone-3
+			ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 1))
+			// and one pod per node
+			ExpectSkew(ctx, env.Client, "default", &topology[1]).To(ConsistOf(1, 1))
+		})
+	})
+
 	Context("Combined Hostname and Capacity Type Topology", func() {
 		It("should spread pods while respecting both constraints", func() {
 			topology := []v1.TopologySpreadConstraint{{
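The new test pins down exactly the failure mode: the preferred (ScheduleAnyway) hostname constraint is the last element of the constraint slice, so the swap step is a no-op, and the old [1:] slice then discarded the required (DoNotSchedule) zonal constraint at index 0 instead of the preferred one. A hypothetical trace of the pre-fix behavior, using strings to stand in for the two constraints:

tscs := []string{"zone/DoNotSchedule", "hostname/ScheduleAnyway"}
i := 1                      // index of the preferred constraint to remove
tscs[i] = tscs[len(tscs)-1] // no-op: i already points at the last element
tscs = tscs[1:]             // bug: drops the required zonal constraint,
                            // leaving ["hostname/ScheduleAnyway"]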
