Implemented e2e test: cluster autoscaler with node selector. #26633

Merged · 1 commit · Jun 3, 2016
81 changes: 80 additions & 1 deletion test/e2e/cluster_size_autoscaling.go
@@ -128,6 +128,54 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
			func(size int) bool { return size < nodeCount+1 }, scaleDownTimeout))
	})

It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
labels := map[string]string{"cluster-autoscaling-test.special-node": "true"}
Contributor
How will it behave with 1 MIG?

Contributor Author
It will work fine: it will resize the single MIG.
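To illustrate the reply, here is a self-contained sketch (not part of the diff; the MIG name and sizes are invented) of the selection loop below when originalSizes holds exactly one MIG. The `<=` comparison matters: since minSize starts at nodeCount, a lone MIG that already holds all the nodes is still selected.

	package main

	import "fmt"

	func main() {
		// Hypothetical cluster state: a single MIG holding all nodes.
		originalSizes := map[string]int{"test-mig-a": 3}
		nodeCount := 3

		// Same selection logic as the test below.
		minMig := ""
		minSize := nodeCount
		for mig, size := range originalSizes {
			if size <= minSize { // "<=", so a lone MIG of exactly nodeCount nodes is still chosen
				minMig = mig
				minSize = size
			}
		}
		fmt.Println(minMig, minSize) // prints: test-mig-a 3
	}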


By("Finding the smallest MIG")
minMig := ""
minSize := nodeCount
for mig, size := range originalSizes {
if size <= minSize {
minMig = mig
minSize = size
}
}

By(fmt.Sprintf("Annotating nodes of the smallest MIG: %s", minMig))
nodes, err := GetGroupNodes(minMig)
nodesMap := map[string]struct{}{}
ExpectNoError(err)
for _, node := range nodes {
updateLabelsForNode(f, node, labels, nil)
nodesMap[node] = struct{}{}
}

CreateNodeSelectorPods(f, "node-selector", minSize+1, labels, false)

By("Waiting for new node to appear and annotating it")
WaitForGroupSize(minMig, int32(minSize+1))
newNodes, err := GetGroupNodes(minMig)
ExpectNoError(err)
for _, node := range newNodes {
if _, old := nodesMap[node]; !old {
updateLabelsForNode(f, node, labels, nil)
}
}

framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))

framework.ExpectNoError(framework.DeleteRC(f.Client, f.Namespace.Name, "node-selector"))
By("Removing labels from nodes")
for _, node := range newNodes {
updateLabelsForNode(f, node, map[string]string{}, []string{"cluster-autoscaling-test.special-node"})
}
restoreSizes(originalSizes)
framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
func(size int) bool { return size <= nodeCount }, scaleDownTimeout))

})
})

func getGKEClusterUrl() string {
@@ -207,6 +255,25 @@ func doPut(url, content string) (string, error) {
	return strBody, nil
}

func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) {
	By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))

	config := &framework.RCConfig{
		Client:       f.Client,
		Name:         id,
		Namespace:    f.Namespace.Name,
		Timeout:      defaultTimeout,
		Image:        "gcr.io/google_containers/pause-amd64:3.0",
		Replicas:     replicas,
		HostPorts:    map[string]int{"port1": 4321},
		NodeSelector: nodeSelector,
	}
	err := framework.RunRC(*config)
	// Callers pass expectRunning=false when the pods cannot all schedule until
	// the autoscaler adds a node, so RunRC's timeout error is deliberately ignored.
	if expectRunning {
		framework.ExpectNoError(err)
	}
}

func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
	By(fmt.Sprintf("Running RC which reserves host port"))
	config := &framework.RCConfig{
@@ -222,7 +289,6 @@ func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectR
	if expectRunning {
		framework.ExpectNoError(err)
	}
}

func ReserveCpu(f *framework.Framework, id string, replicas, millicores int) {
@@ -297,3 +363,16 @@ func restoreSizes(sizes map[string]int) {
		}
	}
}

// updateLabelsForNode removes rmLabels from and adds addLabels to the given node.
func updateLabelsForNode(f *framework.Framework, node string, addLabels map[string]string, rmLabels []string) {
	n, err := f.Client.Nodes().Get(node)
	ExpectNoError(err)
	for _, label := range rmLabels {
		delete(n.Labels, label)
	}
	for label, value := range addLabels {
		n.Labels[label] = value
	}
	_, err = f.Client.Nodes().Update(n)
	ExpectNoError(err)
}
3 changes: 2 additions & 1 deletion test/e2e/framework/util.go
@@ -2189,7 +2189,8 @@ func (config *RCConfig) create() error {
					ReadinessProbe: config.ReadinessProbe,
				},
			},
			DNSPolicy:    *config.DNSPolicy,
			NodeSelector: config.NodeSelector,
		},
	},
},
21 changes: 21 additions & 0 deletions test/e2e/resize_nodes.go
@@ -76,6 +76,27 @@ func ResizeGroup(group string, size int32) error {
	}
}

func GetGroupNodes(group string) ([]string, error) {
	if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
		// TODO: make this hit the compute API directly instead of shelling out to gcloud.
		// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
		output, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
			"list-instances", group, "--project="+framework.TestContext.CloudConfig.ProjectID,
			"--zone="+framework.TestContext.CloudConfig.Zone).CombinedOutput()
		if err != nil {
			return nil, err
		}
		// Collect every line reporting a RUNNING instance; FindAllString is needed
		// here because FindStringSubmatch would return only the first match.
		re := regexp.MustCompile(".*RUNNING")
		lines := re.FindAllString(string(output), -1)
		for i, line := range lines {
			lines[i] = line[:strings.Index(line, " ")]
		}
		return lines, nil
	} else {
		return nil, fmt.Errorf("provider does not support InstanceGroups")
	}
}
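For reference, a self-contained sketch of the parsing step above. The sample output is invented, assuming the usual NAME/ZONE/STATUS table that `gcloud compute instance-groups managed list-instances` prints: the regexp keeps each line containing RUNNING, and the truncation keeps only the first column (the instance name).

	package main

	import (
		"fmt"
		"regexp"
		"strings"
	)

	func main() {
		// Invented sample of gcloud list-instances output.
		output := "NAME            ZONE           STATUS\n" +
			"test-mig-a-x1   us-central1-b  RUNNING\n" +
			"test-mig-a-x2   us-central1-b  RUNNING\n"

		// Match every line that contains RUNNING, as in GetGroupNodes.
		re := regexp.MustCompile(".*RUNNING")
		lines := re.FindAllString(output, -1)
		for i, line := range lines {
			// Keep only the first column: the instance name.
			lines[i] = line[:strings.Index(line, " ")]
		}
		fmt.Println(lines) // prints: [test-mig-a-x1 test-mig-a-x2]
	}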

func GroupSize(group string) (int, error) {
	if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
		// TODO: make this hit the compute API directly instead of shelling out to gcloud.