Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Minion->Node rename: comments/vars for e2e.go, e2e.sh, resize_nodes.go for #1111 #17993

Merged
merged 2 commits into from
Dec 3, 2015
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
8 changes: 4 additions & 4 deletions hack/e2e.go
Expand Up @@ -60,7 +60,7 @@ const (
downloadDirName = "_output/downloads"
tarDirName = "server"
tempDirName = "upgrade-e2e-temp-dir"
minMinionCount = 2
minNodeCount = 2
)

var (
Expand Down Expand Up @@ -185,7 +185,7 @@ func Up() bool {

// Ensure that the cluster is large enough to run the e2e tests.
func ValidateClusterSize() {
// Check that there are at least minMinionCount minions running
// Check that there are at least minNodeCount nodes running
cmd := exec.Command(path.Join(*root, "hack/e2e-internal/e2e-cluster-size.sh"))
if *verbose {
cmd.Stderr = os.Stderr
Expand All @@ -200,8 +200,8 @@ func ValidateClusterSize() {
log.Fatalf("Could not count number of nodes to validate cluster size (%s)", err)
}

if numNodes < minMinionCount {
log.Fatalf("Cluster size (%d) is too small to run e2e tests. %d Minions are required.", numNodes, minMinionCount)
if numNodes < minNodeCount {
log.Fatalf("Cluster size (%d) is too small to run e2e tests. %d Nodes are required.", numNodes, minNodeCount)
}
}

Expand Down
4 changes: 2 additions & 2 deletions hack/jenkins/e2e.sh
Expand Up @@ -320,7 +320,7 @@ GCE_FLAKY_TESTS=(
"GCE\sL7\sLoadBalancer\sController" # issue: #17518
"DaemonRestart\sController\sManager" # issue: #17829
"Resource\susage\sof\ssystem\scontainers" # issue: #13931
"allows\sscheduling\sof\spods\son\sa\sminion\safter\sit\srejoins\sthe\scluster" # file: resize_nodes.go, issue: #17830
"allows\sscheduling\sof\spods\son\sa\snode\safter\sit\srejoins\sthe\scluster" # file: resize_nodes.go, issue: #17830
"NodeOutOfDisk" # issue: #17687
)

Expand Down Expand Up @@ -1229,7 +1229,7 @@ case ${JOB_NAME} in
NUM_NODES="11"
MASTER_SIZE="n1-standard-4"
NODE_SIZE="n1-standard-8" # Note: can fit about 17 hollow nodes per core
# so NUM_NODES x cores_per_minion should
# so NUM_NODES x cores_per_node should
# be set accordingly.
KUBE_GCE_INSTANCE_PREFIX="kubemark1000"
E2E_ZONE="asia-east1-a"
Expand Down
6 changes: 3 additions & 3 deletions test/e2e/resize_nodes.go
Expand Up @@ -485,7 +485,7 @@ var _ = Describe("Nodes", func() {
})

Describe("Network", func() {
Context("when a minion node becomes unreachable", func() {
Context("when a node becomes unreachable", func() {
BeforeEach(func() {
SkipUnlessProviderIs("gce", "gke", "aws")
SkipUnlessNodeCountIsAtLeast(2)
Expand All @@ -497,8 +497,8 @@ var _ = Describe("Nodes", func() {
// 1. pods from an uncontactable node are rescheduled
// 2. when a node joins the cluster, it can host new pods.
// Factor out the cases into two separate tests.
It("[replication controller] recreates pods scheduled on the unreachable minion node "+
"AND allows scheduling of pods on a minion after it rejoins the cluster", func() {
It("[replication controller] recreates pods scheduled on the unreachable node "+
"AND allows scheduling of pods on a node after it rejoins the cluster", func() {

// Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
Expand Down