Fix spelling typos: scheduable -> schedulable #97880

Merged
1 commit merged on Jan 15, 2021
2 changes: 1 addition & 1 deletion cmd/kubeadm/app/phases/upgrade/postupgrade.go
@@ -184,7 +184,7 @@ func removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg *kubeadmapi.ClusterConfigurati
return err
}

- // If we're dry-running or there are no scheduable nodes available, we don't need to wait for the new DNS addon to become ready
+ // If we're dry-running or there are no schedulable nodes available, we don't need to wait for the new DNS addon to become ready
if !dryRun && len(nodes.Items) != 0 {
dnsDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(context.TODO(), installedDeploymentName, metav1.GetOptions{})
if err != nil {
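For context on the code this comment guards: unless the upgrade is a dry run or there are no schedulable nodes, kubeadm waits for the replacement DNS Deployment to become ready. A minimal client-go sketch of such a readiness wait; the helper name and the 2-second poll interval are illustrative, not kubeadm's actual implementation:

```go
package sketch

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForDeploymentReady polls until the Deployment reports at least as many
// ready replicas as it requests, or the timeout expires.
func waitForDeploymentReady(client kubernetes.Interface, namespace, name string, timeout time.Duration) error {
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		d, err := client.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err // any Get error aborts the wait in this sketch
		}
		want := int32(1) // Deployments default to one replica when Spec.Replicas is nil
		if d.Spec.Replicas != nil {
			want = *d.Spec.Replicas
		}
		return d.Status.ReadyReplicas >= want, nil
	})
}
```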
2 changes: 1 addition & 1 deletion pkg/scheduler/internal/queue/scheduling_queue.go
@@ -137,7 +137,7 @@ type PriorityQueue struct {
// when a pod is popped.
schedulingCycle int64
// moveRequestCycle caches the sequence number of scheduling cycle when we
- // received a move request. Unscheduable pods in and before this scheduling
+ // received a move request. Unschedulable pods in and before this scheduling
// cycle will be put back to activeQueue if we were trying to schedule them
// when we received move request.
moveRequestCycle int64
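To make the corrected comment concrete, here is a standalone sketch (not the scheduler's actual code) of the decision moveRequestCycle drives when a pod comes back unschedulable:

```go
package main

import "fmt"

// queueSketch is a pared-down stand-in for PriorityQueue, keeping only the
// two cycle counters the comment above talks about.
type queueSketch struct {
	schedulingCycle  int64 // incremented every time a pod is popped for scheduling
	moveRequestCycle int64 // cycle number at which the last move request arrived
}

// queueForFailedPod decides where a pod that failed scheduling goes. If a
// move request was received during or after the cycle in which the pod was
// being scheduled, the cluster may have changed under the attempt, so the
// pod returns to the active queue for an immediate retry; otherwise it parks
// in the unschedulable pool until the next move request.
func (q *queueSketch) queueForFailedPod(podSchedulingCycle int64) string {
	if q.moveRequestCycle >= podSchedulingCycle {
		return "activeQ"
	}
	return "unschedulableQ"
}

func main() {
	q := &queueSketch{schedulingCycle: 10, moveRequestCycle: 9}
	fmt.Println(q.queueForFailedPod(8))  // activeQ: a move request arrived after cycle 8 started
	fmt.Println(q.queueForFailedPod(10)) // unschedulableQ: nothing has moved since cycle 10
}
```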
2 changes: 1 addition & 1 deletion staging/src/k8s.io/api/core/v1/well_known_taints.go
@@ -27,7 +27,7 @@ const (
TaintNodeUnreachable = "node.kubernetes.io/unreachable"

// TaintNodeUnschedulable will be added when node becomes unschedulable
- // and removed when node becomes scheduable.
+ // and removed when node becomes schedulable.
TaintNodeUnschedulable = "node.kubernetes.io/unschedulable"

// TaintNodeMemoryPressure will be added when node has memory pressure
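The corrected comment describes a reconciliation the node lifecycle controller performs; a standalone sketch of that behavior (the function and its placement are illustrative, not the controller's actual code):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// reconcileUnschedulableTaint keeps the node.kubernetes.io/unschedulable
// taint in sync with node.Spec.Unschedulable, as the comment above describes.
func reconcileUnschedulableTaint(node *v1.Node) {
	idx := -1
	for i, t := range node.Spec.Taints {
		if t.Key == v1.TaintNodeUnschedulable {
			idx = i
			break
		}
	}
	switch {
	case node.Spec.Unschedulable && idx < 0:
		// Node was cordoned: add the taint so taint-aware components react.
		node.Spec.Taints = append(node.Spec.Taints, v1.Taint{
			Key:    v1.TaintNodeUnschedulable,
			Effect: v1.TaintEffectNoSchedule,
		})
	case !node.Spec.Unschedulable && idx >= 0:
		// Node became schedulable again: drop the taint.
		node.Spec.Taints = append(node.Spec.Taints[:idx], node.Spec.Taints[idx+1:]...)
	}
}

func main() {
	n := &v1.Node{}
	n.Spec.Unschedulable = true
	reconcileUnschedulableTaint(n)
	fmt.Println(len(n.Spec.Taints)) // 1: taint added while cordoned
	n.Spec.Unschedulable = false
	reconcileUnschedulableTaint(n)
	fmt.Println(len(n.Spec.Taints)) // 0: taint removed once schedulable again
}
```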
4 changes: 2 additions & 2 deletions test/e2e/autoscaling/dns_autoscaling.go
@@ -243,13 +243,13 @@ func getExpectReplicasFuncLinear(c clientset.Interface, params *DNSParamsLinear)
replicasFromNodes = math.Ceil(float64(len(nodes.Items)) / params.nodesPerReplica)
}
if params.coresPerReplica > 0 {
- replicasFromCores = math.Ceil(float64(getScheduableCores(nodes.Items)) / params.coresPerReplica)
+ replicasFromCores = math.Ceil(float64(getSchedulableCores(nodes.Items)) / params.coresPerReplica)
}
return int(math.Max(1.0, math.Max(replicasFromNodes, replicasFromCores)))
}
}

- func getScheduableCores(nodes []v1.Node) int64 {
+ func getSchedulableCores(nodes []v1.Node) int64 {
var sc resource.Quantity
for _, node := range nodes {
if !node.Spec.Unschedulable {
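The renamed helper's body is truncated above. A standalone reconstruction of the idea, plus the linear formula getExpectReplicasFuncLinear applies; summing Allocatable CPU (rather than Capacity) is an assumption, and the zero-parameter guards from the test are omitted for brevity:

```go
package main

import (
	"fmt"
	"math"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// getSchedulableCores sums CPU cores across nodes that are not cordoned,
// mirroring what the truncated helper in the diff appears to do.
func getSchedulableCores(nodes []v1.Node) int64 {
	var sc resource.Quantity
	for _, node := range nodes {
		if !node.Spec.Unschedulable {
			sc.Add(node.Status.Allocatable[v1.ResourceCPU])
		}
	}
	return sc.Value()
}

// expectedReplicas applies the linear autoscaler formula from the test:
// replicas = max(1, max(ceil(nodes/nodesPerReplica), ceil(cores/coresPerReplica))).
func expectedReplicas(nodes, cores int, nodesPerReplica, coresPerReplica float64) int {
	fromNodes := math.Ceil(float64(nodes) / nodesPerReplica)
	fromCores := math.Ceil(float64(cores) / coresPerReplica)
	return int(math.Max(1.0, math.Max(fromNodes, fromCores)))
}

func main() {
	// 5 schedulable nodes with 4 cores each, one replica per 2 nodes or per 8 cores:
	fmt.Println(expectedReplicas(5, 20, 2, 8)) // 3, since ceil(5/2) = 3 and ceil(20/8) = 3
}
```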
2 changes: 1 addition & 1 deletion test/e2e/framework/util.go
@@ -1022,7 +1022,7 @@ func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
}

// WaitForAllNodesSchedulable waits up to timeout for all
- // (but TestContext.AllowedNotReadyNodes) to become scheduable.
+ // (but TestContext.AllowedNotReadyNodes) to become schedulable.
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)

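The body of WaitForAllNodesSchedulable is not shown in the diff; here is a sketch of the contract its doc comment and log line describe. The helper names, the poll interval, and the exact readiness test are assumptions, not the framework's actual implementation:

```go
package sketch

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForAllNodesSchedulable polls until at most allowedNotReady nodes fail
// the schedulability check, or the timeout expires.
func waitForAllNodesSchedulable(c kubernetes.Interface, timeout time.Duration, allowedNotReady int) error {
	return wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
		nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			return false, nil // tolerate transient list errors and keep polling
		}
		notSchedulable := 0
		for i := range nodes.Items {
			if !isNodeSchedulable(&nodes.Items[i]) {
				notSchedulable++
			}
		}
		return notSchedulable <= allowedNotReady, nil
	})
}

// isNodeSchedulable treats a node as schedulable when it is not cordoned and
// its Ready condition is True.
func isNodeSchedulable(node *v1.Node) bool {
	if node.Spec.Unschedulable {
		return false
	}
	for _, cond := range node.Status.Conditions {
		if cond.Type == v1.NodeReady {
			return cond.Status == v1.ConditionTrue
		}
	}
	return false
}
```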
4 changes: 2 additions & 2 deletions test/e2e/storage/testsuites/subpath.go
@@ -959,7 +959,7 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec,
// Disruptive test run serially, we can cache all volume global mount
// points and verify after the test that we do not leak any global mount point.
nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
- framework.ExpectNoError(err, "while listing scheduable nodes")
+ framework.ExpectNoError(err, "while listing schedulable nodes")
globalMountPointsByNode := make(map[string]sets.String, len(nodeList.Items))
for _, node := range nodeList.Items {
globalMountPointsByNode[node.Name] = utils.FindVolumeGlobalMountPoints(hostExec, &node)
@@ -993,7 +993,7 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec,
podNode = &nodeList.Items[i]
}
}
- framework.ExpectNotEqual(podNode, nil, "pod node should exist in scheduable nodes")
+ framework.ExpectNotEqual(podNode, nil, "pod node should exist in schedulable nodes")

utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)

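The cache-and-verify bookkeeping the corrected comment describes boils down to a per-node set difference: snapshot the global mount points before the disruptive test, then anything new afterwards is a leak. A standalone sketch with illustrative names:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// leakedMountPoints returns, per node, the global mount points present after
// the test that were absent before it.
func leakedMountPoints(before, after map[string]sets.String) map[string]sets.String {
	leaks := map[string]sets.String{}
	for node, afterSet := range after {
		if diff := afterSet.Difference(before[node]); diff.Len() > 0 {
			leaks[node] = diff
		}
	}
	return leaks
}

func main() {
	before := map[string]sets.String{"node-1": sets.NewString("/mnt/a")}
	after := map[string]sets.String{"node-1": sets.NewString("/mnt/a", "/mnt/b")}
	fmt.Println(leakedMountPoints(before, after)) // node-1 leaked /mnt/b
}
```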
2 changes: 1 addition & 1 deletion test/e2e/storage/vsphere/vsphere_utils.go
@@ -762,7 +762,7 @@ func GetReadySchedulableNodeInfos() []*NodeInfo {
}

// GetReadySchedulableRandomNodeInfo returns NodeInfo object for one of the Ready and Schedulable Node.
- // if multiple nodes are present with Ready and Scheduable state then one of the Node is selected randomly
+ // if multiple nodes are present with Ready and Schedulable state then one of the Node is selected randomly
// and its associated NodeInfo object is returned.
func GetReadySchedulableRandomNodeInfo() *NodeInfo {
nodesInfo := GetReadySchedulableNodeInfos()
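The random pick the doc comment describes is simple; a standalone sketch with a pared-down NodeInfo stand-in (the real type carries vSphere metadata):

```go
package main

import (
	"fmt"
	"math/rand"
)

// NodeInfo stands in for the vSphere test helper's richer node type.
type NodeInfo struct {
	Name string
}

// pickRandomNodeInfo returns one entry at random from the ready-and-schedulable
// nodes, or nil if none are available.
func pickRandomNodeInfo(nodesInfo []*NodeInfo) *NodeInfo {
	if len(nodesInfo) == 0 {
		return nil
	}
	return nodesInfo[rand.Intn(len(nodesInfo))]
}

func main() {
	nodes := []*NodeInfo{{Name: "node-1"}, {Name: "node-2"}, {Name: "node-3"}}
	fmt.Println(pickRandomNodeInfo(nodes).Name)
}
```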
2 changes: 1 addition & 1 deletion test/integration/volumescheduling/volume_binding_test.go
@@ -381,7 +381,7 @@ func TestVolumeBindingRescheduling(t *testing.T) {
// Trigger
test.trigger(config)

- // Wait for pod is scheduled or unscheduable.
+ // Wait for pod is scheduled or unschedulable.
if !test.shouldFail {
klog.Infof("Waiting for pod is scheduled")
if err := waitForPodToSchedule(config.client, test.pod); err != nil {
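For the unschedulable branch of this wait, the scheduler reports failure through the pod's PodScheduled condition. A sketch of such a wait; the helper name and poll interval are illustrative, not necessarily how this test implements it:

```go
package sketch

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPodUnschedulable polls until the pod carries a PodScheduled
// condition with Status False and Reason Unschedulable, which is how the
// scheduler records that it tried and failed to place the pod.
func waitForPodUnschedulable(c kubernetes.Interface, namespace, name string, timeout time.Duration) error {
	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, cond := range pod.Status.Conditions {
			if cond.Type == v1.PodScheduled &&
				cond.Status == v1.ConditionFalse &&
				cond.Reason == v1.PodReasonUnschedulable {
				return true, nil
			}
		}
		return false, nil
	})
}
```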