Fix misc typos #15918

Merged
merged 1 commit into from Oct 20, 2015
4 changes: 2 additions & 2 deletions pkg/cloudprovider/providers/aws/aws.go
@@ -54,7 +54,7 @@ const TagNameKubernetesCluster = "KubernetesCluster"
// We sometimes read to see if something exists; then try to create it if we didn't find it
// This can fail once in a consistent system if done in parallel
// In an eventually consistent system, it could fail unboundedly
-// MaxReadThenCreateRetries sets the maxiumum number of attempts we will make
+// MaxReadThenCreateRetries sets the maximum number of attempts we will make
const MaxReadThenCreateRetries = 30

// Abstraction over AWS, to allow mocking/other implementations
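
A minimal sketch of the read-then-create pattern that comment describes, under stated assumptions: ensureResource, exists, and create are hypothetical names for illustration only, not functions from aws.go. The idea is to look for the resource first, create it on a miss, and retry a bounded number of times because an eventually consistent API can race concurrent writers.

package main

import "fmt"

// maxReadThenCreateRetries mirrors the constant discussed above; the value is illustrative.
const maxReadThenCreateRetries = 30

// ensureResource reads first, creates on a miss, and retries, since a create can
// fail when a concurrent caller wins the race in an eventually consistent system.
func ensureResource(exists func() (bool, error), create func() error) error {
	for attempt := 0; attempt < maxReadThenCreateRetries; attempt++ {
		found, err := exists()
		if err != nil {
			return err
		}
		if found {
			return nil
		}
		if err := create(); err == nil {
			return nil
		}
		// A failed create may just mean another caller created it first;
		// loop back and re-read instead of treating that as fatal.
	}
	return fmt.Errorf("resource still missing after %d read-then-create attempts", maxReadThenCreateRetries)
}
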
@@ -1563,7 +1563,7 @@ func (s *AWSCloud) listSubnetIDsinVPC(vpcId string) ([]string, error) {
}

// EnsureTCPLoadBalancer implements TCPLoadBalancer.EnsureTCPLoadBalancer
-// TODO(justinsb) It is weird that these take a region. I suspect it won't work cross-region anwyay.
+// TODO(justinsb) It is weird that these take a region. I suspect it won't work cross-region anyway.
func (s *AWSCloud) EnsureTCPLoadBalancer(name, region string, publicIP net.IP, ports []*api.ServicePort, hosts []string, affinity api.ServiceAffinity) (*api.LoadBalancerStatus, error) {
glog.V(2).Infof("EnsureTCPLoadBalancer(%v, %v, %v, %v, %v)", name, region, publicIP, ports, hosts)

2 changes: 1 addition & 1 deletion test/e2e/autoscaling_utils.go
@@ -43,7 +43,7 @@ const (
)

/*
-ResourceConsumer is a tool for testing. It helps create specified usage of CPU or memory (Warnig: memory not supported)
+ResourceConsumer is a tool for testing. It helps create specified usage of CPU or memory (Warning: memory not supported)
typical use case:
rc.ConsumeCPU(600)
// ... check your assumption here
12 changes: 6 additions & 6 deletions test/e2e/cluster_upgrade.go
@@ -400,7 +400,7 @@ func runCmd(command string, args ...string) (string, string, error) {
var bout, berr bytes.Buffer
cmd := exec.Command(command, args...)
// We also output to the OS stdout/stderr to aid in debugging in case cmd
-// hangs and never retruns before the test gets killed.
+// hangs and never returns before the test gets killed.
cmd.Stdout = io.MultiWriter(os.Stdout, &bout)
cmd.Stderr = io.MultiWriter(os.Stderr, &berr)
err := cmd.Run()
@@ -447,7 +447,7 @@ func validate(f Framework, svcNameWant, rcNameWant string, ingress api.LoadBalan
}

// migRollingUpdate starts a MIG rolling update, upgrading the nodes to a new
-// instance template named tmpl, and waits up to nt times the nubmer of nodes
+// instance template named tmpl, and waits up to nt times the number of nodes
// for it to complete.
func migRollingUpdate(tmpl string, nt time.Duration) error {
By(fmt.Sprintf("starting the MIG rolling update to %s", tmpl))
@@ -464,7 +464,7 @@ func migRollingUpdate(tmpl string, nt time.Duration) error {
return nil
}

-// migTemlate (GCE/GKE-only) returns the name of the MIG template that the
+// migTemplate (GCE/GKE-only) returns the name of the MIG template that the
// nodes of the cluster use.
func migTemplate() (string, error) {
var errLast error
@@ -514,7 +514,7 @@ func migRollingUpdateStart(templ string, nt time.Duration) (string, error) {
// NOTE(mikedanese): If you are changing this gcloud command, update
// cluster/gce/upgrade.sh to match this EXACTLY.
// A `rolling-updates start` call outputs what we want to stderr.
-_, output, err := retryCmd("gcloud", append(migUdpateCmdBase(),
+_, output, err := retryCmd("gcloud", append(migUpdateCmdBase(),
"rolling-updates",
fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID),
fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone),
@@ -566,7 +566,7 @@ func migRollingUpdateStart(templ string, nt time.Duration) (string, error) {
//
// TODO(mikedanese): Remove this hack on July 29, 2015 when the migration to
// `gcloud alpha compute rolling-updates` is complete.
-func migUdpateCmdBase() []string {
+func migUpdateCmdBase() []string {
b := []string{"preview"}
a := []string{"rolling-updates", "-h"}
if err := exec.Command("gcloud", append(b, a...)...).Run(); err != nil {
@@ -586,7 +586,7 @@ func migRollingUpdatePoll(id string, nt time.Duration) error {
Logf("Waiting up to %v for MIG rolling update to complete.", timeout)
if wait.Poll(restartPoll, timeout, func() (bool, error) {
// A `rolling-updates describe` call outputs what we want to stdout.
-output, _, err := retryCmd("gcloud", append(migUdpateCmdBase(),
+output, _, err := retryCmd("gcloud", append(migUpdateCmdBase(),
"rolling-updates",
fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID),
fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone),
2 changes: 1 addition & 1 deletion test/e2e/daemon_restart.go
@@ -41,7 +41,7 @@ import (
// This test primarily checks 2 things:
// 1. Daemons restart automatically within some sane time (10m).
// 2. They don't take abnormal actions when restarted in the steady state.
-// - Controller manager sholdn't overshoot replicas
+// - Controller manager shouldn't overshoot replicas
// - Kubelet shouldn't restart containers
// - Scheduler should continue assigning hosts to new pods

4 changes: 2 additions & 2 deletions test/e2e/es_cluster_logging.go
@@ -197,7 +197,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
// Use a unique namespace for the resources created in this test.
ns := f.Namespace.Name
name := "synthlogger"
-// Form a unique name to taint log lines to be colelcted.
+// Form a unique name to taint log lines to be collected.
// Replace '-' characters with '_' to prevent the analyzer from breaking apart names.
taintName := strings.Replace(ns+name, "-", "_", -1)

@@ -240,7 +240,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
}
}()

-// Wait for the syntehtic logging pods to finish.
+// Wait for the synthetic logging pods to finish.
By("Waiting for the pods to succeed.")
for _, pod := range podNames {
err = waitForPodSuccessInNamespace(f.Client, pod, "synth-logger", ns)
2 changes: 1 addition & 1 deletion test/e2e/host_path.go
@@ -95,7 +95,7 @@ var _ = Describe("hostPath", func() {
fmt.Sprintf("--retry_time=%d", retryDuration),
}
//Read the content of the file with the second container to
-//verify volumes being shared properly among continers within the pod.
+//verify volumes being shared properly among containers within the pod.
testContainerOutput("hostPath r/w", c, pod, 1, []string{
"content of file \"/test-volume/test-file\": mount-tester new file",
}, namespace.Name,
2 changes: 1 addition & 1 deletion test/e2e/job.go
@@ -217,7 +217,7 @@ func newTestJob(behavior, name string, rPol api.RestartPolicy, parallelism, comp
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 0"}
case "randomlySucceedOrFail":
// Bash's $RANDOM generates pseudorandom int in range 0 - 32767.
-// Dividing by 16384 gives roughly 50/50 chance of succeess.
+// Dividing by 16384 gives roughly 50/50 chance of success.
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit $(( $RANDOM / 16384 ))"}
}
return job
2 changes: 1 addition & 1 deletion test/e2e/kubectl.go
@@ -1116,7 +1116,7 @@ func validateReplicationControllerConfiguration(rc api.ReplicationController) {
}

// getUDData creates a validator function based on the input string (i.e. kitten.jpg).
-// For example, if you send "kitten.jpg", this function veridies that the image jpg = kitten.jpg
+// For example, if you send "kitten.jpg", this function verifies that the image jpg = kitten.jpg
// in the container's json field.
func getUDData(jpgExpected string, ns string) func(*client.Client, string) error {

8 changes: 4 additions & 4 deletions test/e2e/kubelet_stats.go
@@ -147,7 +147,7 @@ func HighLatencyKubeletOperations(c *client.Client, threshold time.Duration, nod
return badMetrics, nil
}

-// getContainerInfo contacts kubelet for the container informaton. The "Stats"
+// getContainerInfo contacts kubelet for the container information. The "Stats"
// in the returned ContainerInfo is subject to the requirements in statsRequest.
func getContainerInfo(c *client.Client, nodeName string, req *kubelet.StatsRequest) (map[string]cadvisorapi.ContainerInfo, error) {
reqBody, err := json.Marshal(req)
@@ -214,14 +214,14 @@ func (r *containerResourceUsage) isStrictlyGreaterThan(rhs *containerResourceUsa
// cpuInterval.
// The acceptable range of the interval is 2s~120s. Be warned that as the
// interval (and #containers) increases, the size of kubelet's response
-// could be sigificant. E.g., the 60s interval stats for ~20 containers is
+// could be significant. E.g., the 60s interval stats for ~20 containers is
// ~1.5MB. Don't hammer the node with frequent, heavy requests.
//
// cadvisor records cumulative cpu usage in nanoseconds, so we need to have two
// stats points to compute the cpu usage over the interval. Assuming cadvisor
// polls every second, we'd need to get N stats points for N-second interval.
// Note that this is an approximation and may not be accurate, hence we also
-// write the actual interval used for calcuation (based on the timestampes of
+// write the actual interval used for calculation (based on the timestamps of
// the stats points in containerResourceUsage.CPUInterval.
func getOneTimeResourceUsageOnNode(c *client.Client, nodeName string, cpuInterval time.Duration) (map[string]*containerResourceUsage, error) {
numStats := int(float64(cpuInterval.Seconds()) / cadvisorStatsPollingIntervalInSeconds)
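
As a rough illustration of the calculation that comment describes (a hypothetical helper, not code from kubelet_stats.go): the average usage in cores over the window is the delta between two cumulative cadvisor samples, in nanoseconds of CPU time, divided by the wall-clock nanoseconds between the samples' timestamps.

package main

import (
	"fmt"
	"time"
)

// cpuUsageCores estimates average CPU usage (in cores) between two cumulative
// samples: CPU nanoseconds consumed divided by wall-clock nanoseconds elapsed.
func cpuUsageCores(startNanos, endNanos uint64, startTime, endTime time.Time) float64 {
	interval := endTime.Sub(startTime)
	if interval <= 0 || endNanos < startNanos {
		return 0
	}
	return float64(endNanos-startNanos) / float64(interval.Nanoseconds())
}

func main() {
	// Example: 1.5e9 ns of CPU consumed over a 3 s window averages out to 0.5 cores.
	start := time.Now()
	fmt.Println(cpuUsageCores(0, 1500000000, start, start.Add(3*time.Second)))
}
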
@@ -367,7 +367,7 @@ func newResourceCollector(c *client.Client, nodeName string, containerNames []st
}
}

-// Start starts a goroutine to poll the node every pollingInerval.
+// Start starts a goroutine to poll the node every pollingInterval.
func (r *resourceCollector) Start() {
r.stopCh = make(chan struct{}, 1)
// Keep the last observed stats for comparison.
4 changes: 2 additions & 2 deletions test/e2e/kubeproxy.go
@@ -184,10 +184,10 @@ func (config *KubeProxyTestConfig) hitNodePort(epCount int) {
By("dialing(http) endpoint container --> node1:nodeHttpPort")
config.dialFromEndpointContainer("http", node1_IP, nodeHttpPort, tries, epCount)

-// TODO: doesnt work because masquerading is not done
+// TODO: doesn't work because masquerading is not done
By("TODO: Test disabled. dialing(udp) node --> 127.0.0.1:nodeUdpPort")
//config.dialFromNode("udp", "127.0.0.1", nodeUdpPort, tries, epCount)
-// TODO: doesnt work because masquerading is not done
+// TODO: doesn't work because masquerading is not done
By("Test disabled. dialing(http) node --> 127.0.0.1:nodeHttpPort")
//config.dialFromNode("http", "127.0.0.1", nodeHttpPort, tries, epCount)

2 changes: 1 addition & 1 deletion test/e2e/monitoring.go
@@ -75,7 +75,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
// rather than an explicit name is preferred because the names will typically have
// a version suffix e.g. heapster-monitoring-v1 and this will change after a rolling
// update e.g. to heapster-monitoring-v2. By using a label query we can check for the
-// situaiton when a heapster-monitoring-v1 and heapster-monitoring-v2 replication controller
+// situation when a heapster-monitoring-v1 and heapster-monitoring-v2 replication controller
// is running (which would be an error except during a rolling update).
for _, rcLabel := range rcLabels {
rcList, err := c.ReplicationControllers(api.NamespaceSystem).List(labels.Set{"k8s-app": rcLabel}.AsSelector(), fields.Everything())
2 changes: 1 addition & 1 deletion test/e2e/pd.go
@@ -326,7 +326,7 @@ func createPD() (string, error) {
pdName := fmt.Sprintf("%s-%s", testContext.prefix, string(util.NewUUID()))

zone := testContext.CloudConfig.Zone
-// TODO: make this hit the compute API directly instread of shelling out to gcloud.
+// TODO: make this hit the compute API directly instead of shelling out to gcloud.
err := exec.Command("gcloud", "compute", "--quiet", "--project="+testContext.CloudConfig.ProjectID, "disks", "create", "--zone="+zone, "--size=10GB", pdName).Run()
if err != nil {
return "", err
2 changes: 1 addition & 1 deletion test/e2e/restart.go
@@ -33,7 +33,7 @@ import (
const (
// How long each node is given during a process that restarts all nodes
// before the test is considered failed. (Note that the total time to
-// restart all nodes will be this number times the nubmer of nodes.)
+// restart all nodes will be this number times the number of nodes.)
restartPerNodeTimeout = 5 * time.Minute

// How often to poll the statues of a restart.
4 changes: 2 additions & 2 deletions test/e2e/serviceloadbalancers.go
@@ -69,7 +69,7 @@ type LBCTester interface {
getName() string
}

-// haproxyControllerTester implementes LBCTester for bare metal haproxy LBs.
+// haproxyControllerTester implements LBCTester for bare metal haproxy LBs.
type haproxyControllerTester struct {
client *client.Client
cfg string
@@ -174,7 +174,7 @@ func (s *ingManager) start(namespace string) (err error) {
}
}
// Create services.
-// Note that it's upto the caller to make sure the service actually matches
+// Note that it's up to the caller to make sure the service actually matches
// the pods of the rc.
for _, svcPath := range s.svcCfgPaths {
svc := svcFromManifest(svcPath)
4 changes: 2 additions & 2 deletions test/e2e/util.go
@@ -560,7 +560,7 @@ func deleteNS(c *client.Client, namespace string, timeout time.Duration) error {
}
}

-// a timeout occured
+// a timeout occurred
if err != nil {
if missingTimestamp {
return fmt.Errorf("namespace %s was not deleted within limit: %v, some pods were not marked with a deletion timestamp, pods remaining: %v", namespace, err, remaining)
@@ -888,7 +888,7 @@ func loadClient() (*client.Client, error) {
// TODO: Allow service names to have the same form as names
// for pods and replication controllers so we don't
// need to use such a function and can instead
-// use the UUID utilty function.
+// use the UUID utility function.
func randomSuffix() string {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
return strconv.Itoa(r.Int() % 10000)