Vagrantfile: node_ip = $node_ips[n]
cluster/addons/addon-manager/kube-addons.sh:# Create admission_control objects if defined before any other addon services. If the limits
cluster/addons/registry/images/Dockerfile:ADD run_proxy.sh /usr/bin/run_proxy
cluster/addons/registry/images/Dockerfile:CMD ["/usr/bin/run_proxy"]
cluster/aws/templates/configure-vm-aws.sh: # We set the hostname_override to the full EC2 private dns name
cluster/aws/templates/configure-vm-aws.sh: api_servers: '${API_SERVERS}'
cluster/aws/templates/configure-vm-aws.sh: env-to-grains "hostname_override"
cluster/aws/templates/configure-vm-aws.sh: env-to-grains "runtime_config"
cluster/aws/templates/configure-vm-aws.sh: kubelet_api_servers: '${KUBELET_APISERVER}'
cluster/centos/util.sh: local node_ip=${node#*@}
cluster/gce/configure-vm.sh: advertise_address: '${EXTERNAL_IP}'
cluster/gce/configure-vm.sh: api_servers: '${KUBERNETES_MASTER_NAME}'
cluster/gce/configure-vm.sh: cloud_config: ${CLOUD_CONFIG}
cluster/gce/configure-vm.sh: env-to-grains "feature_gates"
cluster/gce/configure-vm.sh: env-to-grains "runtime_config"
cluster/gce/configure-vm.sh: kubelet_api_servers: '${KUBELET_APISERVER}'
cluster/gce/gci/configure-helper.sh: local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
cluster/gce/gci/configure-helper.sh: sed -i -e "s@{{ *storage_backend *}}@${STORAGE_BACKEND:-}@g" "${temp_file}"
cluster/gce/gci/configure-helper.sh: sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
cluster/gce/trusty/configure-helper.sh: sed -i -e "s@{{ *storage_backend *}}@${STORAGE_BACKEND:-}@g" "${temp_file}"
cluster/gce/trusty/configure-helper.sh: sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
cluster/gce/util.sh: local node_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
cluster/juju/layers/kubernetes/reactive/k8s.py: check_call(split(cmd.format(kubeconfig, cluster_name, server, ca)))
cluster/juju/layers/kubernetes/reactive/k8s.py: check_call(split(cmd.format(kubeconfig, context, cluster_name, user)))
cluster/juju/layers/kubernetes/reactive/k8s.py: client_key = '/srv/kubernetes/client.key'
cluster/juju/layers/kubernetes/reactive/k8s.py: cluster_name = 'kubernetes'
cluster/juju/layers/kubernetes/reactive/k8s.py: tlslib.client_key(None, client_key, user='ubuntu', group='ubuntu')
cluster/lib/logging.sh: local source_file=${BASH_SOURCE[$frame_no]}
cluster/lib/logging.sh: local source_file=${BASH_SOURCE[$stack_skip]}
cluster/log-dump.sh: local -r node_name="${1}"
cluster/log-dump.sh: for node_name in "${node_names[@]}"; do
cluster/log-dump.sh:readonly report_dir="${1:-_artifacts}"
cluster/mesos/docker/km/build.sh: km_path=$(find-binary km darwin/amd64)
cluster/photon-controller/templates/salt-master.sh: api_servers: $MASTER_NAME
cluster/photon-controller/templates/salt-minion.sh: hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}')
cluster/photon-controller/util.sh: node_ip=$(${PHOTON} vm networks "${node_id}" | grep -i $'\t'"00:0C:29" | grep -E '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -1 | awk -F'\t' '{print $3}')
cluster/photon-controller/util.sh: local cert_dir="/srv/kubernetes"
cluster/photon-controller/util.sh: node_name=${1}
cluster/rackspace/util.sh: local node_ip=$(nova show --minimal ${NODE_NAMES[$i]} \
cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest:{% set params = pillar['autoscaler_mig_config'] + " " + cloud_config -%}
cluster/saltbase/salt/etcd/etcd.manifest: "value": "{{ storage_backend }}"
cluster/saltbase/salt/etcd/etcd.manifest:{% set storage_backend = pillar.get('storage_backend', 'etcd2') -%}
cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + feature_gates + " " + admission_control + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% if pillar.get('enable_hostpath_provisioner', '').lower() == 'true' -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + enable_garbage_collector + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = params + " " + feature_gates -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers + ":6443" -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set cluster_cidr=" --cluster-cidr=" + pillar['cluster_cidr'] %}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest:{% set params = log_level + " " + feature_gates + " " + test_args -%}
cluster/saltbase/salt/kube-scheduler/kube-scheduler.manifest:{% set params = params + log_level + " " + feature_gates + " " + scheduling_algorithm_provider -%}
cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers + ":6443" -%}
cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers -%}
cluster/saltbase/salt/kubelet/default: {% set enable_custom_metrics="--enable-custom-metrics=" + pillar['enable_custom_metrics'] %}
cluster/saltbase/salt/kubelet/default: {% set eviction_hard="--eviction-hard=" + pillar['eviction_hard'] %}
cluster/saltbase/salt/kubelet/default: {% set kubelet_port="--port=" + pillar['kubelet_port'] %}
cluster/saltbase/salt/kubelet/default: {% set node_labels="--node-labels=" + pillar['node_labels'] %}
cluster/saltbase/salt/kubelet/default:{% if grains['feature_gates'] is defined -%}
cluster/saltbase/salt/kubelet/default:{% if pillar.get('non_masquerade_cidr','') -%}
cluster/saltbase/salt/opencontrail-networking-master/init.sls: - 'SERVICE_CLUSTER_IP_RANGE': '{{ pillar.get('service_cluster_ip_range') }}'
cluster/saltbase/salt/opencontrail-networking-minion/init.sls: - 'SERVICE_CLUSTER_IP_RANGE': '{{ pillar.get('service_cluster_ip_range') }}'
cluster/saltbase/salt/supervisor/kubelet-checker.sh: {% set kubelet_port = pillar['kubelet_port'] -%}
cluster/saltbase/salt/supervisor/supervisor_watcher.sh:# Apply oom_score_adj: -901 to processes
cluster/ubuntu/util.sh: local node_ip=${1}
cluster/vagrant/provision-utils.sh: api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
cluster/vagrant/provision-utils.sh: node_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
cluster/vagrant/provision-utils.sh: runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
cluster/vsphere/templates/salt-master.sh: cloud_config: $CLOUD_CONFIG
cluster/vsphere/templates/salt-minion.sh: cloud_config: $CLOUD_CONFIG
examples/cluster-dns/images/frontend/client.py: service_address = socket.gethostbyname(hostname)
examples/storage/cassandra/image/files/run.sh: cluster_name \
examples/storage/vitess/env.sh: node_ip=$(get_node_ip)
federation/cluster/common.sh: local cert_dir="${kube_temp}/easy-rsa-master/easyrsa3"
federation/deploy/config.json.sample: "cloud_provider": "gce",
federation/deploy/config.json.sample: "cloud_provider": "gce",
federation/deploy/config.json.sample: "cloud_provider": "gce",
federation/deploy/config.json.sample: "cluster_cidr": "10.180.0.0/14",
federation/deploy/config.json.sample: "cluster_cidr": "10.184.0.0/14",
federation/deploy/config.json.sample: "cluster_cidr": "10.188.0.0/14",
federation/deploy/config.json.sample: "cluster_name": "cluster1-kubernetes",
federation/deploy/config.json.sample: "cluster_name": "cluster2-kubernetes",
federation/deploy/config.json.sample: "cluster_name": "cluster3-kubernetes",
federation/deploy/config.json.sample: "num_nodes": 3,
federation/deploy/config.json.sample: "num_nodes": 3,
federation/deploy/config.json.sample: "num_nodes": 3,
hack/local-up-cluster.sh: advertise_address="--advertise_address=${API_HOST_IP}"
hack/local-up-cluster.sh: runtime_config="--runtime-config=${RUNTIME_CONFIG}"
hack/local-up-cluster.sh: advertise_address=""
hack/local-up-cluster.sh: runtime_config=""
hack/make-rules/test-e2e-node.sh: image_project=${IMAGE_PROJECT:-"google-containers"}
hack/make-rules/test-e2e-node.sh: delete_instances=${DELETE_INSTANCES:-"false"}
hack/make-rules/test-e2e-node.sh: image_project=${IMAGE_PROJECT:-"kubernetes-node-e2e-images"}
hack/test-update-storage-objects.sh: local storage_backend=${1:-"${STORAGE_BACKEND_ETCD2}"}
hack/test-update-storage-objects.sh: local storage_media_type=${3:-""}
hack/test-update-storage-objects.sh: local storage_versions=${2:-""}
hack/test-update-storage-objects.sh: source_file=${test_data[0]}
hack/test-update-storage-objects.sh:# source_file,resource,namespace,name,old_version,new_version
pkg/kubelet/api/v1alpha1/runtime/api.pb.go: ContainerPort *int32 `protobuf:"varint,2,opt,name=container_port,json=containerPort" json:"container_port,omitempty"`
pkg/kubelet/api/v1alpha1/runtime/api.pb.go: OomScoreAdj *int64 `protobuf:"varint,5,opt,name=oom_score_adj,json=oomScoreAdj" json:"oom_score_adj,omitempty"`
pkg/kubelet/api/v1alpha1/runtime/api.pb.go: PodCidr *string `protobuf:"bytes,1,opt,name=pod_cidr,json=podCidr" json:"pod_cidr,omitempty"`
pkg/kubelet/api/v1alpha1/runtime/api.pb.go: RuntimeConfig *RuntimeConfig `protobuf:"bytes,1,opt,name=runtime_config,json=runtimeConfig" json:"runtime_config,omitempty"`
pkg/kubelet/api/v1alpha1/runtime/api.proto: optional RuntimeConfig runtime_config = 1;
pkg/kubelet/api/v1alpha1/runtime/api.proto: optional int32 container_port = 2;
pkg/kubelet/api/v1alpha1/runtime/api.proto: optional int64 oom_score_adj = 5;
pkg/kubelet/api/v1alpha1/runtime/api.proto: optional string pod_cidr = 1;
pkg/kubelet/cm/container_manager_linux.go: glog.V(3).Infof("Failed to apply oom_score_adj %d for pid %d: %v", oomScoreAdj, pid, err)
pkg/kubelet/cm/container_manager_linux.go: glog.V(5).Infof("attempting to apply oom_score_adj of %d to pid %d", oomScoreAdj, pid)
pkg/kubelet/network/hairpin/hairpin.go: hairpinModeRelativePath = "hairpin_mode"
pkg/kubelet/qos/policy_test.go: t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOOMScoreAdj, test.highOOMScoreAdj, oomScoreAdj)
pkg/kubelet/qos/policy_test.go: highOOMScoreAdj int // The min oom_score_adj score the container should be assigned.
pkg/kubelet/qos/policy_test.go: lowOOMScoreAdj int // The max oom_score_adj score the container should be assigned.
pkg/util/oom/oom_linux.go: return fmt.Errorf("invalid PID %d specified for oom_score_adj", pid)
pkg/util/oom/oom_linux.go: oomScoreAdjPath := path.Join("/proc", pidStr, "oom_score_adj")
pkg/util/oom/oom_linux.go:// Writes 'value' to /proc/<pid>/oom_score_adj for all processes in cgroup cgroupName.
pkg/util/oom/oom_linux.go:// Writes 'value' to /proc/<pid>/oom_score_adj. PID = 0 means self
test/e2e/common/configmap.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volume/data-1"},
test/e2e/common/downwardapi_volume.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
test/e2e/common/host_path.go: fmt.Sprintf("--file_content_in_loop=%v", filePath),
test/e2e/common/host_path.go: fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
test/e2e/common/host_path.go: fmt.Sprintf("--retry_time=%d", retryDuration),
test/e2e/common/host_path.go: fmt.Sprintf("--retry_time=%d", retryDuration),
test/e2e_node/container_manager_test.go: return fmt.Errorf("expected pid %d's oom_score_adj to be %d; found %d", pid, expectedOOMScoreAdj, oomScore)
test/e2e_node/container_manager_test.go: return fmt.Errorf("expected pid %d's oom_score_adj to be < %d; found %d", pid, expectedMaxOOMScoreAdj, oomScore)
test/e2e_node/container_manager_test.go: return fmt.Errorf("expected pid %d's oom_score_adj to be >= %d; found %d", pid, expectedMinOOMScoreAdj, oomScore)
test/e2e_node/container_manager_test.go: return fmt.Errorf("failed to get oom_score_adj for %d", pid)
test/e2e_node/container_manager_test.go: return fmt.Errorf("failed to get oom_score_adj for %d: %v", pid, err)
test/e2e_node/container_manager_test.go: procfsPath := path.Join("/proc", strconv.Itoa(pid), "oom_score_adj")
test/images/mount-tester/mt.go: flag.BoolVar(&breakOnExpectedContent, "break_on_expected_content", true, "Break out of loop on expected content, (use with --file_content_in_loop flag only)")
test/images/mount-tester/mt.go: flag.IntVar(&retryDuration, "retry_time", 180, "Retry time during the loop")
test/images/mount-tester/mt.go: flag.StringVar(&readFileContentInLoopPath, "file_content_in_loop", "", "Path to read the file content in loop from")