add CIDR allocator for NodeController #19242

Merged: 4 commits, May 20, 2016
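For readers skimming the diff: the change wires a CIDR allocator into the NodeController that carves the cluster pod CIDR into fixed-size per-node subnets (sized by the new --node-cidr-mask-size flag, default 24) and assigns one to each node, keeping the service CIDR out of the allocatable pool so pod and service ranges cannot collide. The sketch below is only an illustration of that idea, not the code added by this PR; every name in it is made up, and it ignores releasing CIDRs when nodes are deleted.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// cidrAllocator hands out /maskSize subnets carved out of clusterCIDR.
// Illustrative only: the real controller also tracks which CIDRs are in use
// and marks the service range as occupied before allocating.
type cidrAllocator struct {
	clusterCIDR *net.IPNet
	maskSize    int
	next        uint32 // index of the next unallocated subnet
	max         uint32 // total number of subnets that fit
}

func newCIDRAllocator(clusterCIDR *net.IPNet, maskSize int) *cidrAllocator {
	clusterMaskSize, _ := clusterCIDR.Mask.Size()
	return &cidrAllocator{
		clusterCIDR: clusterCIDR,
		maskSize:    maskSize,
		max:         uint32(1) << uint(maskSize-clusterMaskSize),
	}
}

// allocateNext returns the next free per-node subnet, or an error once the
// cluster CIDR is exhausted.
func (a *cidrAllocator) allocateNext() (*net.IPNet, error) {
	if a.next >= a.max {
		return nil, fmt.Errorf("CIDR range %v is exhausted", a.clusterCIDR)
	}
	base := binary.BigEndian.Uint32(a.clusterCIDR.IP.To4())
	ip := make(net.IP, 4)
	binary.BigEndian.PutUint32(ip, base+a.next<<uint(32-a.maskSize))
	a.next++
	return &net.IPNet{IP: ip, Mask: net.CIDRMask(a.maskSize, 32)}, nil
}

func main() {
	_, clusterCIDR, _ := net.ParseCIDR("10.244.0.0/16") // example cluster CIDR
	alloc := newCIDRAllocator(clusterCIDR, 24)
	for i := 0; i < 3; i++ {
		cidr, _ := alloc.allocateNext()
		fmt.Println(cidr) // 10.244.0.0/24, 10.244.1.0/24, 10.244.2.0/24
	}
}
```

With a /16 cluster CIDR and /24 node CIDRs, an allocator like this can serve 256 nodes before reporting exhaustion.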
3 changes: 3 additions & 0 deletions cluster/gce/trusty/configure-helper.sh
@@ -483,6 +483,9 @@ start_kube_controller_manager() {
if [ -n "${CLUSTER_IP_RANGE:-}" ]; then
params="${params} --cluster-cidr=${CLUSTER_IP_RANGE}"
fi
if [ -n "${SERVICE_IP_RANGE:-}" ]; then
params="${params} --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
fi
if [ "${ALLOCATE_NODE_CIDRS:-}" = "true" ]; then
params="${params} --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}"
fi
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest
@@ -1,6 +1,7 @@
{% set cluster_name = "" -%}
{% set cluster_cidr = "" -%}
{% set allocate_node_cidrs = "" -%}
{% set service_cluster_ip_range = "" %}
{% set terminated_pod_gc = "" -%}


@@ -10,6 +11,9 @@
{% if pillar['cluster_cidr'] is defined and pillar['cluster_cidr'] != "" -%}
{% set cluster_cidr = "--cluster-cidr=" + pillar['cluster_cidr'] -%}
{% endif -%}
{% if pillar['service_cluster_ip_range'] is defined and pillar['service_cluster_ip_range'] != "" -%}
Review comments on this line:

Contributor: You need to create those pillars in configure-vm.sh

Member: @gmarek: That pillar is defined.

    service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'

Contributor: Ah, yes - another advantage of reusing existing flags ;)

{% set service_cluster_ip_range = "--service_cluster_ip_range=" + pillar['service_cluster_ip_range'] -%}
{% endif -%}
# When we're using flannel it is responsible for cidr allocation.
# This is expected to be a short-term compromise.
{% if pillar.get('network_provider', '').lower() == 'flannel' %}
@@ -59,7 +63,7 @@
{% set log_level = pillar['controller_manager_test_log_level'] -%}
{% endif -%}

{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + terminated_pod_gc + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}


# test_args has to be kept at the end, so they'll overwrite any prior configuration
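On the pillar question raised in the review thread above: the sed expression shown there doubles embedded single quotes, which is the escaping rule for single-quoted YAML scalars, so the SERVICE_CLUSTER_IP_RANGE value can be dropped safely into the salt pillar file. A rough Go equivalent of that transformation, purely to illustrate the quoting rule (not code from this PR):

```go
package main

import (
	"fmt"
	"strings"
)

// quoteForYAML wraps a value in single quotes, doubling any embedded single
// quotes - the same effect as the sed -e "s/'/''/g" used in the pillar.
func quoteForYAML(v string) string {
	return "'" + strings.ReplaceAll(v, "'", "''") + "'"
}

func main() {
	fmt.Println("service_cluster_ip_range: " + quoteForYAML("10.0.0.0/16"))
	// Output: service_cluster_ip_range: '10.0.0.0/16'
}
```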
2 changes: 1 addition & 1 deletion cmd/integration/integration.go
@@ -206,7 +206,7 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string
go podInformer.Run(wait.NeverStop)

nodeController := nodecontroller.NewNodeController(nil, clientset, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(),
40*time.Second, 60*time.Second, 5*time.Second, nil, false)
40*time.Second, 60*time.Second, 5*time.Second, nil, nil, 0, false)
nodeController.Run(5 * time.Second)
cadvisorInterface := new(cadvisortest.Fake)

3 changes: 2 additions & 1 deletion cmd/kube-controller-manager/app/controllermanager.go
@@ -227,10 +227,11 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig

// this cidr has been validated already
_, clusterCIDR, _ := net.ParseCIDR(s.ClusterCIDR)
_, serviceCIDR, _ := net.ParseCIDR(s.ServiceCIDR)
nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-controller")),
s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, s.AllocateNodeCIDRs)
s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, serviceCIDR, int(s.NodeCIDRMaskSize), s.AllocateNodeCIDRs)
nodeController.Run(s.NodeSyncPeriod.Duration)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

3 changes: 3 additions & 0 deletions cmd/kube-controller-manager/app/options/options.go
@@ -70,6 +70,7 @@ func NewCMServer() *CMServer {
NodeStartupGracePeriod: unversioned.Duration{Duration: 60 * time.Second},
NodeMonitorPeriod: unversioned.Duration{Duration: 5 * time.Second},
ClusterName: "kubernetes",
NodeCIDRMaskSize: 24,
TerminatedPodGCThreshold: 12500,
VolumeConfiguration: componentconfig.VolumeConfiguration{
EnableHostPathProvisioning: false,
@@ -141,6 +142,8 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
fs.StringVar(&s.ClusterName, "cluster-name", s.ClusterName, "The instance prefix for the cluster")
fs.StringVar(&s.ClusterCIDR, "cluster-cidr", s.ClusterCIDR, "CIDR Range for Pods in cluster.")
fs.StringVar(&s.ServiceCIDR, "service-cluster-ip-range", s.ServiceCIDR, "CIDR Range for Services in cluster.")
fs.Int32Var(&s.NodeCIDRMaskSize, "node-cidr-mask-size", s.NodeCIDRMaskSize, "Mask size for node cidr in cluster.")
fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.")
fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.")
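A note on the new --node-cidr-mask-size default of 24: together with the cluster CIDR prefix length it bounds how many nodes can receive a pod CIDR, since at most 2^(node-cidr-mask-size − cluster prefix length) subnets can be handed out. A small, self-contained check of that arithmetic (the /14 cluster CIDR here is just an example value):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	_, clusterCIDR, _ := net.ParseCIDR("10.244.0.0/14") // example cluster CIDR
	nodeCIDRMaskSize := 24                              // the new flag's default

	clusterMaskSize, _ := clusterCIDR.Mask.Size()
	maxNodes := 1 << uint(nodeCIDRMaskSize-clusterMaskSize)
	fmt.Printf("a %v cluster CIDR with /%d node CIDRs supports %d nodes\n",
		clusterCIDR, nodeCIDRMaskSize, maxNodes)
	// Output: a 10.244.0.0/14 cluster CIDR with /24 node CIDRs supports 1024 nodes
}
```

This is why large clusters pair a wide cluster CIDR (e.g. a /14) with the /24 per-node default.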
3 changes: 2 additions & 1 deletion contrib/mesos/pkg/controllermanager/controllermanager.go
@@ -153,10 +153,11 @@ func (s *CMServer) Run(_ []string) error {
glog.Fatalf("Cloud provider could not be initialized: %v", err)
}
_, clusterCIDR, _ := net.ParseCIDR(s.ClusterCIDR)
_, serviceCIDR, _ := net.ParseCIDR(s.ServiceCIDR)
nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-controller")),
s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, s.AllocateNodeCIDRs)
s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, serviceCIDR, int(s.NodeCIDRMaskSize), s.AllocateNodeCIDRs)
nodeController.Run(s.NodeSyncPeriod.Duration)

nodeStatusUpdaterController := node.NewStatusUpdater(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-status-controller")), s.NodeMonitorPeriod.Duration, time.Now)
4 changes: 3 additions & 1 deletion docs/admin/kube-controller-manager.md
@@ -87,6 +87,7 @@ kube-controller-manager
--master="": The address of the Kubernetes API server (overrides any value in kubeconfig)
--min-resync-period=12h0m0s: The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod
--namespace-sync-period=5m0s: The period for syncing namespace life-cycle updates
--node-cidr-mask-size=24: Mask size for node cidr in cluster.
--node-monitor-grace-period=40s: Amount of time which we allow running Node to be unresponsive before marking it unhealty. Must be N times more than kubelet's nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet to post node status.
--node-monitor-period=5s: The period for syncing NodeStatus in NodeController.
--node-startup-grace-period=1m0s: Amount of time which we allow starting Node to be unresponsive before marking it unhealty.
@@ -106,11 +107,12 @@ kube-controller-manager
--resource-quota-sync-period=5m0s: The period for syncing quota usage status in the system
--root-ca-file="": If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.
--service-account-private-key-file="": Filename containing a PEM-encoded private RSA key used to sign service account tokens.
--service-cluster-ip-range="": CIDR Range for Services in cluster.
--service-sync-period=5m0s: The period for syncing services with their external load balancers
--terminated-pod-gc-threshold=12500: Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.
```

###### Auto generated by spf13/cobra on 21-Apr-2016
###### Auto generated by spf13/cobra on 17-May-2016


<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
2 changes: 1 addition & 1 deletion hack/verify-flags/exceptions.txt
@@ -34,7 +34,7 @@ cluster/photon-controller/util.sh: node_name=${1}
cluster/rackspace/util.sh: local node_ip=$(nova show --minimal ${NODE_NAMES[$i]} \
cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + terminated_pod_gc + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers + ":6443" -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set cluster_cidr=" --cluster-cidr=" + pillar['cluster_cidr'] %}
1 change: 1 addition & 0 deletions hack/verify-flags/known-flags.txt
@@ -287,6 +287,7 @@ network-plugin
network-plugin-dir
no-headers
no-suggestions
node-cidr-mask-size
node-instance-group
node-ip
node-labels
2 changes: 2 additions & 0 deletions pkg/apis/componentconfig/deep_copy_generated.go
@@ -117,6 +117,8 @@ func DeepCopy_componentconfig_KubeControllerManagerConfiguration(in KubeControll
out.EnableProfiling = in.EnableProfiling
out.ClusterName = in.ClusterName
out.ClusterCIDR = in.ClusterCIDR
out.ServiceCIDR = in.ServiceCIDR
out.NodeCIDRMaskSize = in.NodeCIDRMaskSize
out.AllocateNodeCIDRs = in.AllocateNodeCIDRs
out.RootCAFile = in.RootCAFile
out.ContentType = in.ContentType