
Remove restriction that cluster-cidr be a class-b
cjcullen committed May 6, 2015
1 parent 7ce7568 commit fbd125e
Showing 12 changed files with 33 additions and 27 deletions.
2 changes: 1 addition & 1 deletion cluster/gce/config-default.sh
@@ -37,7 +37,7 @@ MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_TAG="${INSTANCE_PREFIX}-master"
MINION_TAG="${INSTANCE_PREFIX}-minion"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
-CLUSTER_IP_RANGE="${KUBE_GCE_CLUSTER_CLASS_B:-10.244}.0.0/16"
+CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}"
MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/monitoring" "https://www.googleapis.com/auth/logging.write")
# Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
POLL_SLEEP_INTERVAL=3
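(Note: the ${CLUSTER_IP_RANGE:-10.244.0.0/16} idiom keeps 10.244.0.0/16 as the default while letting callers override the whole CIDR from the environment; for example, exporting CLUSTER_IP_RANGE=10.100.0.0/14 before bringing the cluster up would now be accepted. Illustrative override, assuming the usual flow in which kube-up.sh sources this config file.)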
2 changes: 1 addition & 1 deletion cluster/gce/config-test.sh
@@ -36,7 +36,7 @@ INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-e2e-test-${USER}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_TAG="${INSTANCE_PREFIX}-master"
MINION_TAG="${INSTANCE_PREFIX}-minion"
-CLUSTER_IP_RANGE="${KUBE_GCE_CLUSTER_CLASS_B:-10.245}.0.0/16"
+CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/logging.write" "https://www.googleapis.com/auth/monitoring")
# Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
2 changes: 1 addition & 1 deletion cluster/gce/configure-vm.sh
@@ -236,7 +236,7 @@ function create-salt-pillar() {
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
-cluster_class_b: '$(echo "$KUBE_GCE_CLUSTER_CLASS_B" | sed -e "s/'/''/g")'
+cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
allocate_node_cidrs: '$(echo "$ALLOCATE_NODE_CIDRS" | sed -e "s/'/''/g")'
portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")'
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
2 changes: 1 addition & 1 deletion cluster/gce/coreos/helper.sh
@@ -28,7 +28,7 @@ function build-kube-env {
ENV_TIMESTAMP: $(yaml-quote $(date -u +%Y-%m-%dT%T%z))
INSTANCE_PREFIX: $(yaml-quote ${INSTANCE_PREFIX})
NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
-KUBE_GCE_CLUSTER_CLASS_B: $(yaml-quote ${KUBE_GCE_CLUSTER_CLASS_B:-10.244})
+CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL})
SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL})
PORTAL_NET: $(yaml-quote ${PORTAL_NET})
2 changes: 1 addition & 1 deletion cluster/gce/debian/helper.sh
@@ -26,7 +26,7 @@ function build-kube-env {
ENV_TIMESTAMP: $(yaml-quote $(date -u +%Y-%m-%dT%T%z))
INSTANCE_PREFIX: $(yaml-quote ${INSTANCE_PREFIX})
NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
-KUBE_GCE_CLUSTER_CLASS_B: $(yaml-quote ${KUBE_GCE_CLUSTER_CLASS_B:-10.244})
+CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL})
SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL})
PORTAL_NET: $(yaml-quote ${PORTAL_NET})
@@ -1,6 +1,6 @@
{% set machines = ""-%}
{% set cluster_name = "" -%}
-{% set cluster_class_b = "" -%}
+{% set cluster_cidr = "" -%}
{% set allocate_node_cidrs = "" -%}
{% set minion_regexp = "--minion_regexp=.*" -%}
{% set sync_nodes = "--sync_nodes=true" -%}
@@ -11,8 +11,8 @@
{% if pillar['instance_prefix'] is defined -%}
{% set cluster_name = "--cluster_name=" + pillar['instance_prefix'] -%}
{% endif -%}
-{% if pillar['cluster_class_b'] is defined -%}
-{% set cluster_class_b = "--cluster-class-b=" + pillar['cluster_class_b'] -%}
+{% if pillar['cluster_cidr'] is defined -%}
+{% set cluster_cidr = "--cluster-cidr=" + pillar['cluster_cidr'] -%}
{% endif -%}
{% if pillar['allocate_node_cidrs'] is defined -%}
{% set allocate_node_cidrs = "--allocate-node-cidrs=" + pillar['allocate_node_cidrs'] -%}
@@ -55,7 +55,7 @@
{% endif -%}
{% endif -%}

-{% set params = "--master=127.0.0.1:8080" + " " + machines + " " + cluster_name + " " + cluster_class_b + " " + allocate_node_cidrs + " " + minion_regexp + " " + cloud_provider + " " + sync_nodes + " " + cloud_config + " " + pillar['log_level'] -%}
+{% set params = "--master=127.0.0.1:8080" + " " + machines + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + minion_regexp + " " + cloud_provider + " " + sync_nodes + " " + cloud_config + " " + pillar['log_level'] -%}

{
"apiVersion": "v1beta3",
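(With the default pillar, this template would now render --cluster-cidr=10.244.0.0/16 into the controller-manager command line where --cluster-class-b=10.244 used to appear; illustrative rendering of the template, not output captured from the commit.)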
2 changes: 1 addition & 1 deletion cmd/integration/integration.go
@@ -225,7 +225,7 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
}}

-nodeController := nodecontroller.NewNodeController(nil, "", machineList, nodeResources, cl, 10, 5*time.Minute, util.NewFakeRateLimiter(), 40*time.Second, 60*time.Second, 5*time.Second, "", "", false)
+nodeController := nodecontroller.NewNodeController(nil, "", machineList, nodeResources, cl, 10, 5*time.Minute, util.NewFakeRateLimiter(), 40*time.Second, 60*time.Second, 5*time.Second, "", nil, false)
nodeController.Run(5*time.Second, true)
cadvisorInterface := new(cadvisor.Fake)

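(Passing nil for the new *net.IPNet parameter is safe here, and at the similar call sites below, because allocateNodeCIDRs is false; the nil-with-allocation combination is rejected by the fatal check added in nodecontroller.go further down.)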
6 changes: 3 additions & 3 deletions cmd/kube-controller-manager/app/controllermanager.go
@@ -79,7 +79,7 @@ type CMServer struct {
NodeMemory resource.Quantity

ClusterName string
-ClusterClassB string
+ClusterCIDR util.IPNet
AllocateNodeCIDRs bool
EnableProfiling bool

@@ -147,7 +147,7 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
fs.Var(resource.NewQuantityFlagValue(&s.NodeMemory), "node-memory", "The amount of memory (in bytes) provisioned on each node")
fs.StringVar(&s.ClusterName, "cluster-name", s.ClusterName, "The instance prefix for the cluster")
fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
-fs.StringVar(&s.ClusterClassB, "cluster-class-b", "10.244", "Class B network address for Pods in cluster.")
+fs.Var(&s.ClusterCIDR, "cluster-cidr", "CIDR Range for Pods in cluster.")
fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.")
fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.")
@@ -230,7 +230,7 @@ func (s *CMServer) Run(_ []string) error {

nodeController := nodecontroller.NewNodeController(cloud, s.MinionRegexp, s.MachineList, nodeResources,
kubeClient, s.RegisterRetryCount, s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
-s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, s.ClusterName, s.ClusterClassB, s.AllocateNodeCIDRs)
+s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, s.ClusterName, (*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
nodeController.Run(s.NodeSyncPeriod, s.SyncNodeList)

serviceController := servicecontroller.New(cloud, kubeClient, s.ClusterName)
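The (*net.IPNet)(&s.ClusterCIDR) cast above suggests util.IPNet is a defined type over net.IPNet that satisfies pflag's Value interface. A minimal standalone sketch of that plumbing, under that assumption (ipNetFlag is a hypothetical stand-in, not the real util type):

package main

import (
	"fmt"
	"net"
)

// ipNetFlag is a hypothetical stand-in for util.IPNet; the real type would
// also satisfy pflag's Value interface so fs.Var can bind it to --cluster-cidr.
type ipNetFlag net.IPNet

// Set parses any valid CIDR, so the range is no longer limited to a /16.
func (f *ipNetFlag) Set(value string) error {
	_, n, err := net.ParseCIDR(value)
	if err != nil {
		return err
	}
	*f = ipNetFlag(*n)
	return nil
}

func (f *ipNetFlag) String() string { return (*net.IPNet)(f).String() }

func main() {
	var cidr ipNetFlag
	if err := cidr.Set("10.100.0.0/14"); err != nil {
		panic(err)
	}
	fmt.Println((*net.IPNet)(&cidr)) // prints 10.100.0.0/14
}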
2 changes: 1 addition & 1 deletion cmd/kubernetes/kubernetes.go
@@ -132,7 +132,7 @@ func runControllerManager(machineList []string, cl *client.Client, nodeMilliCPU,

const nodeSyncPeriod = 10 * time.Second
nodeController := nodecontroller.NewNodeController(
-nil, "", machineList, nodeResources, cl, 10, 5*time.Minute, util.NewTokenBucketRateLimiter(*deletingPodsQps, *deletingPodsBurst), 40*time.Second, 60*time.Second, 5*time.Second, "", "", false)
+nil, "", machineList, nodeResources, cl, 10, 5*time.Minute, util.NewTokenBucketRateLimiter(*deletingPodsQps, *deletingPodsBurst), 40*time.Second, 60*time.Second, 5*time.Second, "", nil, false)
nodeController.Run(nodeSyncPeriod, true)

serviceController := servicecontroller.New(nil, cl, "kubernetes")
2 changes: 1 addition & 1 deletion hack/parallel-e2e.sh
@@ -34,7 +34,7 @@ function down-clusters {
function up-clusters {
for count in $(seq 1 ${clusters}); do
export KUBE_GCE_INSTANCE_PREFIX=e2e-test-${USER}-${count}
-export KUBE_GCE_CLUSTER_CLASS_B="10.$((${count}*2-1))"
+export CLUSTER_IP_RANGE="10.$((${count}*2-1)).0.0/16"
export MASTER_IP_RANGE="10.$((${count}*2)).0.0/24"

local cluster_dir=${KUBE_ROOT}/_output/e2e/${KUBE_GCE_INSTANCE_PREFIX}
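(Worked example: count=1 yields CLUSTER_IP_RANGE=10.1.0.0/16 and MASTER_IP_RANGE=10.2.0.0/24; count=2 yields 10.3.0.0/16 and 10.4.0.0/24, so each parallel cluster's pod /16 and master /24 remain disjoint.)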
14 changes: 10 additions & 4 deletions pkg/cloudprovider/nodecontroller/nodecontroller.go
@@ -89,7 +89,7 @@ type NodeController struct {
// TODO: Change node status monitor to watch based.
nodeMonitorPeriod time.Duration
clusterName string
-clusterClassB string
+clusterCIDR *net.IPNet
allocateNodeCIDRs bool
// Method for easy mocking in unittest.
lookupIP func(host string) ([]net.IP, error)
@@ -110,7 +110,7 @@ func NewNodeController(
nodeStartupGracePeriod time.Duration,
nodeMonitorPeriod time.Duration,
clusterName string,
-clusterClassB string,
+clusterCIDR *net.IPNet,
allocateNodeCIDRs bool) *NodeController {
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
@@ -120,6 +120,9 @@
} else {
glog.Infof("No api server defined - no events will be sent to API server.")
}
+if allocateNodeCIDRs && clusterCIDR == nil {
+glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.")
+}
return &NodeController{
cloud: cloud,
matchRE: matchRE,
@@ -137,17 +140,20 @@
lookupIP: net.LookupIP,
now: util.Now,
clusterName: clusterName,
-clusterClassB: clusterClassB,
+clusterCIDR: clusterCIDR,
allocateNodeCIDRs: allocateNodeCIDRs,
}
}

// Generates num pod CIDRs that could be assigned to nodes.
func (nc *NodeController) generateCIDRs(num int) util.StringSet {
res := util.NewStringSet()
+cidrIP := nc.clusterCIDR.IP.To4()
for i := 0; i < num; i++ {
// TODO: Make the CIDRs configurable.
-res.Insert(fmt.Sprintf("%v.%v.0/24", nc.clusterClassB, i))
+b1 := byte(i >> 8)
+b2 := byte(i % 256)
+res.Insert(fmt.Sprintf("%d.%d.%d.0/24", cidrIP[0], cidrIP[1]+b1, cidrIP[2]+b2))
}
return res
}
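A self-contained sketch of the new subnet walk (a reimplementation for illustration, not the controller code itself): subnet i is offset from the cluster CIDR's base address in the third octet, with the carry past 256 subnets landing in the second octet.

package main

import (
	"fmt"
	"net"
)

// generateCIDRs mirrors the arithmetic in the diff above; like the committed
// code, it does not guard against walking past the end of a small cluster CIDR.
func generateCIDRs(clusterCIDR *net.IPNet, num int) []string {
	ip := clusterCIDR.IP.To4()
	res := make([]string, 0, num)
	for i := 0; i < num; i++ {
		b1 := byte(i >> 8)  // carry into the second octet after 256 subnets
		b2 := byte(i % 256) // walk the third octet
		res = append(res, fmt.Sprintf("%d.%d.%d.0/24", ip[0], ip[1]+b1, ip[2]+b2))
	}
	return res
}

func main() {
	_, cidr, _ := net.ParseCIDR("10.244.0.0/14") // a /14 is now legal
	s := generateCIDRs(cidr, 300)
	fmt.Println(s[0], s[255], s[256]) // 10.244.0.0/24 10.244.255.0/24 10.245.0.0/24
}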
16 changes: 8 additions & 8 deletions pkg/cloudprovider/nodecontroller/nodecontroller_test.go
@@ -246,7 +246,7 @@ func TestRegisterNodes(t *testing.T) {
nodes.Items = append(nodes.Items, *newNode(machine))
}
nodeController := NewNodeController(nil, "", item.machines, &api.NodeResources{}, item.fakeNodeHandler, 10, time.Minute,
-util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
err := nodeController.registerNodes(&nodes, item.retryCount, time.Millisecond)
if !item.expectedFail && err != nil {
t.Errorf("unexpected error: %v", err)
@@ -332,7 +332,7 @@ func TestCreateGetStaticNodesWithSpec(t *testing.T) {
}
for _, item := range table {
nodeController := NewNodeController(nil, "", item.machines, &resources, nil, 10, time.Minute,
-util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
nodes, err := nodeController.getStaticNodesWithSpec()
if err != nil {
t.Errorf("unexpected error: %v", err)
@@ -394,7 +394,7 @@ func TestCreateGetCloudNodesWithSpec(t *testing.T) {

for _, item := range table {
nodeController := NewNodeController(item.fakeCloud, ".*", nil, &api.NodeResources{}, nil, 10, time.Minute,
-util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
nodes, err := nodeController.getCloudNodesWithSpec()
if err != nil {
t.Errorf("unexpected error: %v", err)
@@ -504,7 +504,7 @@ func TestSyncCloudNodes(t *testing.T) {
item.fakeNodeHandler.Fake = testclient.NewSimpleFake()
}
nodeController := NewNodeController(item.fakeCloud, item.matchRE, nil, &api.NodeResources{}, item.fakeNodeHandler, 10, time.Minute,
-util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
if err := nodeController.syncCloudNodes(); err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -588,7 +588,7 @@ func TestSyncCloudNodesEvictPods(t *testing.T) {
item.fakeNodeHandler.Fake = testclient.NewSimpleFake()
}
nodeController := NewNodeController(item.fakeCloud, item.matchRE, nil, &api.NodeResources{}, item.fakeNodeHandler, 10, time.Minute,
-util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
if err := nodeController.syncCloudNodes(); err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -628,7 +628,7 @@ func TestPopulateNodeAddresses(t *testing.T) {

for _, item := range table {
nodeController := NewNodeController(item.fakeCloud, ".*", nil, nil, nil, 10, time.Minute,
-util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
result, err := nodeController.populateAddresses(item.nodes)
// In case of IP querying error, we should continue.
if err != nil {
@@ -828,7 +828,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
for _, item := range table {
nodeController := NewNodeController(nil, "", []string{"node0"}, nil, item.fakeNodeHandler, 10,
evictionTimeout, util.NewFakeRateLimiter(), testNodeMonitorGracePeriod,
-testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
nodeController.now = func() util.Time { return fakeNow }
if err := nodeController.monitorNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
@@ -1030,7 +1030,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {

for _, item := range table {
nodeController := NewNodeController(nil, "", []string{"node0"}, nil, item.fakeNodeHandler, 10, 5*time.Minute, util.NewFakeRateLimiter(),
-testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
nodeController.now = func() util.Time { return fakeNow }
if err := nodeController.monitorNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
