diff --git a/README.md b/README.md index 8cfc728c..f3094633 100644 --- a/README.md +++ b/README.md @@ -66,25 +66,32 @@ data: "coresPerReplica": 2, "nodesPerReplica": 1, "min": 1, - "max": 100 + "max": 100, + "preventSinglePointFailure": true } ``` The equation of linear control mode as below: ``` replicas = max( ceil( cores * 1/coresPerReplica ) , ceil( nodes * 1/nodesPerReplica ) ) +replicas = min(replicas, max) +replicas = max(replicas, min) ``` -Notice that both `coresPerReplica` and `nodesPerReplica` are float. +When `preventSinglePointFailure` is set to `true`, the controller ensures at least 2 replicas +if there is more than one node. For instance, given a cluster has 4 nodes and 13 cores. With above parameters, each replica could take care of 1 node. So we need `4 / 1 = 4` replicas to take care of all 4 nodes. And each replica could take care of 2 cores. We need `ceil(13 / 2) = 7` replicas to take care of all 13 cores. Controller will choose the greater one, which is `7` here, as the result. -Either one of the `coresPerReplica` or `nodesPerReplica` could be omitted. Both `min` and `max ` could be omitted. -If not set, `min` would be default to 1. +Either one of the `coresPerReplica` or `nodesPerReplica` could be omitted. All of `min`, `max` and +`preventSinglePointFailure` are optional. If not set, `min` defaults to `1`, +`preventSinglePointFailure` defaults to `false`. -The lowest number of replicas is set to 1. +Side notes: +- Both `coresPerReplica` and `nodesPerReplica` are float. +- The lowest number of replicas will be set to 1 when `min` is less than 1. 
### Ladder Mode diff --git a/pkg/autoscaler/autoscaler_server.go b/pkg/autoscaler/autoscaler_server.go index fe57b8d5..bedfbf16 100644 --- a/pkg/autoscaler/autoscaler_server.go +++ b/pkg/autoscaler/autoscaler_server.go @@ -91,16 +91,16 @@ func (s *AutoScaler) pollAPIServer() { // Sync autoscaler ConfigMap with apiserver configMap, err := s.syncConfigWithServer() - if err != nil { + if err != nil || configMap == nil { glog.Errorf("Error syncing configMap with apiserver: %v", err) return } - // Only sync updated ConfigMap - if configMap.ObjectMeta.ResourceVersion != s.controller.GetParamsVersion() { - // Ensure corresponding controller type and scaling params + // Only sync updated ConfigMap or before controller is set. + if s.controller == nil || configMap.ObjectMeta.ResourceVersion != s.controller.GetParamsVersion() { + // Ensure corresponding controller type and scaling params. s.controller, err = plugin.EnsureController(s.controller, configMap) - if err != nil { + if err != nil || s.controller == nil { glog.Errorf("Error ensuring controller: %v", err) return } diff --git a/pkg/autoscaler/controller/linearcontroller/linear_controller.go b/pkg/autoscaler/controller/linearcontroller/linear_controller.go index 712e826c..eb9932d7 100644 --- a/pkg/autoscaler/controller/linearcontroller/linear_controller.go +++ b/pkg/autoscaler/controller/linearcontroller/linear_controller.go @@ -48,10 +48,11 @@ func NewLinearController() controller.Controller { } type linearParams struct { - CoresPerReplica float64 `json:"coresPerReplica"` - NodesPerReplica float64 `json:"nodesPerReplica"` - Min int `json:"min"` - Max int `json:"max"` + CoresPerReplica float64 `json:"coresPerReplica"` + NodesPerReplica float64 `json:"nodesPerReplica"` + Min int `json:"min"` + Max int `json:"max"` + PreventSinglePointFailure bool `json:"preventSinglePointFailure"` } func (c *LinearController) SyncConfig(configMap *apiv1.ConfigMap) error { @@ -107,6 +108,13 @@ func (c *LinearController) 
GetExpectedReplicas(status *k8sclient.ClusterStatus) func (c *LinearController) getExpectedReplicasFromParams(schedulableNodes, schedulableCores int) int { replicasFromCore := c.getExpectedReplicasFromParam(schedulableCores, c.params.CoresPerReplica) replicasFromNode := c.getExpectedReplicasFromParam(schedulableNodes, c.params.NodesPerReplica) + // Prevent single point of failure by having at least 2 replicas when + // there are more than one node. + if c.params.PreventSinglePointFailure && + schedulableNodes > 1 && + replicasFromNode < 2 { + replicasFromNode = 2 + } // Returns the results which yields the most replicas if replicasFromCore > replicasFromNode { diff --git a/pkg/autoscaler/controller/linearcontroller/linear_controller_test.go b/pkg/autoscaler/controller/linearcontroller/linear_controller_test.go index 154fcdd7..b1a457ee 100644 --- a/pkg/autoscaler/controller/linearcontroller/linear_controller_test.go +++ b/pkg/autoscaler/controller/linearcontroller/linear_controller_test.go @@ -42,7 +42,8 @@ func TestControllerParser(t *testing.T) { "coresPerReplica": 2, "nodesPerReplica": 1, "min": 1, - "max": 100 + "max": 100, + "preventSinglePointFailure": true }`, false, &linearParams{ @@ -50,6 +51,7 @@ func TestControllerParser(t *testing.T) { NodesPerReplica: 1, Min: 1, Max: 100, + PreventSinglePointFailure: true, }, }, { // Invalid JSON @@ -84,6 +86,18 @@ func TestControllerParser(t *testing.T) { true, &linearParams{}, }, + // Wrong input for PreventSinglePointFailure. 
+ { + `{ + "coresPerReplica": 2, + "nodesPerReplica": 1, + "min": 1, + "max": 100, + "preventSinglePointFailure": invalid, + }`, + true, + &linearParams{}, + }, } for _, tc := range testCases { @@ -145,8 +159,9 @@ func TestScaleFromMultipleParams(t *testing.T) { testController.params = &linearParams{ CoresPerReplica: 2, NodesPerReplica: 2.5, - Min: 2, + Min: 1, Max: 100, + PreventSinglePointFailure: true, } testCases := []struct { @@ -154,7 +169,7 @@ func TestScaleFromMultipleParams(t *testing.T) { numNodes int expReplicas int }{ - {0, 0, 2}, + {0, 0, 1}, {1, 2, 2}, {2, 3, 2}, {3, 4, 2}, diff --git a/pkg/autoscaler/k8sclient/k8sclient.go b/pkg/autoscaler/k8sclient/k8sclient.go index df068222..29c0e262 100644 --- a/pkg/autoscaler/k8sclient/k8sclient.go +++ b/pkg/autoscaler/k8sclient/k8sclient.go @@ -141,9 +141,10 @@ func (k *k8sClient) GetClusterStatus() (clusterStatus *ClusterStatus, err error) opt := api.ListOptions{Watch: false} nodes, err := k.clientset.CoreClient.Nodes().List(opt) - if err != nil { + if err != nil || nodes == nil { return nil, err } + clusterStatus = &ClusterStatus{} clusterStatus.TotalNodes = int32(len(nodes.Items)) var tc resource.Quantity var sc resource.Quantity