Merge pull request #23 from MrHohn/linear-preventSinglePointFailure

Support "preventSinglePointFailure" option in linear controller

bowei committed Feb 21, 2017
2 parents 08b5941 + 11f15bc commit 66eb800
Showing 5 changed files with 49 additions and 18 deletions.
17 changes: 12 additions & 5 deletions README.md
@@ -66,25 +66,32 @@ data:
"coresPerReplica": 2,
"nodesPerReplica": 1,
"min": 1,
"max": 100
"max": 100,
"preventSinglePointFailure": true
}
```

The equation for the linear control mode is as follows:
```
replicas = max( ceil( cores * 1/coresPerReplica ) , ceil( nodes * 1/nodesPerReplica ) )
replicas = min(replicas, max)
replicas = max(replicas, min)
```

Notice that both `coresPerReplica` and `nodesPerReplica` are float.
When `preventSinglePointFailure` is set to `true`, the controller ensures at least 2 replicas
if there is more than one node.

For instance, consider a cluster with 4 nodes and 13 cores. With the above parameters, each replica can take care of 1 node,
so we need `4 / 1 = 4` replicas to cover all 4 nodes. Each replica can also take care of 2 cores, so we need `ceil(13 / 2) = 7`
replicas to cover all 13 cores. The controller chooses the greater of the two, which is `7` here, as the result.
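
To make the arithmetic easy to check, here is a minimal, self-contained Go sketch of the formula above. It is illustrative only: the function name and signature are made up for this example, and the real logic lives in `pkg/autoscaler/controller/linearcontroller/linear_controller.go` (shown further down in this diff).

```go
package main

import (
	"fmt"
	"math"
)

// expectedReplicas restates the linear formula for illustration; it is not the
// controller's actual code. A non-positive max is treated here as "no upper
// bound", which is an assumption made for this sketch.
func expectedReplicas(cores, nodes int, coresPerReplica, nodesPerReplica float64,
	min, max int, preventSinglePointFailure bool) int {
	fromCores := int(math.Ceil(float64(cores) / coresPerReplica))
	fromNodes := int(math.Ceil(float64(nodes) / nodesPerReplica))
	// With preventSinglePointFailure, require at least 2 replicas whenever
	// the cluster has more than one node.
	if preventSinglePointFailure && nodes > 1 && fromNodes < 2 {
		fromNodes = 2
	}
	replicas := fromCores
	if fromNodes > replicas {
		replicas = fromNodes
	}
	if max > 0 && replicas > max {
		replicas = max
	}
	if replicas < min {
		replicas = min
	}
	return replicas
}

func main() {
	// 13 cores, 4 nodes, coresPerReplica=2, nodesPerReplica=1:
	// max(ceil(13/2), ceil(4/1)) = max(7, 4) = 7
	fmt.Println(expectedReplicas(13, 4, 2, 1, 1, 100, true)) // prints 7
}
```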

Either one of the `coresPerReplica` or `nodesPerReplica` could be omitted. Both `min` and `max ` could be omitted.
If not set, `min` would be default to 1.
Either `coresPerReplica` or `nodesPerReplica` could be omitted. `min`, `max` and
`preventSinglePointFailure` are all optional. If not set, `min` defaults to `1` and
`preventSinglePointFailure` defaults to `false`.

The lowest number of replicas is set to 1.
Side notes:
- Both `coresPerReplica` and `nodesPerReplica` are floats.
- The number of replicas is floored at 1 when `min` is less than 1.
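
As an aside on the optional fields: with Go's `encoding/json`, keys that are absent from the ConfigMap payload simply leave the corresponding struct fields at their zero values (`0`, `false`). The snippet below illustrates that behaviour and one way the `min` default of `1` could then be applied; it is a sketch, not the controller's actual defaulting code.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// params mirrors the linearParams fields; omitted JSON keys stay at zero values.
type params struct {
	CoresPerReplica           float64 `json:"coresPerReplica"`
	NodesPerReplica           float64 `json:"nodesPerReplica"`
	Min                       int     `json:"min"`
	Max                       int     `json:"max"`
	PreventSinglePointFailure bool    `json:"preventSinglePointFailure"`
}

func main() {
	var p params
	// min, max and preventSinglePointFailure are omitted on purpose.
	if err := json.Unmarshal([]byte(`{"coresPerReplica": 2, "nodesPerReplica": 1}`), &p); err != nil {
		panic(err)
	}
	if p.Min < 1 {
		p.Min = 1 // treat a missing or non-positive min as 1
	}
	fmt.Printf("%+v\n", p)
	// {CoresPerReplica:2 NodesPerReplica:1 Min:1 Max:0 PreventSinglePointFailure:false}
}
```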

### Ladder Mode

10 changes: 5 additions & 5 deletions pkg/autoscaler/autoscaler_server.go
@@ -91,16 +91,16 @@ func (s *AutoScaler) pollAPIServer() {

// Sync autoscaler ConfigMap with apiserver
configMap, err := s.syncConfigWithServer()
if err != nil {
if err != nil || configMap == nil {
glog.Errorf("Error syncing configMap with apiserver: %v", err)
return
}

// Only sync updated ConfigMap
if configMap.ObjectMeta.ResourceVersion != s.controller.GetParamsVersion() {
// Ensure corresponding controller type and scaling params
// Only sync updated ConfigMap or before controller is set.
if s.controller == nil || configMap.ObjectMeta.ResourceVersion != s.controller.GetParamsVersion() {
// Ensure corresponding controller type and scaling params.
s.controller, err = plugin.EnsureController(s.controller, configMap)
if err != nil {
if err != nil || s.controller == nil {
glog.Errorf("Error ensuring controller: %v", err)
return
}
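
The extra `configMap == nil` and `s.controller == nil` checks in this hunk follow a common Go pattern: a helper may return a nil value together with a nil error, so checking the error alone is not enough. Here is a small illustrative example with hypothetical names (not from this repository):

```go
package main

import "fmt"

type config struct{ Name string }

// fetchConfig is a hypothetical stand-in for a call such as syncConfigWithServer:
// it may return (nil, nil), e.g. when nothing is found and that is not an error.
func fetchConfig(found bool) (*config, error) {
	if !found {
		return nil, nil
	}
	return &config{Name: "autoscaler-params"}, nil
}

func main() {
	cfg, err := fetchConfig(false)
	// Checking only err would let a nil cfg through and panic on cfg.Name below.
	if err != nil || cfg == nil {
		fmt.Println("nothing to do this cycle, err:", err)
		return
	}
	fmt.Println(cfg.Name)
}
```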
16 changes: 12 additions & 4 deletions pkg/autoscaler/controller/linearcontroller/linear_controller.go
@@ -48,10 +48,11 @@ func NewLinearController() controller.Controller {
}

type linearParams struct {
CoresPerReplica float64 `json:"coresPerReplica"`
NodesPerReplica float64 `json:"nodesPerReplica"`
Min int `json:"min"`
Max int `json:"max"`
CoresPerReplica float64 `json:"coresPerReplica"`
NodesPerReplica float64 `json:"nodesPerReplica"`
Min int `json:"min"`
Max int `json:"max"`
PreventSinglePointFailure bool `json:"preventSinglePointFailure"`
}

func (c *LinearController) SyncConfig(configMap *apiv1.ConfigMap) error {
@@ -107,6 +108,13 @@ func (c *LinearController) GetExpectedReplicas(status *k8sclient.ClusterStatus)
func (c *LinearController) getExpectedReplicasFromParams(schedulableNodes, schedulableCores int) int {
replicasFromCore := c.getExpectedReplicasFromParam(schedulableCores, c.params.CoresPerReplica)
replicasFromNode := c.getExpectedReplicasFromParam(schedulableNodes, c.params.NodesPerReplica)
// Prevent single point of failure by having at least 2 replicas when
// there is more than one node.
if c.params.PreventSinglePointFailure &&
schedulableNodes > 1 &&
replicasFromNode < 2 {
replicasFromNode = 2
}

// Returns the result which yields the most replicas
if replicasFromCore > replicasFromNode {
@@ -42,14 +42,16 @@ func TestControllerParser(t *testing.T) {
"coresPerReplica": 2,
"nodesPerReplica": 1,
"min": 1,
"max": 100
"max": 100,
"preventSinglePointFailure": true
}`,
false,
&linearParams{
CoresPerReplica: 2,
NodesPerReplica: 1,
Min: 1,
Max: 100,
PreventSinglePointFailure: true,
},
},
{ // Invalid JSON
@@ -84,6 +86,18 @@ func TestControllerParser(t *testing.T) {
true,
&linearParams{},
},
// Wrong input for PreventSinglePointFailure.
{
`{
"coresPerReplica": 2,
"nodesPerReplica": 1,
"min": 1,
"max": 100,
"preventSinglePointFailure": invalid,
}`,
true,
&linearParams{},
},
}

for _, tc := range testCases {
@@ -145,16 +159,17 @@ func TestScaleFromMultipleParams(t *testing.T) {
testController.params = &linearParams{
CoresPerReplica: 2,
NodesPerReplica: 2.5,
Min: 2,
Min: 1,
Max: 100,
PreventSinglePointFailure: true,
}

testCases := []struct {
numCores int
numNodes int
expReplicas int
}{
{0, 0, 2},
{0, 0, 1},
{1, 2, 2},
{2, 3, 2},
{3, 4, 2},
3 changes: 2 additions & 1 deletion pkg/autoscaler/k8sclient/k8sclient.go
@@ -141,9 +141,10 @@ func (k *k8sClient) GetClusterStatus() (clusterStatus *ClusterStatus, err error)
opt := api.ListOptions{Watch: false}

nodes, err := k.clientset.CoreClient.Nodes().List(opt)
if err != nil {
if err != nil || nodes == nil {
return nil, err
}
clusterStatus = &ClusterStatus{}
clusterStatus.TotalNodes = int32(len(nodes.Items))
var tc resource.Quantity
var sc resource.Quantity
