Skip to content
This repository has been archived by the owner on Nov 17, 2022. It is now read-only.

[IS-618] - Add support for new node label schema #62

Merged
merged 2 commits into from Aug 20, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
4 changes: 2 additions & 2 deletions README.md
Expand Up @@ -57,12 +57,12 @@ For example you could add the following flags to your Kubelet:
```

### Building
If you wish to build the binary yourself; first make sure you have go installed and set up. Then clone this repo into your `$GOPATH` and download the dependencies using [`glide`](https://github.com/Masterminds/glide).
If you wish to build the binary yourself, first make sure you have Go installed and set up. Then clone this repo into your `$GOPATH` and download the dependencies using [`dep`](https://github.com/golang/dep).

```bash
cd $GOPATH/src/github.com # Create this directory if it doesn't exist
git clone git@github.com:pusher/k8s-spot-rescheduler pusher/k8s-spot-rescheduler
glide install -v # Installs dependencies to vendor folder.
dep ensure -v # Installs dependencies to vendor folder.
```

Then build the code using `go build` which will produce the built binary in a file `k8s-spot-rescheduler`.
Expand Down
45 changes: 39 additions & 6 deletions nodes/nodes.go
Expand Up @@ -18,6 +18,7 @@ package nodes

import (
"sort"
"strings"

apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand All @@ -27,9 +28,9 @@ import (

var (
// OnDemandNodeLabel label for on-demand instances.
OnDemandNodeLabel = "node-role.kubernetes.io/worker"
OnDemandNodeLabel = "kubernetes.io/role=worker"
// SpotNodeLabel label for spot instances.
SpotNodeLabel = "node-role.kubernetes.io/spot-worker"
SpotNodeLabel = "kubernetes.io/role=spot-worker"
// OnDemand key for on-demand instances of NodesMap.
OnDemand NodeType
// Spot key for spot instances of NodesMap.
Expand Down Expand Up @@ -157,14 +158,46 @@ func getPodCPURequests(pod *apiv1.Pod) int64 {

// isSpotNode determines whether a node has the SpotNodeLabel assigned.
//
// Two label schemas are supported:
//   - "<key>"          (old schema): matches if the label key is present,
//     regardless of its value.
//   - "<key>=<value>"  (new schema): matches only if the label key is
//     present with exactly that value.
func isSpotNode(node *apiv1.Node) bool {
	splitLabel := strings.SplitN(SpotNodeLabel, "=", 2)

	// No "=" found: old label schema — presence of the key alone is enough.
	if len(splitLabel) == 1 {
		_, found := node.ObjectMeta.Labels[SpotNodeLabel]
		return found
	}

	// New label schema: the key must exist with the exact value. A missing
	// key yields the zero value "", which compares equal only when the
	// configured value is itself empty — same behavior as before.
	return node.ObjectMeta.Labels[splitLabel[0]] == splitLabel[1]
}

// isOnDemandNode determines whether a node has the OnDemandNodeLabel assigned.
//
// Two label schemas are supported:
//   - "<key>"          (old schema): matches if the label key is present,
//     regardless of its value.
//   - "<key>=<value>"  (new schema): matches only if the label key is
//     present with exactly that value.
func isOnDemandNode(node *apiv1.Node) bool {
	splitLabel := strings.SplitN(OnDemandNodeLabel, "=", 2)

	// No "=" found: old label schema — presence of the key alone is enough.
	if len(splitLabel) == 1 {
		_, found := node.ObjectMeta.Labels[OnDemandNodeLabel]
		return found
	}

	// New label schema: the key must exist with the exact value. A missing
	// key yields the zero value "", which compares equal only when the
	// configured value is itself empty — same behavior as before.
	return node.ObjectMeta.Labels[splitLabel[0]] == splitLabel[1]
}

// CopyNodeInfos returns an array of copies of the NodeInfos in this array.
Expand Down
33 changes: 31 additions & 2 deletions nodes/nodes_test.go
Expand Up @@ -29,12 +29,41 @@ import (
core "k8s.io/client-go/testing"
)

// TestIsSpotNode verifies that isSpotNode honours both the old ("<key>")
// and the new ("<key>=<value>") label schemas against a node labelled
// foo=bar.
func TestIsSpotNode(t *testing.T) {
	node := createTestNodeWithLabel("fooSpotNode", 2000, map[string]string{"foo": "bar"})

	cases := []struct {
		label string
		want  bool
		msg   string
	}{
		{"foo", true, "expected node with label 'foo' to be spot node"},
		{"foo=bar", true, "expected node with label 'foo' and value 'bar' to be spot node"},
		{"foo=baz", false, "expected node with label 'foo' and value 'bar' to not be spot node"},
	}

	for _, c := range cases {
		SpotNodeLabel = c.label
		assert.Equal(t, c.want, isSpotNode(node), c.msg)
	}
}

// TestIsOnDemandNode verifies that isOnDemandNode honours both the old
// ("<key>") and the new ("<key>=<value>") label schemas against a node
// labelled foo=bar.
func TestIsOnDemandNode(t *testing.T) {
	node := createTestNodeWithLabel("fooDemandNode", 2000, map[string]string{"foo": "bar"})

	cases := []struct {
		label string
		want  bool
		msg   string
	}{
		{"foo", true, "expected node with label 'foo' to be on demand node"},
		{"foo=bar", true, "expected node with label 'foo' and value 'bar' to be on demand node"},
		{"foo=baz", false, "expected node with label 'foo' and value 'bar' to not be on demand node"},
	}

	for _, c := range cases {
		OnDemandNodeLabel = c.label
		assert.Equal(t, c.want, isOnDemandNode(node), c.msg)
	}
}

func TestNewNodeMap(t *testing.T) {
OnDemandNodeLabel = "kubernetes.io/role=worker"
SpotNodeLabel = "kubernetes.io/role=spot-worker"

spotLabels := map[string]string{
SpotNodeLabel: "true",
"kubernetes.io/role": "spot-worker",
}
onDemandLabels := map[string]string{
OnDemandNodeLabel: "true",
"kubernetes.io/role": "worker",
}

nodes := []*apiv1.Node{
Expand Down
24 changes: 22 additions & 2 deletions rescheduler.go
Expand Up @@ -21,6 +21,7 @@ import (
"fmt"
"net/http"
"os"
"strings"
"time"

"github.com/pusher/k8s-spot-rescheduler/metrics"
Expand Down Expand Up @@ -96,11 +97,11 @@ func main() {
// Add nodes labels as flags
flags.StringVar(&nodes.OnDemandNodeLabel,
"on-demand-node-label",
"node-role.kubernetes.io/worker",
"kubernetes.io/role=worker",
theobarberbany marked this conversation as resolved.
Show resolved Hide resolved
`Name of label on nodes to be considered for draining.`)
flags.StringVar(&nodes.SpotNodeLabel,
"spot-node-label",
"node-role.kubernetes.io/spot-worker",
"kubernetes.io/role=spot-worker",
`Name of label on nodes to be considered as targets for pods.`)

flags.Parse(os.Args)
Expand All @@ -110,6 +111,12 @@ func main() {
os.Exit(0)
}

err := validateArgs(nodes.OnDemandNodeLabel, nodes.SpotNodeLabel)
if err != nil {
fmt.Printf("Error: %s", err)
os.Exit(1)
}

glog.Infof("Running Rescheduler")

// Register metrics from metrics.go
Expand Down Expand Up @@ -419,3 +426,16 @@ func updateSpotNodeMetrics(spotNodeInfos nodes.NodeInfoArray, pdbs []*policyv1.P
// podID returns a human-readable "<namespace>/<name>" identifier for a pod,
// used in log messages.
func podID(pod *apiv1.Pod) string {
	return pod.Namespace + "/" + pod.Name
}

// validateArgs checks that the node labels provided as arguments are, in
// fact, sane: each must be either "<label_name>" or
// "<label_name>=<label_value>". A label containing more than one "=" is
// rejected with a descriptive error; nil is returned when both are valid.
func validateArgs(onDemandNodeLabel string, spotNodeLabel string) error {
	// strings.Count avoids allocating the intermediate slice that
	// strings.Split would create; >1 separators means a malformed label.
	if strings.Count(onDemandNodeLabel, "=") > 1 {
		return fmt.Errorf("the on demand node label is not correctly formatted: expected '<label_name>' or '<label_name>=<label_value>', but got %s", onDemandNodeLabel)
	}

	if strings.Count(spotNodeLabel, "=") > 1 {
		return fmt.Errorf("the spot node label is not correctly formatted: expected '<label_name>' or '<label_name>=<label_value>', but got %s", spotNodeLabel)
	}

	return nil
}
18 changes: 18 additions & 0 deletions rescheduler_test.go
Expand Up @@ -70,6 +70,24 @@ func TestFindSpotNodeForPod(t *testing.T) {

}

// TestNodeLabelValidation verifies that validateArgs accepts labels in both
// supported schemas and rejects labels containing more than one "=".
func TestNodeLabelValidation(t *testing.T) {
	// Both labels well formed (one with a value part, one without).
	assert.NoError(t, validateArgs("foo.bar/role=worker", "foo.bar/node-role"))

	// An on-demand label with two "=" separators must be rejected.
	assert.EqualError(t,
		validateArgs("foo.bar/broken=worker=true", "foo.bar/node-role"),
		"the on demand node label is not correctly formatted: expected '<label_name>' or '<label_name>=<label_value>', but got foo.bar/broken=worker=true")

	// A spot label with two "=" separators must be rejected.
	assert.EqualError(t,
		validateArgs("foo.bar/role=worker", "foo.bar/node-role=spot=fail"),
		"the spot node label is not correctly formatted: expected '<label_name>' or '<label_name>=<label_value>', but got foo.bar/node-role=spot=fail")
}

func TestCanDrainNode(t *testing.T) {
predicateChecker := simulator.NewTestPredicateChecker()

Expand Down