(squash) merge configmaps separately and update tests
damemi committed Aug 20, 2021
1 parent 823d656 commit d51deff
Showing 6 changed files with 38 additions and 46 deletions.
@@ -1,14 +1,5 @@
apiVersion: kubescheduler.config.k8s.io/v1beta1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
leaderElection:
  leaseDuration: "137s"
  renewDeadline: "107s"
  retryPeriod: "26s"
  leaderElect: true
  resourceNamespace: "openshift-kube-scheduler"
  resourceLock: "configmaps"
profiles:
- schedulerName: default-scheduler
  plugins:
@@ -1,11 +1,2 @@
apiVersion: kubescheduler.config.k8s.io/v1beta1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
leaderElection:
  leaseDuration: "137s"
  renewDeadline: "107s"
  retryPeriod: "26s"
  leaderElect: true
  resourceNamespace: "openshift-kube-scheduler"
  resourceLock: "configmaps"
@@ -1,14 +1,5 @@
apiVersion: kubescheduler.config.k8s.io/v1beta1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
leaderElection:
  leaseDuration: "137s"
  renewDeadline: "107s"
  retryPeriod: "26s"
  leaderElect: true
  resourceNamespace: "openshift-kube-scheduler"
  resourceLock: "configmaps"
profiles:
- schedulerName: default-scheduler
  plugins:
bindata/assets/config/defaultconfig.yaml: 9 additions & 0 deletions
@@ -1,2 +1,11 @@
apiVersion: kubescheduler.config.k8s.io/v1beta1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
leaderElection:
  leaseDuration: "137s"
  renewDeadline: "107s"
  retryPeriod: "26s"
  leaderElect: true
  resourceNamespace: "openshift-kube-scheduler"
  resourceLock: "configmaps"
@@ -190,7 +190,8 @@ func manageKubeSchedulerConfigMap_v311_00_to_latest(ctx context.Context, client
		}
	}

	requiredConfigMap, _, err := resourcemerge.MergeConfigMap(configMap, "config.yaml", nil, kubeSchedulerConfiguration)
	defaultConfig := bindata.MustAsset("assets/config/defaultconfig.yaml")
	requiredConfigMap, _, err := resourcemerge.MergeConfigMap(configMap, "config.yaml", nil, kubeSchedulerConfiguration, defaultConfig)
	if err != nil {
		return nil, false, err
	}
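For reference, a minimal sketch of the merge this hunk wires up, based only on the call shown above: resourcemerge.MergeConfigMap takes the base ConfigMap, the data key to merge into, a nil special-cases argument, and then one or more YAML documents (profile config first, then the embedded default config). The package and helper name below are illustrative, not part of the commit, and the return values (merged ConfigMap, changed flag, error) are assumed from how the call is used here.

package scheduler

import (
	"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"

	corev1 "k8s.io/api/core/v1"
)

// mergeSchedulerConfig overlays the profile-specific KubeSchedulerConfiguration
// onto the embedded default configuration inside the ConfigMap's "config.yaml"
// key, mirroring the argument order used above (profile config, then default).
func mergeSchedulerConfig(base *corev1.ConfigMap, profileConfig, defaultConfig []byte) (*corev1.ConfigMap, bool, error) {
	return resourcemerge.MergeConfigMap(base, "config.yaml", nil, profileConfig, defaultConfig)
}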
pkg/operator/targetconfigcontroller/targetconfigcontroller_test.go: 27 additions & 18 deletions
@@ -3,6 +3,8 @@ package targetconfigcontroller
import (
	"context"
	"fmt"
	"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
	"github.com/openshift/library-go/pkg/operator/resource/resourceread"
	"io/ioutil"
	"os"
	"reflect"
@@ -53,12 +55,12 @@ var configUnknown = &configv1.Scheduler{
Profile: "unknown-config",
},
}

var defaultConfig string = string(bindata.MustAsset("assets/config/defaultconfig.yaml"))
var schedConfigLowNodeUtilization string = string(bindata.MustAsset(
"assets/config/defaultconfig-postbootstrap-lownodeutilization.yaml"))
var schedConfigHighNodeUtilization string = string(bindata.MustAsset(
"assets/config/defaultconfig-postbootstrap-highnodeutilization.yaml"))
var schedConfigcNoScoring string = string(bindata.MustAsset(
var schedConfigNoScoring string = string(bindata.MustAsset(
"assets/config/defaultconfig-postbootstrap-noscoring.yaml"))

var configMapLowNodeUtilization = &corev1.ConfigMap{
@@ -85,7 +87,7 @@ var configMapNoScoring = &corev1.ConfigMap{
Name: "config",
Namespace: "openshift-kube-scheduler",
},
Data: map[string]string{"config.yaml": schedConfigcNoScoring},
Data: map[string]string{"config.yaml": schedConfigNoScoring},
}

func Test_manageKubeSchedulerConfigMap_v311_00_to_latest(t *testing.T) {
@@ -97,11 +99,12 @@ func Test_manageKubeSchedulerConfigMap_v311_00_to_latest(t *testing.T) {
		configSchedulerLister configlistersv1.SchedulerLister
	}
	tests := []struct {
		name    string
		args    args
		want    *corev1.ConfigMap
		want1   bool
		wantErr bool
		name       string
		args       args
		want       *corev1.ConfigMap
		wantConfig string
		want1      bool
		wantErr    bool
	}{
		{
			name: "unknown-cluster",
@@ -135,9 +138,10 @@ func Test_manageKubeSchedulerConfigMap_v311_00_to_latest(t *testing.T) {
					Items: map[string]*configv1.Scheduler{"cluster": configLowNodeUtilization},
				},
			},
			want:    configMapLowNodeUtilization,
			want1:   true,
			wantErr: false,
			want:       configMapLowNodeUtilization,
			wantConfig: schedConfigLowNodeUtilization,
			want1:      true,
			wantErr:    false,
		},
		{
			name: "high-node-utilization",
@@ -147,9 +151,10 @@ func Test_manageKubeSchedulerConfigMap_v311_00_to_latest(t *testing.T) {
					Items: map[string]*configv1.Scheduler{"cluster": configHighNodeUtilization},
				},
			},
			want:    configMapHighNodeUtilization,
			want1:   true,
			wantErr: false,
			want:       configMapHighNodeUtilization,
			wantConfig: schedConfigHighNodeUtilization,
			want1:      true,
			wantErr:    false,
		},
		{
			name: "no-scoring",
@@ -159,9 +164,10 @@ func Test_manageKubeSchedulerConfigMap_v311_00_to_latest(t *testing.T) {
					Items: map[string]*configv1.Scheduler{"cluster": configNoScoring},
				},
			},
			want:    configMapNoScoring,
			want1:   true,
			wantErr: false,
			want:       configMapNoScoring,
			wantConfig: schedConfigNoScoring,
			want1:      true,
			wantErr:    false,
		},
	}
	for _, tt := range tests {
@@ -172,7 +178,10 @@ func Test_manageKubeSchedulerConfigMap_v311_00_to_latest(t *testing.T) {
t.Errorf("manageKubeSchedulerConfigMap_v311_00_to_latest() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
configMap := resourceread.ReadConfigMapV1OrDie(bindata.MustAsset("assets/kube-scheduler/cm.yaml"))
requiredConfigMap, _, _ := resourcemerge.MergeConfigMap(configMap, "config.yaml", nil, []byte(tt.wantConfig), []byte(defaultConfig))

if !equality.Semantic.DeepEqual(got, requiredConfigMap) {
t.Errorf("manageKubeSchedulerConfigMap_v311_00_to_latest() got = %v, want %v", got, tt.want)
}
if got1 != tt.want1 {
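A rough companion sketch of what each test case now does with the new wantConfig field, assuming the same MergeConfigMap signature as above and the k8s.io/apimachinery equality package used in the hunk; the helper name and test package are hypothetical, not part of the commit.

package scheduler_test

import (
	"testing"

	"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/equality"
)

// assertMergedConfigMap rebuilds the expected ConfigMap with the same merge the
// controller performs (profile config over the default config) and compares it
// semantically against the ConfigMap the controller actually produced.
func assertMergedConfigMap(t *testing.T, got, base *corev1.ConfigMap, wantConfig, defaultConfig []byte) {
	t.Helper()
	want, _, err := resourcemerge.MergeConfigMap(base, "config.yaml", nil, wantConfig, defaultConfig)
	if err != nil {
		t.Fatalf("merging expected config: %v", err)
	}
	if !equality.Semantic.DeepEqual(got, want) {
		t.Errorf("merged ConfigMap mismatch: got %v, want %v", got, want)
	}
}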
