feat: add option to override default kubernetes scheduler #2013

Merged · 8 commits · May 18, 2023
7 changes: 7 additions & 0 deletions api/v1/cluster_types.go
@@ -135,6 +135,13 @@ type ClusterSpec struct {
// +optional
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`

// If specified, the pod will be dispatched by the specified Kubernetes
// scheduler. If not specified, the pod will be dispatched by the default
// scheduler. More info:
// https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/
// +optional
SchedulerName string `json:"schedulerName,omitempty"`

// The UID of the `postgres` user inside the image, defaults to `26`
// +kubebuilder:default:=26
PostgresUID int64 `json:"postgresUID,omitempty"`
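As a usage sketch for the new field, a Cluster manifest would set `spec.schedulerName` to the name of a scheduler that is already deployed in the Kubernetes cluster. Everything below apart from the field itself (the `cluster-example` name, instance count, storage size, and the `my-scheduler` name) is a placeholder, not something prescribed by this change:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example            # placeholder cluster name
spec:
  instances: 3
  schedulerName: my-scheduler      # must match a scheduler running in the cluster;
                                   # when omitted, the default kube-scheduler is used
  storage:
    size: 1Gi
```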
5 changes: 5 additions & 0 deletions config/crd/bases/postgresql.cnpg.io_clusters.yaml
@@ -2852,6 +2852,11 @@ spec:
to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
schedulerName:
description: 'If specified, the pod will be dispatched by the specified
Kubernetes scheduler. If not specified, the pod will be dispatched
by the default scheduler. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/'
type: string
seccompProfile:
description: 'The SeccompProfile applied to every Pod and Container.
Defaults to: `RuntimeDefault`'
18 changes: 18 additions & 0 deletions controllers/cluster_upgrade.go
@@ -286,6 +286,10 @@ func IsPodNeedingRollout(status postgres.PostgresqlStatus, cluster *apiv1.Cluste
return true, false, reason
}

if restartRequired, reason := isPodNeedingUpdatedScheduler(cluster, status.Pod); restartRequired {
return restartRequired, false, reason
}

// Detect changes in the postgres container configuration
for _, container := range status.Pod.Spec.Containers {
// we go to the next array element if it isn't the postgres container
@@ -306,6 +310,20 @@ func IsPodNeedingRollout(status postgres.PostgresqlStatus, cluster *apiv1.Cluste
true, "configuration needs a restart to apply some configuration changes"
}

// isPodNeedingUpdatedScheduler returns a boolean indicating whether a restart is required, together with the reason
func isPodNeedingUpdatedScheduler(cluster *apiv1.Cluster, pod corev1.Pod) (bool, string) {
if cluster.Spec.SchedulerName == "" || cluster.Spec.SchedulerName == pod.Spec.SchedulerName {
return false, ""
}

message := fmt.Sprintf(
"scheduler name changed from: '%s', to '%s'",
pod.Spec.SchedulerName,
cluster.Spec.SchedulerName,
)
return true, message
}

func isPodNeedingUpdateOfProjectedVolume(cluster *apiv1.Cluster, pod corev1.Pod) (needsUpdate bool, reason string) {
currentProjectedVolumeConfiguration := getProjectedVolumeConfigurationFromPod(pod)

24 changes: 19 additions & 5 deletions controllers/cluster_upgrade_test.go
@@ -28,11 +28,16 @@ import (
)

var _ = Describe("Pod upgrade", func() {
cluster := apiv1.Cluster{
Spec: apiv1.ClusterSpec{
ImageName: "postgres:13.0",
},
}
var cluster apiv1.Cluster

BeforeEach(func() {
cluster = apiv1.Cluster{
Spec: apiv1.ClusterSpec{
ImageName: "postgres:13.0",
},
}
})

It("will not require a restart for just created Pods", func() {
pod := specs.PodWithExistingStorage(cluster, 1)
Expect(isPodNeedingRestart(&cluster, postgres.PostgresqlStatus{Pod: *pod})).
@@ -93,6 +98,15 @@ var _ = Describe("Pod upgrade", func() {
Expect(reason).To(BeEquivalentTo("configuration needs a restart to apply some configuration changes"))
})

It("should trigger a rollout when the scheduler changes", func() {
pod := specs.PodWithExistingStorage(cluster, 1)
cluster.Spec.SchedulerName = "newScheduler"

rollout, reason := isPodNeedingUpdatedScheduler(&cluster, *pod)
Expect(rollout).To(BeTrue())
Expect(reason).ToNot(BeEmpty())
})

When("there's a custom environment variable set", func() {
It("detects when a new custom environment variable is set", func() {
pod := specs.PodWithExistingStorage(cluster, 1)
1 change: 1 addition & 0 deletions docs/src/api_reference.md
@@ -360,6 +360,7 @@ Name | Description
`inheritedMetadata ` | Metadata that will be inherited by all objects related to the Cluster | [*EmbeddedObjectMetadata](#EmbeddedObjectMetadata)
`imageName ` | Name of the container image, supporting both tags (`<image>:<tag>`) and digests for deterministic and repeatable deployments (`<image>:<tag>@sha256:<digestValue>`) | string
`imagePullPolicy ` | Image pull policy. One of `Always`, `Never` or `IfNotPresent`. If not defined, it defaults to `IfNotPresent`. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images | corev1.PullPolicy
`schedulerName ` | If specified, the pod will be dispatched by the specified Kubernetes scheduler. If not specified, the pod will be dispatched by the default scheduler. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ | string
`postgresUID ` | The UID of the `postgres` user inside the image, defaults to `26` | int64
`postgresGID ` | The GID of the `postgres` user inside the image, defaults to `26` | int64
`instances ` | Number of instances required in the cluster - *mandatory* | int
1 change: 1 addition & 0 deletions internal/cmd/plugin/pgbench/pgbench.go
@@ -161,6 +161,7 @@ func (cmd *pgBenchCommand) buildJob(cluster apiv1.Cluster) *batchv1.Job {
},
},
},
SchedulerName: cluster.Spec.SchedulerName,
Containers: []corev1.Container{
{
Name: "pgbench",
1 change: 1 addition & 0 deletions pkg/specs/jobs.go
@@ -271,6 +271,7 @@ func createPrimaryJob(cluster apiv1.Cluster, nodeSerial int, role jobRole, initC
InitContainers: []corev1.Container{
createBootstrapContainer(cluster),
},
SchedulerName: cluster.Spec.SchedulerName,
Containers: []corev1.Container{
{
Name: string(role),
5 changes: 3 additions & 2 deletions pkg/specs/pods.go
@@ -352,8 +352,9 @@ func PodWithExistingStorage(cluster apiv1.Cluster, nodeSerial int) *corev1.Pod {
InitContainers: []corev1.Container{
createBootstrapContainer(cluster),
},
Containers: createPostgresContainers(cluster, envConfig),
Volumes: createPostgresVolumes(cluster, podName),
SchedulerName: cluster.Spec.SchedulerName,
Containers: createPostgresContainers(cluster, envConfig),
Volumes: createPostgresVolumes(cluster, podName),
SecurityContext: CreatePodSecurityContext(
cluster.GetSeccompProfile(),
cluster.GetPostgresUID(),
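Given the change to `PodWithExistingStorage` above, the scheduler name set on the Cluster is copied into the spec of every instance Pod the operator creates. Assuming the placeholder manifest sketched earlier, the relevant fragment of a generated Pod would look roughly like this (the instance name and all omitted fields are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: cluster-example-1          # illustrative instance Pod name
spec:
  schedulerName: my-scheduler      # propagated from the Cluster's spec.schedulerName
  # containers, volumes, securityContext and the remaining fields are omitted here
```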