diff --git a/docs/Manual/Deployment/Kubernetes/DeploymentResource.md b/docs/Manual/Deployment/Kubernetes/DeploymentResource.md
index cb6df106e..cca1996dd 100644
--- a/docs/Manual/Deployment/Kubernetes/DeploymentResource.md
+++ b/docs/Manual/Deployment/Kubernetes/DeploymentResource.md
@@ -377,25 +377,22 @@ Specifies a maximum for the count of servers. If set, a specification is invalid
 This setting specifies additional commandline arguments passed to all servers of this group.
 The default value is an empty array.
 
-### `spec.<group>.resources.requests.cpu: cpuUnit`
+### `spec.<group>.resources: ResourceRequirements`
 
-This setting specifies the amount of CPU requested by server of this group.
+This setting specifies the resources required by pods of this group. This includes requests and limits.
 
 See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ for details.
 
-### `spec.<group>.resources.requests.memory: memoryUnit`
+### `spec.<group>.volumeClaimTemplate.Spec: PersistentVolumeClaimSpec`
 
-This setting specifies the amount of memory requested by server of this group.
+Specifies a volumeClaimTemplate used by the operator to create volume claims for pods of this group.
+This setting is not available for group `coordinators`, `syncmasters` & `syncworkers`.
 
-See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ for details.
-
-### `spec.<group>.resources.requests.storage: storageUnit`
+The default value describes a volume with `8Gi` storage, `ReadWriteOnce` access mode and volume mode set to `PersistentVolumeFilesystem`.
 
-This setting specifies the amount of storage required for each server of this group.
-The default value is `8Gi`.
-
-This setting is not available for group `coordinators`, `syncmasters` & `syncworkers`
-because servers in these groups do not need persistent storage.
+If this field is not set and `spec.<group>.resources.requests.storage` is set, then a default volume claim
+with the size specified by `spec.<group>.resources.requests.storage` will be created. In that case `storage`
+and `iops` are not forwarded to the pod's resource requirements.
 
 ### `spec.<group>.serviceAccountName: string`
 
@@ -405,14 +402,6 @@ for each server of this group.
 Using an alternative `ServiceAccount` is typically used to separate access rights.
 The ArangoDB deployments do not require any special rights.
 
-### `spec.<group>.storageClassName: string`
-
-This setting specifies the `storageClass` for the `PersistentVolume`s created
-for each server of this group.
-
-This setting is not available for group `coordinators`, `syncmasters` & `syncworkers`
-because servers in these groups do not need persistent storage.
-
 ### `spec.<group>.priorityClassName: string`
 
 Priority class name for pods of this group. Will be forwarded to the pod spec. [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/)
@@ -443,3 +432,29 @@ For more information on tolerations, consult the [Kubernetes documentation](http
 
 This setting specifies a set of labels to be used as `nodeSelector` for Pods of this node.
 For more information on node selectors, consult the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/).
+
+## Deprecated Fields
+
+### `spec.<group>.resources.requests.storage: storageUnit`
+
+This setting specifies the amount of storage required for each server of this group.
+The default value is `8Gi`.
+
+This setting is not available for group `coordinators`, `syncmasters` & `syncworkers`
+because servers in these groups do not need persistent storage.
+
+Please use `volumeClaimTemplate` from now on. This field is not considered if
+`volumeClaimTemplate` is set. Note, however, that in this case the information in
+`requests` is handed over to the pod's resource requirements in full.
+
+### `spec.<group>.storageClassName: string`
+
+This setting specifies the `storageClass` for the `PersistentVolume`s created
+for each server of this group.
+
+This setting is not available for group `coordinators`, `syncmasters` & `syncworkers`
+because servers in these groups do not need persistent storage.
+
+Please use `volumeClaimTemplate` from now on. This field is not considered if
+`volumeClaimTemplate` is set. Note, however, that in this case the information in
+`requests` is handed over to the pod's resource requirements in full.
\ No newline at end of file
diff --git a/pkg/apis/deployment/v1alpha/server_group_spec.go b/pkg/apis/deployment/v1alpha/server_group_spec.go
index 2ffd5764f..10c55cd27 100644
--- a/pkg/apis/deployment/v1alpha/server_group_spec.go
+++ b/pkg/apis/deployment/v1alpha/server_group_spec.go
@@ -60,6 +60,8 @@ type ServerGroupSpec struct {
 	Probes *ServerGroupProbesSpec `json:"probes,omitempty"`
 	// PriorityClassName specifies a priority class name
 	PriorityClassName string `json:"priorityClassName,omitempty"`
+	// VolumeClaimTemplate specifies a template for volume claims
+	VolumeClaimTemplate *v1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
 	// Sidecars specifies a list of additional containers to be started
 	Sidecars []v1.Container `json:"sidecars,omitempty"`
 }
@@ -101,6 +103,16 @@ func (s ServerGroupProbesSpec) IsReadinessProbeDisabled() bool {
 	return util.BoolOrDefault(s.ReadinessProbeDisabled)
 }
 
+// HasVolumeClaimTemplate returns whether there is a volumeClaimTemplate or not
+func (s ServerGroupSpec) HasVolumeClaimTemplate() bool {
+	return s.VolumeClaimTemplate != nil
+}
+
+// GetVolumeClaimTemplate returns a pointer to the volume claim template or nil if none is specified
+func (s ServerGroupSpec) GetVolumeClaimTemplate() *v1.PersistentVolumeClaim {
+	return s.VolumeClaimTemplate
+}
+
 // GetCount returns the value of count.
 func (s ServerGroupSpec) GetCount() int {
 	return util.IntOrDefault(s.Count)
@@ -243,13 +255,25 @@ func (s *ServerGroupSpec) SetDefaults(group ServerGroup, used bool, mode Deploym
 		s.MinCount = nil
 		s.MaxCount = nil
 	}
-	if _, found := s.Resources.Requests[v1.ResourceStorage]; !found {
-		switch group {
-		case ServerGroupSingle, ServerGroupAgents, ServerGroupDBServers:
-			if s.Resources.Requests == nil {
-				s.Resources.Requests = make(map[v1.ResourceName]resource.Quantity)
+	if !s.HasVolumeClaimTemplate() {
+		if _, found := s.Resources.Requests[v1.ResourceStorage]; !found {
+			switch group {
+			case ServerGroupSingle, ServerGroupAgents, ServerGroupDBServers:
+				volumeMode := v1.PersistentVolumeFilesystem
+				s.VolumeClaimTemplate = &v1.PersistentVolumeClaim{
+					Spec: v1.PersistentVolumeClaimSpec{
+						AccessModes: []v1.PersistentVolumeAccessMode{
+							v1.ReadWriteOnce,
+						},
+						VolumeMode: &volumeMode,
+						Resources: v1.ResourceRequirements{
+							Requests: v1.ResourceList{
+								v1.ResourceStorage: resource.MustParse("8Gi"),
+							},
+						},
+					},
+				}
 			}
-			s.Resources.Requests[v1.ResourceStorage] = resource.MustParse("8Gi")
 		}
 	}
 }
@@ -294,6 +318,9 @@ func (s *ServerGroupSpec) SetDefaultsFrom(source ServerGroupSpec) {
 	}
 	setDefaultsFromResourceList(&s.Resources.Limits, source.Resources.Limits)
 	setDefaultsFromResourceList(&s.Resources.Requests, source.Resources.Requests)
+	if s.VolumeClaimTemplate == nil {
+		s.VolumeClaimTemplate = source.VolumeClaimTemplate.DeepCopy()
+	}
 }
 
 // ResetImmutableFields replaces all immutable fields in the given target with values from the source spec.
@@ -306,5 +333,9 @@ func (s ServerGroupSpec) ResetImmutableFields(group ServerGroup, fieldPrefix str
 			resetFields = append(resetFields, fieldPrefix+".count")
 		}
 	}
+	if s.HasVolumeClaimTemplate() != target.HasVolumeClaimTemplate() {
+		target.VolumeClaimTemplate = s.GetVolumeClaimTemplate()
+		resetFields = append(resetFields, fieldPrefix+".volumeClaimTemplate")
+	}
 	return resetFields
 }
diff --git a/pkg/apis/deployment/v1alpha/zz_generated.deepcopy.go b/pkg/apis/deployment/v1alpha/zz_generated.deepcopy.go
index f27eaf1d8..7ebf5a44c 100644
--- a/pkg/apis/deployment/v1alpha/zz_generated.deepcopy.go
+++ b/pkg/apis/deployment/v1alpha/zz_generated.deepcopy.go
@@ -801,6 +801,11 @@ func (in *ServerGroupSpec) DeepCopyInto(out *ServerGroupSpec) {
 		*out = new(ServerGroupProbesSpec)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.VolumeClaimTemplate != nil {
+		in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate
+		*out = new(v1.PersistentVolumeClaim)
+		(*in).DeepCopyInto(*out)
+	}
 	if in.Sidecars != nil {
 		in, out := &in.Sidecars, &out.Sidecars
 		*out = make([]v1.Container, len(*in))
diff --git a/pkg/deployment/context_impl.go b/pkg/deployment/context_impl.go
index a65a4dd39..c56d79717 100644
--- a/pkg/deployment/context_impl.go
+++ b/pkg/deployment/context_impl.go
@@ -118,6 +118,23 @@ func (d *Deployment) UpdateStatus(status api.DeploymentStatus, lastVersion int32
 	return nil
 }
 
+// UpdateMember updates the deployment status wrt the given member.
+func (d *Deployment) UpdateMember(member api.MemberStatus) error {
+	status, lastVersion := d.GetStatus()
+	_, group, found := status.Members.ElementByID(member.ID)
+	if !found {
+		return maskAny(fmt.Errorf("Member %s not found", member.ID))
+	}
+	if err := status.Members.Update(member, group); err != nil {
+		return maskAny(err)
+	}
+	if err := d.UpdateStatus(status, lastVersion); err != nil {
+		log.Debug().Err(err).Msg("Updating CR status failed")
+		return maskAny(err)
+	}
+	return nil
+}
+
 // GetDatabaseClient returns a cached client for the entire database (cluster coordinators or single server),
 // creating one if needed.
 func (d *Deployment) GetDatabaseClient(ctx context.Context) (driver.Client, error) {
diff --git a/pkg/deployment/images.go b/pkg/deployment/images.go
index f8461aefe..89547a41a 100644
--- a/pkg/deployment/images.go
+++ b/pkg/deployment/images.go
@@ -30,7 +30,7 @@ import (
 	"time"
 
 	"github.com/rs/zerolog"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 
@@ -198,7 +198,7 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, ima
 		}
 	}
 	if err := k8sutil.CreateArangodPod(ib.KubeCli, true, ib.APIObject, role, id, podName, "", image, "", "", ib.Spec.GetImagePullPolicy(), "", false, terminationGracePeriod, args, env, nil, nil, nil,
-		tolerations, serviceAccountName, "", "", "", nil, "", v1.ResourceRequirements{}, nil, nil); err != nil {
+		tolerations, serviceAccountName, "", "", "", nil, "", v1.ResourceRequirements{}, nil, nil, nil); err != nil {
 		log.Debug().Err(err).Msg("Failed to create image ID pod")
 		return true, maskAny(err)
 	}
diff --git a/pkg/deployment/reconcile/context.go b/pkg/deployment/reconcile/context.go
index bf98aab60..c8e692a25 100644
--- a/pkg/deployment/reconcile/context.go
+++ b/pkg/deployment/reconcile/context.go
@@ -46,6 +46,8 @@ type Context interface {
 	// UpdateStatus replaces the status of the deployment with the given status and
 	// updates the resources in k8s.
 	UpdateStatus(status api.DeploymentStatus, lastVersion int32, force ...bool) error
+	// UpdateMember updates the deployment status wrt the given member.
+	UpdateMember(member api.MemberStatus) error
 	// GetDatabaseClient returns a cached client for the entire database (cluster coordinators or single server),
 	// creating one if needed.
 	GetDatabaseClient(ctx context.Context) (driver.Client, error)
diff --git a/pkg/deployment/reconcile/plan_builder.go b/pkg/deployment/reconcile/plan_builder.go
index 7d41cff7e..fab2bde24 100644
--- a/pkg/deployment/reconcile/plan_builder.go
+++ b/pkg/deployment/reconcile/plan_builder.go
@@ -383,9 +383,14 @@ func podNeedsRotation(log zerolog.Logger, p v1.Pod, apiObject metav1.Object, spe
 	}
 
 	// Check resource requirements
-	if resourcesRequireRotation(
-		k8sutil.FilterStorageResourceRequirement(spec.GetServerGroupSpec(group).Resources),
-		k8sutil.GetArangoDBContainerFromPod(&p).Resources) {
+	var resources v1.ResourceRequirements
+	if groupSpec.HasVolumeClaimTemplate() {
+		resources = groupSpec.Resources // If there is a volume claim template, compare all resources
+	} else {
+		resources = k8sutil.ExtractPodResourceRequirement(groupSpec.Resources)
+	}
+
+	if resourcesRequireRotation(resources, k8sutil.GetArangoDBContainerFromPod(&p).Resources) {
 		return true, "Resource Requirements changed"
 	}
 
diff --git a/pkg/deployment/reconcile/reconciler.go b/pkg/deployment/reconcile/reconciler.go
index 804baa06a..8b667b1d1 100644
--- a/pkg/deployment/reconcile/reconciler.go
+++ b/pkg/deployment/reconcile/reconciler.go
@@ -63,7 +63,8 @@ func (r *Reconciler) CheckDeployment() error {
 					r.log.Error().Err(err).Msg("Failed to delete pod")
 				}
 				m.Phase = api.MemberPhaseNone
-				if err := status.Members.Update(m, api.ServerGroupCoordinators); err != nil {
+
+				if err := r.context.UpdateMember(m); err != nil {
 					r.log.Error().Err(err).Msg("Failed to update member")
 				}
 			}
diff --git a/pkg/deployment/resources/pod_creator.go b/pkg/deployment/resources/pod_creator.go
index 6eefdaaff..66b84ac61 100644
--- a/pkg/deployment/resources/pod_creator.go
+++ b/pkg/deployment/resources/pod_creator.go
@@ -41,7 +41,7 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/util/constants"
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
 	"github.com/pkg/errors"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -660,7 +660,7 @@ func (r *Resources) createPodForMember(spec api.DeploymentSpec, memberID string,
 		finalizers := r.createPodFinalizers(group)
 		if err := k8sutil.CreateArangodPod(kubecli, spec.IsDevelopment(), apiObject, role, m.ID, m.PodName, m.PersistentVolumeClaimName, imageInfo.ImageID, lifecycleImage, alpineImage, spec.GetImagePullPolicy(),
 			engine, requireUUID, terminationGracePeriod, args, env, finalizers, livenessProbe, readinessProbe, tolerations, serviceAccountName, tlsKeyfileSecretName, rocksdbEncryptionSecretName,
-			clusterJWTSecretName, groupSpec.GetNodeSelector(), groupSpec.PriorityClassName, groupSpec.Resources, exporter, groupSpec.GetSidecars()); err != nil {
+			clusterJWTSecretName, groupSpec.GetNodeSelector(), groupSpec.PriorityClassName, groupSpec.Resources, exporter, groupSpec.GetSidecars(), groupSpec.VolumeClaimTemplate); err != nil {
 			return maskAny(err)
 		}
 		log.Debug().Str("pod-name", m.PodName).Msg("Created pod")
diff --git a/pkg/deployment/resources/pvc_inspector.go b/pkg/deployment/resources/pvc_inspector.go
index 495569602..fd21deb18 100644
--- a/pkg/deployment/resources/pvc_inspector.go
+++ b/pkg/deployment/resources/pvc_inspector.go
@@ -83,24 +83,41 @@ func (r *Resources) InspectPVCs(ctx context.Context) (util.Interval, error) {
 
 		// Resize inspector
 		groupSpec := spec.GetServerGroupSpec(group)
-		if requestedSize, ok := groupSpec.Resources.Requests[apiv1.ResourceStorage]; ok {
-			if volumeSize, ok := p.Spec.Resources.Requests[apiv1.ResourceStorage]; ok {
-				cmp := volumeSize.Cmp(requestedSize)
-				if cmp < 0 {
-					// Size of the volume is smaller than the requested size
-					// Update the pvc with the request size
-					p.Spec.Resources.Requests[apiv1.ResourceStorage] = requestedSize
-
-					log.Debug().Str("pvc-capacity", volumeSize.String()).Str("requested", requestedSize.String()).Msg("PVC capacity differs - updating")
-					kube := r.context.GetKubeCli()
-					if _, err := kube.CoreV1().PersistentVolumeClaims(r.context.GetNamespace()).Update(&p); err != nil {
-						log.Error().Err(err).Msg("Failed to update pvc")
-					}
+
+		if groupSpec.HasVolumeClaimTemplate() {
+			res := groupSpec.GetVolumeClaimTemplate().Spec.Resources.Requests
+			// For a PVC, only resources.requests is mutable
+			if compareResourceList(p.Spec.Resources.Requests, res) {
+				p.Spec.Resources.Requests = res
+				log.Debug().Msg("volumeClaimTemplate requested resources changed - updating")
+				kube := r.context.GetKubeCli()
+				if _, err := kube.CoreV1().PersistentVolumeClaims(r.context.GetNamespace()).Update(&p); err != nil {
+					log.Error().Err(err).Msg("Failed to update pvc")
+				} else {
 					r.context.CreateEvent(k8sutil.NewPVCResizedEvent(r.context.GetAPIObject(), p.Name))
-				} else if cmp > 0 {
-					log.Error().Str("server-group", group.AsRole()).Str("pvc-storage-size", volumeSize.String()).Str("requested-size", requestedSize.String()).
-						Msg("Volume size should not shrink")
-					r.context.CreateEvent(k8sutil.NewCannotShrinkVolumeEvent(r.context.GetAPIObject(), p.Name))
+				}
+			}
+		} else {
+			if requestedSize, ok := groupSpec.Resources.Requests[apiv1.ResourceStorage]; ok {
+				if volumeSize, ok := p.Spec.Resources.Requests[apiv1.ResourceStorage]; ok {
+					cmp := volumeSize.Cmp(requestedSize)
+					if cmp < 0 {
+						// Size of the volume is smaller than the requested size
+						// Update the pvc with the request size
+						p.Spec.Resources.Requests[apiv1.ResourceStorage] = requestedSize
+
+						log.Debug().Str("pvc-capacity", volumeSize.String()).Str("requested", requestedSize.String()).Msg("PVC capacity differs - updating")
+						kube := r.context.GetKubeCli()
+						if _, err := kube.CoreV1().PersistentVolumeClaims(r.context.GetNamespace()).Update(&p); err != nil {
+							log.Error().Err(err).Msg("Failed to update pvc")
+						} else {
+							r.context.CreateEvent(k8sutil.NewPVCResizedEvent(r.context.GetAPIObject(), p.Name))
+						}
+					} else if cmp > 0 {
+						log.Error().Str("server-group", group.AsRole()).Str("pvc-storage-size", volumeSize.String()).Str("requested-size", requestedSize.String()).
+							Msg("Volume size should not shrink")
+						r.context.CreateEvent(k8sutil.NewCannotShrinkVolumeEvent(r.context.GetAPIObject(), p.Name))
+					}
 				}
 			}
 		}
@@ -118,3 +135,21 @@ func (r *Resources) InspectPVCs(ctx context.Context) (util.Interval, error) {
 
 	return nextInterval, nil
 }
+
+func compareResourceList(wanted, given apiv1.ResourceList) bool {
+	for k, v := range wanted {
+		if gv, ok := given[k]; !ok {
+			return true
+		} else if v.Cmp(gv) != 0 {
+			return true
+		}
+	}
+
+	for k := range given {
+		if _, ok := wanted[k]; !ok {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/pkg/deployment/resources/pvcs.go b/pkg/deployment/resources/pvcs.go
index 6483b03a2..b0af174cc 100644
--- a/pkg/deployment/resources/pvcs.go
+++ b/pkg/deployment/resources/pvcs.go
@@ -54,8 +54,9 @@ func (r *Resources) EnsurePVCs() error {
 			storageClassName := spec.GetStorageClassName()
 			role := group.AsRole()
 			resources := spec.Resources
+			vct := spec.VolumeClaimTemplate
 			finalizers := r.createPVCFinalizers(group)
-			if err := k8sutil.CreatePersistentVolumeClaim(pvcs, m.PersistentVolumeClaimName, deploymentName, ns, storageClassName, role, enforceAntiAffinity, resources, finalizers, owner); err != nil {
+			if err := k8sutil.CreatePersistentVolumeClaim(pvcs, m.PersistentVolumeClaimName, deploymentName, ns, storageClassName, role, enforceAntiAffinity, resources, vct, finalizers, owner); err != nil {
 				return maskAny(err)
 			}
 		}
diff --git a/pkg/util/k8sutil/pods.go b/pkg/util/k8sutil/pods.go
index 38ff74161..6e0760574 100644
--- a/pkg/util/k8sutil/pods.go
+++ b/pkg/util/k8sutil/pods.go
@@ -31,7 +31,7 @@ import (
 	"time"
 
 	"github.com/arangodb/kube-arangodb/pkg/util/constants"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 )
@@ -304,13 +304,13 @@ func arangodInitContainer(name, id, engine, alpineImage string, requireUUID bool
 	return c
 }
 
-// FilterStorageResourceRequirement filters resource requirements for Storage.
-func FilterStorageResourceRequirement(resources v1.ResourceRequirements) v1.ResourceRequirements {
+// ExtractPodResourceRequirement returns the resource requirements that apply to pods (cpu and memory).
+func ExtractPodResourceRequirement(resources v1.ResourceRequirements) v1.ResourceRequirements {
 
 	filterStorage := func(list v1.ResourceList) v1.ResourceList {
 		newlist := make(v1.ResourceList)
 		for k, v := range list {
-			if k == v1.ResourceStorage {
+			if k != v1.ResourceCPU && k != v1.ResourceMemory {
 				continue
 			}
 			newlist[k] = v
@@ -326,7 +326,7 @@ func FilterStorageResourceRequirement(resources v1.ResourceRequirements) v1.Reso
 
 // arangodContainer creates a container configured to run `arangod`.
 func arangodContainer(image string, imagePullPolicy v1.PullPolicy, args []string, env map[string]EnvValue, livenessProbe *HTTPProbeConfig, readinessProbe *HTTPProbeConfig,
-	lifecycle *v1.Lifecycle, lifecycleEnvVars []v1.EnvVar, resources v1.ResourceRequirements) v1.Container {
+	lifecycle *v1.Lifecycle, lifecycleEnvVars []v1.EnvVar, resources v1.ResourceRequirements, noFilterResources bool) v1.Container {
 	c := v1.Container{
 		Command: append([]string{"/usr/sbin/arangod"}, args...),
 		Name:    ServerContainerName,
@@ -340,9 +340,14 @@ func arangodContainer(image string, imagePullPolicy v1.PullPolicy, args []string
 				Protocol:      v1.ProtocolTCP,
 			},
 		},
-		Resources:    FilterStorageResourceRequirement(resources), // Storage is handled via pvcs
 		VolumeMounts: arangodVolumeMounts(),
 	}
+	if noFilterResources {
+		c.Resources = resources // if a volumeClaimTemplate is specified, hand all resources to the pod
+	} else {
+		c.Resources = ExtractPodResourceRequirement(resources) // Storage is handled via pvcs
+	}
+
 	for k, v := range env {
 		c.Env = append(c.Env, v.CreateEnvVar(k))
 	}
@@ -376,7 +381,7 @@ func arangosyncContainer(image string, imagePullPolicy v1.PullPolicy, args []str
 				Protocol:      v1.ProtocolTCP,
 			},
 		},
-		Resources: FilterStorageResourceRequirement(resources), // Storage is handled via pvcs
+		Resources: resources,
 	}
 	for k, v := range env {
 		c.Env = append(c.Env, v.CreateEnvVar(k))
 	}
@@ -530,7 +535,8 @@ func CreateArangodPod(kubecli kubernetes.Interface, developmentMode bool, deploy
 	args []string, env map[string]EnvValue, finalizers []string, livenessProbe *HTTPProbeConfig, readinessProbe *HTTPProbeConfig, tolerations []v1.Toleration, serviceAccountName string,
 	tlsKeyfileSecretName, rocksdbEncryptionSecretName string, clusterJWTSecretName string, nodeSelector map[string]string,
-	podPriorityClassName string, resources v1.ResourceRequirements, exporter *ArangodbExporterContainerConf, sidecars []v1.Container) error {
+	podPriorityClassName string, resources v1.ResourceRequirements, exporter *ArangodbExporterContainerConf, sidecars []v1.Container, vct *v1.PersistentVolumeClaim) error {
+	// Prepare basic pod
 	p := newPod(deployment.GetName(), deployment.GetNamespace(), role, id, podName, finalizers, tolerations, serviceAccountName, nodeSelector)
 	terminationGracePeriodSeconds := int64(math.Ceil(terminationGracePeriod.Seconds()))
@@ -553,7 +559,7 @@ func CreateArangodPod(kubecli kubernetes.Interface, developmentMode bool, deploy
 	}
 
 	// Add arangod container
-	c := arangodContainer(image, imagePullPolicy, args, env, livenessProbe, readinessProbe, lifecycle, lifecycleEnvVars, resources)
+	c := arangodContainer(image, imagePullPolicy, args, env, livenessProbe, readinessProbe, lifecycle, lifecycleEnvVars, resources, vct != nil)
 	if tlsKeyfileSecretName != "" {
 		c.VolumeMounts = append(c.VolumeMounts, tlsKeyfileVolumeMounts()...)
 	}
diff --git a/pkg/util/k8sutil/pvc.go b/pkg/util/k8sutil/pvc.go
index b9ae8c1ea..b12d6a6bf 100644
--- a/pkg/util/k8sutil/pvc.go
+++ b/pkg/util/k8sutil/pvc.go
@@ -58,10 +58,30 @@ func CreatePersistentVolumeClaimName(deploymentName, role, id string) string {
 	return deploymentName + "-" + role + "-" + stripArangodPrefix(id)
 }
 
+// ExtractStorageResourceRequirement filters resource requirements for PersistentVolumeClaims (storage and iops).
+func ExtractStorageResourceRequirement(resources v1.ResourceRequirements) v1.ResourceRequirements {
+
+	filterStorage := func(list v1.ResourceList) v1.ResourceList {
+		newlist := make(v1.ResourceList)
+		for k, v := range list {
+			if k != v1.ResourceStorage && k != "iops" {
+				continue
+			}
+			newlist[k] = v
+		}
+		return newlist
+	}
+
+	return v1.ResourceRequirements{
+		Limits:   filterStorage(resources.Limits),
+		Requests: filterStorage(resources.Requests),
+	}
+}
+
 // CreatePersistentVolumeClaim creates a persistent volume claim with given name and configuration.
 // If the pvc already exists, nil is returned.
 // If another error occurs, that error is returned.
-func CreatePersistentVolumeClaim(pvcs PersistentVolumeClaimInterface, pvcName, deploymentName, ns, storageClassName, role string, enforceAntiAffinity bool, resources v1.ResourceRequirements, finalizers []string, owner metav1.OwnerReference) error {
+func CreatePersistentVolumeClaim(pvcs PersistentVolumeClaimInterface, pvcName, deploymentName, ns, storageClassName, role string, enforceAntiAffinity bool, resources v1.ResourceRequirements, vct *v1.PersistentVolumeClaim, finalizers []string, owner metav1.OwnerReference) error {
 	labels := LabelsForDeployment(deploymentName, role)
 	volumeMode := v1.PersistentVolumeFilesystem
 	pvc := &v1.PersistentVolumeClaim{
@@ -73,14 +93,19 @@ func CreatePersistentVolumeClaim(pvcs PersistentVolumeClaimInterface, pvcName, d
 			constants.AnnotationEnforceAntiAffinity: strconv.FormatBool(enforceAntiAffinity),
 		},
 	},
-		Spec: v1.PersistentVolumeClaimSpec{
+	}
+	if vct == nil {
+		pvc.Spec = v1.PersistentVolumeClaimSpec{
 			AccessModes: []v1.PersistentVolumeAccessMode{
 				v1.ReadWriteOnce,
 			},
 			VolumeMode: &volumeMode,
-			Resources:  resources,
-		},
+			Resources:  ExtractStorageResourceRequirement(resources),
+		}
+	} else {
+		pvc.Spec = vct.Spec
 	}
+
 	if storageClassName != "" {
 		pvc.Spec.StorageClassName = &storageClassName
 	}
diff --git a/tests/persistent_volumes_test.go b/tests/persistent_volumes_test.go
index 2804564e2..da2a8f67c 100644
--- a/tests/persistent_volumes_test.go
+++ b/tests/persistent_volumes_test.go
@@ -171,3 +171,90 @@ func TestPVCResize(t *testing.T) {
 	}
 
 }
+
+func TestPVCTemplateResize(t *testing.T) {
+	longOrSkip(t)
+
+	k8sNameSpace := getNamespace(t)
+	k8sClient := mustNewKubeClient(t)
+
+	mode := api.DeploymentModeCluster
+	engine := api.StorageEngineRocksDB
+
+	size10GB, _ := resource.ParseQuantity("10Gi")
+	size08GB, _ := resource.ParseQuantity("8Gi")
+
+	deploymentClient := kubeArangoClient.MustNewInCluster()
+	deploymentTemplate := newDeployment(strings.Replace(fmt.Sprintf("trsz-%s-%s-%s", mode[:2], engine[:2], uniuri.NewLen(4)), ".", "", -1))
+	deploymentTemplate.Spec.Mode = api.NewMode(mode)
+	deploymentTemplate.Spec.StorageEngine = api.NewStorageEngine(engine)
+	deploymentTemplate.Spec.TLS = api.TLSSpec{}
+	deploymentTemplate.Spec.SetDefaults(deploymentTemplate.GetName()) // this must be last
+	assert.NoError(t, deploymentTemplate.Spec.Validate())
+	assert.NotNil(t, deploymentTemplate.Spec.DBServers.VolumeClaimTemplate)
+	deploymentTemplate.Spec.DBServers.VolumeClaimTemplate.Spec.Resources.Requests[corev1.ResourceStorage] = size08GB
+
+	// Create deployment
+	_, err := deploymentClient.DatabaseV1alpha().ArangoDeployments(k8sNameSpace).Create(deploymentTemplate)
+	defer removeDeployment(deploymentClient, deploymentTemplate.GetName(), k8sNameSpace)
+	assert.NoError(t, err, "failed to create deployment: %s", err)
+
+	depl, err := waitUntilDeployment(deploymentClient, deploymentTemplate.GetName(), k8sNameSpace, deploymentIsReady())
+	assert.NoError(t, err, fmt.Sprintf("Deployment not running in time: %s", err))
+
+	// Get list of all pvcs for dbservers
+	for _, m := range depl.Status.Members.DBServers {
+		pvc, err := k8sClient.CoreV1().PersistentVolumeClaims(k8sNameSpace).Get(m.PersistentVolumeClaimName, metav1.GetOptions{})
+		assert.NoError(t, err, "failed to get pvc: %s", err)
+		volumeSize, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
+		assert.True(t, ok, "pvc does not have storage resource")
+		assert.True(t, volumeSize.Cmp(size08GB) == 0, "wrong volume size: expected: %s, found: %s", size08GB.String(), volumeSize.String())
+	}
+
+	// Update the deployment
+	// Change the requested volume size in the volumeClaimTemplate
+	depl, err = updateDeployment(deploymentClient, deploymentTemplate.GetName(), k8sNameSpace,
+		func(depl *api.DeploymentSpec) {
+			depl.DBServers.VolumeClaimTemplate.Spec.Resources.Requests[corev1.ResourceStorage] = size10GB
+		})
+	if err != nil {
+		t.Fatalf("Failed to update the deployment")
+	} else {
+		t.Log("Updated deployment")
+	}
+
+	if err := retry.Retry(func() error {
+		// Get list of all pvcs for dbservers and check for new size
+		for _, m := range depl.Status.Members.DBServers {
+			pvc, err := k8sClient.CoreV1().PersistentVolumeClaims(k8sNameSpace).Get(m.PersistentVolumeClaimName, metav1.GetOptions{})
+			if err != nil {
+				return err
+			}
+			volumeSize, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
+			if !ok {
+				return fmt.Errorf("pvc does not have storage resource")
+			}
+			if volumeSize.Cmp(size10GB) != 0 {
+				return fmt.Errorf("wrong pvc size: expected: %s, found: %s", size10GB.String(), volumeSize.String())
+			}
+			volume, err := k8sClient.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
+			if err != nil {
+				return err
+			}
+			volumeSize, ok = volume.Spec.Capacity[corev1.ResourceStorage]
+			if !ok {
+				return fmt.Errorf("pv does not have storage resource")
+			}
+			if volumeSize.Cmp(size10GB) != 0 {
+				return fmt.Errorf("wrong volume size: expected: %s, found: %s", size10GB.String(), volumeSize.String())
+			}
+			if k8sutil.IsPersistentVolumeClaimFileSystemResizePending(pvc) {
+				return fmt.Errorf("persistent volume claim file system resize pending")
+			}
+		}
+		return nil
+	}, 5*time.Minute); err != nil {
+		t.Fatalf("PVCs not resized: %s", err.Error())
+	}
+
+}
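
For readers of the documentation change above, the following sketch shows what a custom `volumeClaimTemplate` looks like when built with the same API types the operator uses in `SetDefaults`. It mirrors the operator's default template (`8Gi`, `ReadWriteOnce`, `PersistentVolumeFilesystem`); the `10Gi` size and the storage class name are illustrative assumptions, not defaults:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// customVolumeClaimTemplate mirrors the default template built in
// ServerGroupSpec.SetDefaults, with two illustrative overrides: a 10Gi
// request instead of the 8Gi default, and an explicit storage class.
func customVolumeClaimTemplate() *v1.PersistentVolumeClaim {
	volumeMode := v1.PersistentVolumeFilesystem
	storageClass := "my-fast-class" // hypothetical storage class name
	return &v1.PersistentVolumeClaim{
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
			},
			VolumeMode:       &volumeMode,
			StorageClassName: &storageClass,
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: resource.MustParse("10Gi"),
				},
			},
		},
	}
}

func main() {
	vct := customVolumeClaimTemplate()
	qty := vct.Spec.Resources.Requests[v1.ResourceStorage]
	fmt.Println("requested storage:", qty.String())
}
```

Note that in `CreatePersistentVolumeClaim` above, a non-empty (deprecated) `spec.<group>.storageClassName` is applied after `pvc.Spec` is copied from the template, so it still overrides the template's `StorageClassName`.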
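The split performed by the two new helpers can also be seen in isolation: `ExtractPodResourceRequirement` (pods.go) keeps only `cpu` and `memory`, while `ExtractStorageResourceRequirement` (pvc.go) keeps only `storage` and the custom `iops` key. A minimal sketch, assuming the combined requests below (the values are illustrative):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
)

func main() {
	// A combined requirements block, as a user might write it in
	// spec.<group>.resources before volumeClaimTemplate existed.
	combined := v1.ResourceRequirements{
		Requests: v1.ResourceList{
			v1.ResourceCPU:     resource.MustParse("500m"),
			v1.ResourceMemory:  resource.MustParse("1Gi"),
			v1.ResourceStorage: resource.MustParse("8Gi"),
		},
	}

	// Pod side: only cpu and memory survive; storage is handled via PVCs.
	pod := k8sutil.ExtractPodResourceRequirement(combined)
	// PVC side: only storage (and iops) survive.
	pvc := k8sutil.ExtractStorageResourceRequirement(combined)

	for name, qty := range pod.Requests {
		fmt.Printf("pod: %s=%s\n", name, qty.String())
	}
	for name, qty := range pvc.Requests {
		fmt.Printf("pvc: %s=%s\n", name, qty.String())
	}
}
```

When `volumeClaimTemplate` is set, this filtering is bypassed on the pod side (the `noFilterResources` flag in `arangodContainer`), which is why `spec.<group>.resources` is then handed over to the pod in full.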