Merged
55 changes: 35 additions & 20 deletions docs/Manual/Deployment/Kubernetes/DeploymentResource.md
@@ -377,25 +377,22 @@ Specifies a maximum for the count of servers. If set, a specification is invalid
This setting specifies additional command-line arguments passed to all servers of this group.
The default value is an empty array.

### `spec.<group>.resources.requests.cpu: cpuUnit`
### `spec.<group>.resources: ResourceRequirements`

This setting specifies the amount of CPU requested by each server of this group.
This setting specifies the resources required by pods of this group, including requests and limits.

See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ for details.
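
As an illustration, such a requirements block could look like the following sketch; the `dbservers` group name and all quantities here are placeholders, not defaults:

```yaml
spec:
  dbservers:
    resources:
      requests:
        cpu: 800m
        memory: 1Gi
      limits:
        cpu: "2"
        memory: 2Gi
```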

### `spec.<group>.resources.requests.memory: memoryUnit`
### `spec.<group>.volumeClaimTemplate.Spec: PersistentVolumeClaimSpec`

This setting specifies the amount of memory requested by each server of this group.
Specifies a volumeClaimTemplate used by the operator to create volume claims for pods of this group.
This setting is not available for group `coordinators`, `syncmasters` & `syncworkers`.

See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ for details.

### `spec.<group>.resources.requests.storage: storageUnit`
The default value describes a volume with `8Gi` storage, `ReadWriteOnce` access mode and volume mode set to `PersistentVolumeFilesystem`.

This setting specifies the amount of storage required for each server of this group.
The default value is `8Gi`.

This setting is not available for group `coordinators`, `syncmasters` & `syncworkers`
because servers in these groups do not need persistent storage.
If this field is not set and `spec.<group>.resources.requests.storage` is set, then a default volume claim
with the size specified by `spec.<group>.resources.requests.storage` is created. In that case `storage`
and `iops` are not forwarded to the pod's resource requirements.
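
As an illustration, a volume claim template that spells out the documented defaults might look like the following sketch; the `dbservers` group and the storage class name are placeholders (`storageClassName` is a standard `PersistentVolumeClaimSpec` field):

```yaml
spec:
  dbservers:
    volumeClaimTemplate:
      spec:
        accessModes:
          - ReadWriteOnce
        volumeMode: Filesystem
        resources:
          requests:
            storage: 8Gi
        storageClassName: my-storage-class
```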

### `spec.<group>.serviceAccountName: string`

@@ -405,14 +405,6 @@ for each server of this group.
An alternative `ServiceAccount` is typically used to separate access rights.
The ArangoDB deployments do not require any special rights.

### `spec.<group>.storageClassName: string`

This setting specifies the `storageClass` for the `PersistentVolume`s created
for each server of this group.

This setting is not available for group `coordinators`, `syncmasters` & `syncworkers`
because servers in these groups do not need persistent storage.

### `spec.<group>.priorityClassName: string`

Priority class name for pods of this group. It will be forwarded to the pod spec. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/).
@@ -443,3 +432,29 @@ For more information on tolerations, consult the [Kubernetes documentation](http
This setting specifies a set of labels to be used as `nodeSelector` for Pods of this group.

For more information on node selectors, consult the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/).
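
A minimal sketch of such a node selector; the label key and value are placeholders:

```yaml
spec:
  dbservers:
    nodeSelector:
      disktype: ssd
```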

## Deprecated Fields

### `spec.<group>.resources.requests.storage: storageUnit`

This setting specifies the amount of storage required for each server of this group.
The default value is `8Gi`.

This setting is not available for group `coordinators`, `syncmasters` & `syncworkers`
because servers in these groups do not need persistent storage.

Deprecated: use `spec.<group>.volumeClaimTemplate` instead. This field is ignored if a
volumeClaimTemplate is set. Note, however, that in that case the information in
`resources.requests` is handed over to the pod's resource requirements unchanged.
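
To migrate off the deprecated field, a rough before/after sketch; the group name and size are placeholders:

```yaml
# Deprecated form:
spec:
  dbservers:
    resources:
      requests:
        storage: 32Gi
---
# Roughly equivalent volumeClaimTemplate:
spec:
  dbservers:
    volumeClaimTemplate:
      spec:
        resources:
          requests:
            storage: 32Gi
```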

### `spec.<group>.storageClassName: string`

This setting specifies the `storageClass` for the `PersistentVolume`s created
for each server of this group.

This setting is not available for group `coordinators`, `syncmasters` & `syncworkers`
because servers in these groups do not need persistent storage.

Deprecated: use `spec.<group>.volumeClaimTemplate` instead; its spec includes a
`storageClassName` field. This setting is ignored if a volumeClaimTemplate is set. Note,
however, that in that case the information in `resources.requests` is handed over to the
pod's resource requirements unchanged.
43 changes: 37 additions & 6 deletions pkg/apis/deployment/v1alpha/server_group_spec.go
@@ -60,6 +60,8 @@ type ServerGroupSpec struct {
Probes *ServerGroupProbesSpec `json:"probes,omitempty"`
// PriorityClassName specifies a priority class name
PriorityClassName string `json:"priorityClassName,omitempty"`
// VolumeClaimTemplate specifies a template for volume claims
VolumeClaimTemplate *v1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
// Sidecars specifies a list of additional containers to be started
Sidecars []v1.Container `json:"sidecars,omitempty"`
}
@@ -101,6 +103,16 @@ func (s ServerGroupProbesSpec) IsReadinessProbeDisabled() bool {
return util.BoolOrDefault(s.ReadinessProbeDisabled)
}

// HasVolumeClaimTemplate returns whether there is a volumeClaimTemplate or not
func (s ServerGroupSpec) HasVolumeClaimTemplate() bool {
return s.VolumeClaimTemplate != nil
}

// GetVolumeClaimTemplate returns a pointer to a volume claim template or nil if none is specified
func (s ServerGroupSpec) GetVolumeClaimTemplate() *v1.PersistentVolumeClaim {
return s.VolumeClaimTemplate
}

// GetCount returns the value of count.
func (s ServerGroupSpec) GetCount() int {
return util.IntOrDefault(s.Count)
@@ -243,13 +255,25 @@ func (s *ServerGroupSpec) SetDefaults(group ServerGroup, used bool, mode Deploym
s.MinCount = nil
s.MaxCount = nil
}
if _, found := s.Resources.Requests[v1.ResourceStorage]; !found {
switch group {
case ServerGroupSingle, ServerGroupAgents, ServerGroupDBServers:
if s.Resources.Requests == nil {
s.Resources.Requests = make(map[v1.ResourceName]resource.Quantity)
if !s.HasVolumeClaimTemplate() {
if _, found := s.Resources.Requests[v1.ResourceStorage]; !found {
switch group {
case ServerGroupSingle, ServerGroupAgents, ServerGroupDBServers:
volumeMode := v1.PersistentVolumeFilesystem
s.VolumeClaimTemplate = &v1.PersistentVolumeClaim{
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
VolumeMode: &volumeMode,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("8Gi"),
},
},
},
}
}
s.Resources.Requests[v1.ResourceStorage] = resource.MustParse("8Gi")
}
}
}
@@ -294,6 +318,9 @@ func (s *ServerGroupSpec) SetDefaultsFrom(source ServerGroupSpec) {
}
setDefaultsFromResourceList(&s.Resources.Limits, source.Resources.Limits)
setDefaultsFromResourceList(&s.Resources.Requests, source.Resources.Requests)
if s.VolumeClaimTemplate == nil {
s.VolumeClaimTemplate = source.VolumeClaimTemplate.DeepCopy()
}
}

// ResetImmutableFields replaces all immutable fields in the given target with values from the source spec.
@@ -306,5 +333,9 @@ func (s ServerGroupSpec) ResetImmutableFields(group ServerGroup, fieldPrefix str
resetFields = append(resetFields, fieldPrefix+".count")
}
}
if s.HasVolumeClaimTemplate() != target.HasVolumeClaimTemplate() {
target.VolumeClaimTemplate = s.GetVolumeClaimTemplate()
resetFields = append(resetFields, fieldPrefix+".volumeClaimTemplate")
}
return resetFields
}
5 changes: 5 additions & 0 deletions pkg/apis/deployment/v1alpha/zz_generated.deepcopy.go

Some generated files are not rendered by default.

17 changes: 17 additions & 0 deletions pkg/deployment/context_impl.go
@@ -118,6 +118,23 @@ func (d *Deployment) UpdateStatus(status api.DeploymentStatus, lastVersion int32
return nil
}

// UpdateMember updates the deployment status with respect to the given member.
func (d *Deployment) UpdateMember(member api.MemberStatus) error {
status, lastVersion := d.GetStatus()
_, group, found := status.Members.ElementByID(member.ID)
if !found {
return maskAny(fmt.Errorf("Member %s not found", member.ID))
}
if err := status.Members.Update(member, group); err != nil {
return maskAny(err)
}
if err := d.UpdateStatus(status, lastVersion); err != nil {
log.Debug().Err(err).Msg("Updating CR status failed")
return maskAny(err)
}
return nil
}

// GetDatabaseClient returns a cached client for the entire database (cluster coordinators or single server),
// creating one if needed.
func (d *Deployment) GetDatabaseClient(ctx context.Context) (driver.Client, error) {
4 changes: 2 additions & 2 deletions pkg/deployment/images.go
@@ -30,7 +30,7 @@ import (
"time"

"github.com/rs/zerolog"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"

@@ -198,7 +198,7 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, ima
}
}
if err := k8sutil.CreateArangodPod(ib.KubeCli, true, ib.APIObject, role, id, podName, "", image, "", "", ib.Spec.GetImagePullPolicy(), "", false, terminationGracePeriod, args, env, nil, nil, nil,
tolerations, serviceAccountName, "", "", "", nil, "", v1.ResourceRequirements{}, nil, nil); err != nil {
tolerations, serviceAccountName, "", "", "", nil, "", v1.ResourceRequirements{}, nil, nil, nil); err != nil {
log.Debug().Err(err).Msg("Failed to create image ID pod")
return true, maskAny(err)
}
2 changes: 2 additions & 0 deletions pkg/deployment/reconcile/context.go
@@ -46,6 +46,8 @@ type Context interface {
// UpdateStatus replaces the status of the deployment with the given status and
// updates the resources in k8s.
UpdateStatus(status api.DeploymentStatus, lastVersion int32, force ...bool) error
// UpdateMember updates the deployment status with respect to the given member.
UpdateMember(member api.MemberStatus) error
// GetDatabaseClient returns a cached client for the entire database (cluster coordinators or single server),
// creating one if needed.
GetDatabaseClient(ctx context.Context) (driver.Client, error)
11 changes: 8 additions & 3 deletions pkg/deployment/reconcile/plan_builder.go
@@ -383,9 +383,14 @@ func podNeedsRotation(log zerolog.Logger, p v1.Pod, apiObject metav1.Object, spe
}

// Check resource requirements
if resourcesRequireRotation(
k8sutil.FilterStorageResourceRequirement(spec.GetServerGroupSpec(group).Resources),
k8sutil.GetArangoDBContainerFromPod(&p).Resources) {
var resources v1.ResourceRequirements
if groupSpec.HasVolumeClaimTemplate() {
resources = groupSpec.Resources // If there is a volume claim template compare all resources
} else {
resources = k8sutil.ExtractPodResourceRequirement(groupSpec.Resources)
}

if resourcesRequireRotation(resources, k8sutil.GetArangoDBContainerFromPod(&p).Resources) {
return true, "Resource Requirements changed"
}

3 changes: 2 additions & 1 deletion pkg/deployment/reconcile/reconciler.go
@@ -63,7 +63,8 @@ func (r *Reconciler) CheckDeployment() error {
r.log.Error().Err(err).Msg("Failed to delete pod")
}
m.Phase = api.MemberPhaseNone
if err := status.Members.Update(m, api.ServerGroupCoordinators); err != nil {

if err := r.context.UpdateMember(m); err != nil {
r.log.Error().Err(err).Msg("Failed to update member")
}
}
4 changes: 2 additions & 2 deletions pkg/deployment/resources/pod_creator.go
@@ -41,7 +41,7 @@ import (
"github.com/arangodb/kube-arangodb/pkg/util/constants"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
"github.com/pkg/errors"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -660,7 +660,7 @@ func (r *Resources) createPodForMember(spec api.DeploymentSpec, memberID string,
finalizers := r.createPodFinalizers(group)
if err := k8sutil.CreateArangodPod(kubecli, spec.IsDevelopment(), apiObject, role, m.ID, m.PodName, m.PersistentVolumeClaimName, imageInfo.ImageID, lifecycleImage, alpineImage, spec.GetImagePullPolicy(),
engine, requireUUID, terminationGracePeriod, args, env, finalizers, livenessProbe, readinessProbe, tolerations, serviceAccountName, tlsKeyfileSecretName, rocksdbEncryptionSecretName,
clusterJWTSecretName, groupSpec.GetNodeSelector(), groupSpec.PriorityClassName, groupSpec.Resources, exporter, groupSpec.GetSidecars()); err != nil {
clusterJWTSecretName, groupSpec.GetNodeSelector(), groupSpec.PriorityClassName, groupSpec.Resources, exporter, groupSpec.GetSidecars(), groupSpec.VolumeClaimTemplate); err != nil {
return maskAny(err)
}
log.Debug().Str("pod-name", m.PodName).Msg("Created pod")
69 changes: 52 additions & 17 deletions pkg/deployment/resources/pvc_inspector.go
@@ -83,24 +83,41 @@ func (r *Resources) InspectPVCs(ctx context.Context) (util.Interval, error) {

// Resize inspector
groupSpec := spec.GetServerGroupSpec(group)
if requestedSize, ok := groupSpec.Resources.Requests[apiv1.ResourceStorage]; ok {
if volumeSize, ok := p.Spec.Resources.Requests[apiv1.ResourceStorage]; ok {
cmp := volumeSize.Cmp(requestedSize)
if cmp < 0 {
// Size of the volume is smaller than the requested size
// Update the pvc with the request size
p.Spec.Resources.Requests[apiv1.ResourceStorage] = requestedSize

log.Debug().Str("pvc-capacity", volumeSize.String()).Str("requested", requestedSize.String()).Msg("PVC capacity differs - updating")
kube := r.context.GetKubeCli()
if _, err := kube.CoreV1().PersistentVolumeClaims(r.context.GetNamespace()).Update(&p); err != nil {
log.Error().Err(err).Msg("Failed to update pvc")
}

if groupSpec.HasVolumeClaimTemplate() {
res := groupSpec.GetVolumeClaimTemplate().Spec.Resources.Requests
// For a PVC, only resources.requests is mutable
if compareResourceList(p.Spec.Resources.Requests, res) {
p.Spec.Resources.Requests = res
log.Debug().Msg("volumeClaimTemplate requested resources changed - updating")
kube := r.context.GetKubeCli()
if _, err := kube.CoreV1().PersistentVolumeClaims(r.context.GetNamespace()).Update(&p); err != nil {
log.Error().Err(err).Msg("Failed to update pvc")
} else {
r.context.CreateEvent(k8sutil.NewPVCResizedEvent(r.context.GetAPIObject(), p.Name))
} else if cmp > 0 {
log.Error().Str("server-group", group.AsRole()).Str("pvc-storage-size", volumeSize.String()).Str("requested-size", requestedSize.String()).
Msg("Volume size should not shrink")
r.context.CreateEvent(k8sutil.NewCannotShrinkVolumeEvent(r.context.GetAPIObject(), p.Name))
}
}
} else {
if requestedSize, ok := groupSpec.Resources.Requests[apiv1.ResourceStorage]; ok {
if volumeSize, ok := p.Spec.Resources.Requests[apiv1.ResourceStorage]; ok {
cmp := volumeSize.Cmp(requestedSize)
if cmp < 0 {
// Size of the volume is smaller than the requested size
// Update the pvc with the request size
p.Spec.Resources.Requests[apiv1.ResourceStorage] = requestedSize

log.Debug().Str("pvc-capacity", volumeSize.String()).Str("requested", requestedSize.String()).Msg("PVC capacity differs - updating")
kube := r.context.GetKubeCli()
if _, err := kube.CoreV1().PersistentVolumeClaims(r.context.GetNamespace()).Update(&p); err != nil {
log.Error().Err(err).Msg("Failed to update pvc")
} else {
r.context.CreateEvent(k8sutil.NewPVCResizedEvent(r.context.GetAPIObject(), p.Name))
}
} else if cmp > 0 {
log.Error().Str("server-group", group.AsRole()).Str("pvc-storage-size", volumeSize.String()).Str("requested-size", requestedSize.String()).
Msg("Volume size should not shrink")
r.context.CreateEvent(k8sutil.NewCannotShrinkVolumeEvent(r.context.GetAPIObject(), p.Name))
}
}
}
}
@@ -118,3 +135,21 @@

return nextInterval, nil
}

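// compareResourceList reports whether the wanted and given resource lists differ:
// it returns true if a key is missing on either side or if any quantity does not match.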
func compareResourceList(wanted, given apiv1.ResourceList) bool {
for k, v := range wanted {
if gv, ok := given[k]; !ok {
return true
} else if v.Cmp(gv) != 0 {
return true
}
}

for k := range given {
if _, ok := wanted[k]; !ok {
return true
}
}

return false
}
3 changes: 2 additions & 1 deletion pkg/deployment/resources/pvcs.go
Expand Up @@ -54,8 +54,9 @@ func (r *Resources) EnsurePVCs() error {
storageClassName := spec.GetStorageClassName()
role := group.AsRole()
resources := spec.Resources
vct := spec.VolumeClaimTemplate
finalizers := r.createPVCFinalizers(group)
if err := k8sutil.CreatePersistentVolumeClaim(pvcs, m.PersistentVolumeClaimName, deploymentName, ns, storageClassName, role, enforceAntiAffinity, resources, finalizers, owner); err != nil {
if err := k8sutil.CreatePersistentVolumeClaim(pvcs, m.PersistentVolumeClaimName, deploymentName, ns, storageClassName, role, enforceAntiAffinity, resources, vct, finalizers, owner); err != nil {
return maskAny(err)
}
}