Add Machine and KCP conditions to KCP controller
Sedef committed Sep 22, 2020
1 parent 0cf9f80 commit ba2d784
Showing 13 changed files with 493 additions and 88 deletions.
60 changes: 58 additions & 2 deletions api/v1alpha3/condition_consts.go
@@ -109,7 +109,8 @@ const (
// MachineHasFailureReason is the reason used when a machine has either a FailureReason or a FailureMessage set on its status.
MachineHasFailureReason = "MachineHasFailure"

// NodeNotFoundReason is the reason used when a machine's node has previously been observed but is now gone.
// NodeNotFoundReason (Severity=Error) documents that a machine's node has previously been observed but is now gone.
// NB. provisioned --> NodeRef != ""
NodeNotFoundReason = "NodeNotFound"

// NodeStartupTimeoutReason is the reason used when a machine's node does not appear within the specified timeout.
@@ -120,10 +121,65 @@ const (
)

const (
// MachineOwnerRemediatedCondition is set on machines that have failed a healthcheck by the MachineHealthCheck controller.
// MachineOwnerRemediatedCondition is set on machines that have failed a healthcheck by the Machine's owner controller.
// MachineOwnerRemediatedCondition is set to False after a health check fails, but should be changed to True by the owning controller after remediation succeeds.
MachineOwnerRemediatedCondition ConditionType = "OwnerRemediated"

// WaitingForRemediationReason is the reason used when a machine fails a health check and remediation is needed.
WaitingForRemediationReason = "WaitingForRemediation"
)

// Common Pod-related Condition Reasons used by Pod-related Conditions such as MachineKubeAPIServerHealthyCondition etc.
const (
// PodProvisioningReason (Severity=Info) documents a pod waiting to be provisioned, i.e., the Pod is in the "Pending" phase and
// its PodScheduled and Initialized conditions are not yet set to True.
PodProvisioningReason = "PodProvisioning"

// PodProvisioningFailedReason (Severity=Warning) documents a pod that failed during provisioning, i.e., the Pod is in the "Pending" phase,
// its PodScheduled and Initialized conditions are set to True,
// but its ContainersReady or Ready condition is False (i.e., at least one container is in a waiting state, e.g. CrashLoopBackOff or ImagePullBackOff).
PodProvisioningFailedReason = "PodProvisioningFailed"

// PodMissingReason (Severity=Warning) documents that a pod does not exist.
PodMissingReason = "PodMissing"

// PodFailedReason (Severity=Error) documents that at least one container of a pod has terminated in a failure
// and hence the Pod is in the "Failed" phase.
PodFailedReason = "PodFailed"
)

// Conditions that are only for control-plane machines. KubeadmControlPlane is the owner of these conditions.

const (
// MachineKubeAPIServerHealthyCondition reports a machine's kube-apiserver's health status.
// Set to true if kube-apiserver pod is in "Running" phase, otherwise uses Pod-related Condition Reasons.
MachineKubeAPIServerHealthyCondition ConditionType = "KubeAPIServerHealthy"

// MachineKubeControllerManagerHealthyCondition reports a machine's kube-controller-manager's health status.
// Set to true if kube-controller-manager pod is in "Running" phase, otherwise uses Pod-related Condition Reasons.
MachineKubeControllerManagerHealthyCondition ConditionType = "KubeControllerManagerHealthy"

// MachineKubeSchedulerHealthyCondition reports a machine's kube-scheduler's health status.
// Set to true if kube-scheduler pod is in "Running" phase, otherwise uses Pod-related Condition Reasons.
MachineKubeSchedulerHealthyCondition ConditionType = "KubeSchedulerHealthy"

// MachineEtcdPodHealthyCondition reports a machine's etcd pod's health status.
// Set to true if etcd pod is in "Running" phase, otherwise uses Pod-related Condition Reasons.
MachineEtcdPodHealthyCondition ConditionType = "EtcdPodHealthy"
)

const (
// MachineEtcdMemberHealthyCondition documents whether the machine has a healthy etcd member.
// If not true, Pod-related Condition Reasons can be used as reasons.
MachineEtcdMemberHealthyCondition ConditionType = "EtcdMemberHealthy"

// EtcdMemberHasAlarmsReason (Severity=Warning) documents that a Machine's etcd member has alarms armed.
EtcdMemberHasAlarmsReason = "EtcdMemberHasAlarms"

// EtcdClientRelatedFailureReason (Severity=Warning) documents client-related failures, i.e., either creating an etcd client fails
// or an operation performed with the created etcd client fails.
EtcdClientRelatedFailureReason = "EtcdClientRelatedFailure"

// NodeEtcdMissingFromMemberListReason (Severity=Warning) documents that the machine's corresponding node has a ready etcd pod but is not yet part of the etcd member list.
NodeEtcdMissingFromMemberListReason = "NodeEtcdMissingFromMemberList"
)
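
For orientation only (not part of this commit): a minimal sketch of how a controller could map a control plane pod's state onto one of the per-machine conditions above using the cluster-api conditions utilities. The helper name setAPIServerCondition and the exact phase-to-reason mapping are assumptions, not the implementation added here.

import (
	corev1 "k8s.io/api/core/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// setAPIServerCondition is a hypothetical helper: it marks
// MachineKubeAPIServerHealthyCondition from the observed kube-apiserver pod,
// reusing the Pod-related condition reasons defined above.
func setAPIServerCondition(machine *clusterv1.Machine, pod *corev1.Pod) {
	switch {
	case pod == nil:
		conditions.MarkFalse(machine, clusterv1.MachineKubeAPIServerHealthyCondition,
			clusterv1.PodMissingReason, clusterv1.ConditionSeverityWarning, "kube-apiserver pod not found")
	case pod.Status.Phase == corev1.PodRunning:
		conditions.MarkTrue(machine, clusterv1.MachineKubeAPIServerHealthyCondition)
	case pod.Status.Phase == corev1.PodFailed:
		conditions.MarkFalse(machine, clusterv1.MachineKubeAPIServerHealthyCondition,
			clusterv1.PodFailedReason, clusterv1.ConditionSeverityError, "kube-apiserver pod is in Failed phase")
	default:
		conditions.MarkFalse(machine, clusterv1.MachineKubeAPIServerHealthyCondition,
			clusterv1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "kube-apiserver pod is still provisioning")
	}
}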
20 changes: 20 additions & 0 deletions controlplane/kubeadm/api/v1alpha3/condition_consts.go
@@ -66,3 +66,23 @@ const (
// ScalingDownReason (Severity=Info) documents a KubeadmControlPlane that is decreasing the number of replicas.
ScalingDownReason = "ScalingDown"
)

const (
// EtcdClusterHealthy documents the overall health of the etcd cluster managed by KCP.
EtcdClusterHealthy clusterv1.ConditionType = "EtcdClusterHealthy"

// EtcdUnknownMemberReason (Severity=Warning) documents that the etcd member list contains at least one member that cannot be associated with a KCP machine.
EtcdUnknownMemberReason = "EtcdUnknownMember"

// EtcdAlarmExistReason (Severity=Warning) documents that the etcd cluster has alarms armed.
EtcdAlarmExistReason = "EtcdAlarmExist"

// EtcdMemberListUnstableReason (Severity=Info) documents that not all etcd members have the same member-list view.
EtcdMemberListUnstableReason = "EtcdMemberListUnstable"

// EtcdMemberNumMismatchWithPodNumReason (Severity=Warning) documents that the number of etcd pods does not match the number of etcd members.
// This case may occur when a pod is failing but has already been removed from the member list.
// TODO: During scale down, etcd quorum may be preserved (the cluster remains healthy) while a pod is being deleted and removed from the
// TODO: member list, so the pod and member counts may temporarily mismatch; that case should be differentiated from this one.
EtcdMemberNumMismatchWithPodNumReason = "EtcdMemberMismatchWithPod"
)
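
For orientation only (not part of this commit): a sketch of how these reasons could drive the KCP-level EtcdClusterHealthy condition. The helper and its inputs (unknownMembers, memberListsDiverge) are assumptions; the real aggregation lives in the internal package.

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// setEtcdClusterCondition is a hypothetical helper: it marks the KCP-level
// EtcdClusterHealthy condition with one of the reasons defined above.
func setEtcdClusterCondition(kcp *controlplanev1.KubeadmControlPlane, unknownMembers int, memberListsDiverge bool) {
	switch {
	case unknownMembers > 0:
		conditions.MarkFalse(kcp, controlplanev1.EtcdClusterHealthy,
			controlplanev1.EtcdUnknownMemberReason, clusterv1.ConditionSeverityWarning,
			"%d etcd member(s) cannot be associated with a KCP machine", unknownMembers)
	case memberListsDiverge:
		conditions.MarkFalse(kcp, controlplanev1.EtcdClusterHealthy,
			controlplanev1.EtcdMemberListUnstableReason, clusterv1.ConditionSeverityInfo,
			"etcd members do not all report the same member list")
	default:
		conditions.MarkTrue(kcp, controlplanev1.EtcdClusterHealthy)
	}
}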
72 changes: 61 additions & 11 deletions controlplane/kubeadm/controllers/controller.go
@@ -213,6 +213,9 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(req ctrl.Request) (res ctrl.Re
}

func patchKubeadmControlPlane(ctx context.Context, patchHelper *patch.Helper, kcp *controlplanev1.KubeadmControlPlane) error {

internal.SetKCPConditions(kcp)

// Always update the readyCondition by summarizing the state of other conditions.
conditions.SetSummary(kcp,
conditions.WithConditions(
@@ -221,6 +224,7 @@ func patchKubeadmControlPlane(ctx context.Context, patchHelper *patch.Helper, kc
controlplanev1.MachinesReadyCondition,
controlplanev1.AvailableCondition,
controlplanev1.CertificatesAvailableCondition,
controlplanev1.EtcdClusterHealthy,
),
)

@@ -282,13 +286,6 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
return ctrl.Result{}, err
}

adoptableMachines := controlPlaneMachines.Filter(machinefilters.AdoptableControlPlaneMachines(cluster.Name))
if len(adoptableMachines) > 0 {
// We adopt the Machines and then wait for the update event for the ownership reference to re-queue them so the cache is up-to-date
err = r.adoptMachines(ctx, kcp, adoptableMachines, cluster)
return ctrl.Result{}, err
}

ownedMachines := controlPlaneMachines.Filter(machinefilters.OwnedMachines(kcp))
if len(ownedMachines) != len(controlPlaneMachines) {
logger.Info("Not all control plane machines are owned by this KubeadmControlPlane, refusing to operate in mixed management mode")
@@ -301,6 +298,21 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
return ctrl.Result{}, err
}

// If control plane is initialized, reconcile health.
if ownedMachines.Len() != 0 {
// reconcileControlPlaneHealth returns an error if there is a machine being deleted
if result, err := r.reconcileControlPlaneHealth(ctx, cluster, kcp, controlPlane); err != nil || !result.IsZero() {
return result, err
}
}

adoptableMachines := controlPlaneMachines.Filter(machinefilters.AdoptableControlPlaneMachines(cluster.Name))
if len(adoptableMachines) > 0 {
// We adopt the Machines and then wait for the update event for the ownership reference to re-queue them so the cache is up-to-date
err = r.adoptMachines(ctx, kcp, adoptableMachines, cluster)
return ctrl.Result{}, err
}

// Aggregate the operational state of all the machines; while aggregating we are adding the
// source ref (reason@machine/name) so the problem can be easily tracked down to its source machine.
conditions.SetAggregate(controlPlane.KCP, controlplanev1.MachinesReadyCondition, ownedMachines.ConditionGetters(), conditions.AddSourceRef())
@@ -442,21 +454,59 @@ func (r *KubeadmControlPlaneReconciler) ClusterToKubeadmControlPlane(o handler.M
return nil
}

// reconcileHealth performs health checks for control plane components and etcd
func patchControlPlaneMachine(ctx context.Context, patchHelper *patch.Helper, machine *clusterv1.Machine) error {
// Patch the object, ignoring conflicts on the conditions owned by this controller.

// TODO: Is it okay to own these conditions or just patch?
// return patchHelper.Patch(ctx, machine)

return patchHelper.Patch(
ctx,
machine,
patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
clusterv1.MachineKubeAPIServerHealthyCondition,
clusterv1.MachineKubeControllerManagerHealthyCondition,
clusterv1.MachineEtcdMemberHealthyCondition,
clusterv1.MachineEtcdPodHealthyCondition,
clusterv1.MachineKubeSchedulerHealthyCondition,
}},
)
}

// reconcileControlPlaneHealth performs health checks for control plane components and etcd
// It removes any etcd members that do not have a corresponding node.
// Also, as a final step, it checks whether any machine is being deleted.
func (r *KubeadmControlPlaneReconciler) reconcileHealth(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, controlPlane *internal.ControlPlane) (ctrl.Result, error) {
func (r *KubeadmControlPlaneReconciler) reconcileControlPlaneHealth(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, controlPlane *internal.ControlPlane) (ctrl.Result, error) {
logger := r.Log.WithValues("namespace", kcp.Namespace, "kubeadmControlPlane", kcp.Name)

for _, m := range controlPlane.Machines {
// Initialize the patch helper.
patchHelper, err := patch.NewHelper(m, r.Client)
if err != nil {
logger.Error(err, "Failed to configure the patch helper")
return ctrl.Result{Requeue: true}, nil
}

machine := m
defer func() {
internal.SetSingleMachineConditions(machine)
// Always attempt to Patch the Machine conditions after each health reconciliation.
if err := patchControlPlaneMachine(ctx, patchHelper, machine); err != nil {
logger.Error(err, "Failed to patch KubeadmControlPlane Machine")
}
}()
}

// Do a health check of the Control Plane components
if err := r.managementCluster.TargetClusterControlPlaneIsHealthy(ctx, util.ObjectKey(cluster)); err != nil {
if err := r.managementCluster.TargetClusterControlPlaneIsHealthy(ctx, controlPlane.Machines.SortedByCreationTimestamp(), util.ObjectKey(cluster)); err != nil {
r.recorder.Eventf(kcp, corev1.EventTypeWarning, "ControlPlaneUnhealthy",
"Waiting for control plane to pass control plane health check to continue reconciliation: %v", err)
return ctrl.Result{RequeueAfter: healthCheckFailedRequeueAfter}, nil
}

// If KCP should manage etcd, ensure etcd is healthy.
if controlPlane.IsEtcdManaged() {
if err := r.managementCluster.TargetClusterEtcdIsHealthy(ctx, util.ObjectKey(cluster)); err != nil {
if err := r.managementCluster.TargetClusterEtcdIsHealthy(ctx, controlPlane.Machines.SortedByCreationTimestamp(), util.ObjectKey(cluster)); err != nil {
errList := []error{errors.Wrap(err, "failed to pass etcd health check")}
r.recorder.Eventf(kcp, corev1.EventTypeWarning, "ControlPlaneUnhealthy",
"Waiting for control plane to pass etcd health check to continue reconciliation: %v", err)
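A condensed restatement of the per-machine patch pattern in reconcileControlPlaneHealth above, to make the defer semantics explicit; patchMachineConditions is a hypothetical name and the error handling is simplified compared to the diff.

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal"
	"sigs.k8s.io/cluster-api/util/patch"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// patchMachineConditions mirrors the loop above: the defers registered inside the
// loop all run when the enclosing function returns, so every machine's conditions
// get patched even if a later health check returns early.
func patchMachineConditions(ctx context.Context, c client.Client, machines []*clusterv1.Machine) error {
	for _, m := range machines {
		helper, err := patch.NewHelper(m, c)
		if err != nil {
			return err
		}
		machine := m // pin the loop variable for the deferred closure
		defer func() {
			internal.SetSingleMachineConditions(machine)
			_ = helper.Patch(ctx, machine) // the real controller logs patch errors instead of dropping them
		}()
	}
	return nil
}
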
4 changes: 2 additions & 2 deletions controlplane/kubeadm/controllers/fakes_test.go
@@ -57,14 +57,14 @@ func (f *fakeManagementCluster) GetMachinesForCluster(c context.Context, n clien
return f.Machines, nil
}

func (f *fakeManagementCluster) TargetClusterControlPlaneIsHealthy(_ context.Context, _ client.ObjectKey) error {
func (f *fakeManagementCluster) TargetClusterControlPlaneIsHealthy(_ context.Context, _ []*clusterv1.Machine, _ client.ObjectKey) error {
if !f.ControlPlaneHealthy {
return errors.New("control plane is not healthy")
}
return nil
}

func (f *fakeManagementCluster) TargetClusterEtcdIsHealthy(_ context.Context, _ client.ObjectKey) error {
func (f *fakeManagementCluster) TargetClusterEtcdIsHealthy(_ context.Context, _ []*clusterv1.Machine, _ client.ObjectKey) error {
if !f.EtcdHealthy {
return errors.New("etcd is not healthy")
}
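For reference, the health-check signatures implied by the updated fakes and call sites; this partial interface is inferred, not copied from the commit, and the real ManagementCluster interface has additional methods.

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Inferred, partial view of the ManagementCluster health checks after this change:
// both now receive the control plane Machines (sorted by creation timestamp) so
// per-machine conditions can be set while the workload cluster is inspected.
type managementClusterHealthChecks interface {
	TargetClusterControlPlaneIsHealthy(ctx context.Context, machines []*clusterv1.Machine, clusterKey client.ObjectKey) error
	TargetClusterEtcdIsHealthy(ctx context.Context, machines []*clusterv1.Machine, clusterKey client.ObjectKey) error
}
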
12 changes: 2 additions & 10 deletions controlplane/kubeadm/controllers/scale.go
@@ -63,11 +63,6 @@ func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Conte
func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, controlPlane *internal.ControlPlane) (ctrl.Result, error) {
logger := controlPlane.Logger()

// reconcileHealth returns err if there is a machine being delete which is a required condition to check before scaling up
if result, err := r.reconcileHealth(ctx, cluster, kcp, controlPlane); err != nil || !result.IsZero() {
return result, err
}

// Create the bootstrap configuration
bootstrapSpec := controlPlane.JoinControlPlaneConfig()
fd := controlPlane.NextFailureDomainForScaleUp()
@@ -90,10 +85,6 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
) (ctrl.Result, error) {
logger := controlPlane.Logger()

if result, err := r.reconcileHealth(ctx, cluster, kcp, controlPlane); err != nil || !result.IsZero() {
return result, err
}

workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(cluster))
if err != nil {
logger.Error(err, "Failed to create client to workload cluster")
@@ -123,7 +114,8 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
}
}

if err := r.managementCluster.TargetClusterControlPlaneIsHealthy(ctx, util.ObjectKey(cluster)); err != nil {
// TODO: check if this is needed after moving the health check to the main reconcile
if err := r.managementCluster.TargetClusterControlPlaneIsHealthy(ctx, controlPlane.Machines.SortedByCreationTimestamp(), util.ObjectKey(cluster)); err != nil {
logger.V(2).Info("Waiting for control plane to pass control plane health check before removing a control plane machine", "cause", err)
r.recorder.Eventf(kcp, corev1.EventTypeWarning, "ControlPlaneUnhealthy",
"Waiting for control plane to pass control plane health check before removing a control plane machine: %v", err)
3 changes: 1 addition & 2 deletions controlplane/kubeadm/controllers/status.go
@@ -18,7 +18,6 @@ package controllers

import (
"context"

"github.com/pkg/errors"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
@@ -29,7 +28,7 @@ import (
)

// updateStatus is called after every reconciliation loop in a defer statement to always make sure we have the
// resource status subresourcs up-to-date.
// resource status subresources up-to-date.
func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster) error {
selector := machinefilters.ControlPlaneSelectorForCluster(cluster.Name)
// Copy label selector to its status counterpart in string format.
