Refactor UpdateStatus into separate functions
Jont828 committed Jun 24, 2022
1 parent ba0972d commit a014ff3
Showing 2 changed files with 84 additions and 51 deletions.
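This commit splits the old MachinePoolMachineScope.UpdateStatus into UpdateNodeStatus (node ref, MachineNodeHealthy condition, kubelet version) and UpdateInstanceStatus (VMSS VM provisioning state and latest-model check), with a shared GetNode lookup helper. The hypothetical updateState wrapper below (not code from this commit; it assumes the file's imports: context, github.com/pkg/errors, and the azure/scope package) sketches the resulting call shape:

	// Hypothetical wrapper, not part of this commit: calls both of the new
	// update functions that together replace the old UpdateStatus.
	func updateState(ctx context.Context, s *scope.MachinePoolMachineScope) error {
		// Node-derived status: node ref, MachineNodeHealthy condition, kubelet version.
		if err := s.UpdateNodeStatus(ctx); err != nil {
			return errors.Wrap(err, "failed to update node status")
		}
		// VMSS-VM-derived status: provisioning state and latest-model flag.
		if err := s.UpdateInstanceStatus(ctx); err != nil {
			return errors.Wrap(err, "failed to update instance status")
		}
		return nil
	}
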
133 changes: 83 additions & 50 deletions azure/scope/machinepoolmachine.go
@@ -52,6 +52,7 @@ const (
type (
nodeGetter interface {
GetNodeByProviderID(ctx context.Context, providerID string) (*corev1.Node, error)
// GetNodeByProviderID(ctx context.Context, providerID string) (*corev1.Node, bool, error)
GetNodeByObjectReference(ctx context.Context, nodeRef corev1.ObjectReference) (*corev1.Node, error)
}

@@ -235,11 +236,13 @@ func (s *MachinePoolMachineScope) IsReady() bool {
}

// SetFailureMessage sets the AzureMachinePoolMachine status failure message.
// TODO: should this be AzureMachinePoolMachine or AzureMachinePool?
func (s *MachinePoolMachineScope) SetFailureMessage(v error) {
s.AzureMachinePool.Status.FailureMessage = pointer.StringPtr(v.Error())
}

// SetFailureReason sets the AzureMachinePoolMachine status failure reason.
// TODO: should this be AzureMachinePoolMachine or AzureMachinePool?
func (s *MachinePoolMachineScope) SetFailureReason(v capierrors.MachineStatusError) {
s.AzureMachinePool.Status.FailureReason = &v
}
@@ -273,40 +276,47 @@ func (s *MachinePoolMachineScope) Close(ctx context.Context) error {
return s.PatchObject(ctx)
}

// UpdateStatus updates the node reference for the machine and other status fields. This func should be called at the
// end of a reconcile request and after updating the scope with the most recent Azure data.
func (s *MachinePoolMachineScope) UpdateStatus(ctx context.Context) error {
// UpdateNodeStatus updates AzureMachinePoolMachine conditions and ready status. It will also update the node ref and the Kubernetes
// version of the VM instance if the node is found.
// Note: This func should be called at the end of a reconcile request and after updating the scope with the most recent Azure data.
func (s *MachinePoolMachineScope) UpdateNodeStatus(ctx context.Context) error {
ctx, _, done := tele.StartSpanWithLogger(
ctx,
"scope.MachinePoolMachineScope.UpdateStatus",
"scope.MachinePoolMachineScope.UpdateNodeStatus",
)
defer done()

var (
nodeRef = s.AzureMachinePoolMachine.Status.NodeRef
node *corev1.Node
err error
)
var node *corev1.Node
nodeRef := s.AzureMachinePoolMachine.Status.NodeRef

switch {
case s.ProviderID() == "":
conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition, clusterv1.WaitingForNodeRefReason, clusterv1.ConditionSeverityInfo, "")
case nodeRef == nil || nodeRef.Name == "":
node, err = s.workloadNodeGetter.GetNodeByProviderID(ctx, s.ProviderID())
if err != nil && !apierrors.IsNotFound(err) {
// TODO: what should we set the condition to when node == nil and err == nil?
// See if we can fetch a node using either the provider ID or the nodeRef
node, found, err := s.GetNode(ctx)
if err != nil {
if apierrors.IsNotFound(err) && nodeRef != nil && nodeRef.Name != "" {
// Node was not found due to 404 when finding by ObjectReference.
conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityError, "")
} else {
// Failed due to an unexpected error
return err
}
} else if !found {
if s.ProviderID() == "" {
// Node was not found due to not having a provider ID set
conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition, clusterv1.WaitingForNodeRefReason, clusterv1.ConditionSeverityInfo, "")
} else {
// Node was not found due to not finding a matching node by provider ID
conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeProvisioningReason, clusterv1.ConditionSeverityInfo, "")
return errors.Wrap(err, "failed to get node by providerID")
}
default:
node, err = s.workloadNodeGetter.GetNodeByObjectReference(ctx, *nodeRef)
if err != nil && !apierrors.IsNotFound(err) {
conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityError, "")
return errors.Wrap(err, "failed to get node by object reference")
} else {
// Node was found. Check if it is ready.
nodeReady := noderefutil.IsNodeReady(node)
s.AzureMachinePoolMachine.Status.Ready = nodeReady
if nodeReady {
conditions.MarkTrue(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition)
} else {
conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeConditionsFailedReason, clusterv1.ConditionSeverityWarning, "")
}
}

if node != nil {
s.AzureMachinePoolMachine.Status.NodeRef = &corev1.ObjectReference{
Kind: node.Kind,
Namespace: node.Namespace,
@@ -315,17 +325,22 @@ func (s *MachinePoolMachineScope) UpdateStatus(ctx context.Context) error {
APIVersion: node.APIVersion,
}

nodeReady := noderefutil.IsNodeReady(node)
s.AzureMachinePoolMachine.Status.Ready = nodeReady
if nodeReady {
conditions.MarkTrue(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition)
} else {
conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeConditionsFailedReason, clusterv1.ConditionSeverityWarning, "")
}

s.AzureMachinePoolMachine.Status.Version = node.Status.NodeInfo.KubeletVersion
}

return nil
}

// UpdateInstanceStatus updates the provisioning state of the AzureMachinePoolMachine and whether it has the latest model applied
// using the VMSS VM instance.
// Note: This func should be called at the end of a reconcile request and after updating the scope with the most recent Azure data.
func (s *MachinePoolMachineScope) UpdateInstanceStatus(ctx context.Context) error {
ctx, _, done := tele.StartSpanWithLogger(
ctx,
"scope.MachinePoolMachineScope.UpdateInstanceStatus",
)
defer done()

if s.instance != nil {
s.AzureMachinePoolMachine.Status.ProvisioningState = &s.instance.State
hasLatestModel, err := s.hasLatestModelApplied(ctx)
@@ -347,25 +362,15 @@ func (s *MachinePoolMachineScope) CordonAndDrain(ctx context.Context) error {
)
defer done()

var (
nodeRef = s.AzureMachinePoolMachine.Status.NodeRef
node *corev1.Node
err error
)
if nodeRef == nil || nodeRef.Name == "" {
node, err = s.workloadNodeGetter.GetNodeByProviderID(ctx, s.ProviderID())
} else {
node, err = s.workloadNodeGetter.GetNodeByObjectReference(ctx, *nodeRef)
}

switch {
case err != nil && !apierrors.IsNotFound(err):
// See if we can fetch a node using either the provider ID or the nodeRef
node, found, err := s.GetNode(ctx)
if err != nil {
if apierrors.IsNotFound(err) {
return nil
}
// failed due to an unexpected error
return errors.Wrap(err, "failed to find node")
case err != nil && apierrors.IsNotFound(err):
// node was not found due to 404 when finding by ObjectReference
return nil
case node == nil:
return errors.Wrap(err, "failed to get node")
} else if !found {
// node was not found due to not finding a node with the ProviderID
return nil
}
@@ -535,6 +540,34 @@ func newWorkloadClusterProxy(c client.Client, cluster client.ObjectKey) *workloadClusterProxy {
}
}

// GetNode returns the node associated with the AzureMachinePoolMachine.
func (s *MachinePoolMachineScope) GetNode(ctx context.Context) (node *corev1.Node, found bool, err error) {
ctx, _, done := tele.StartSpanWithLogger(
ctx,
"scope.MachinePoolMachineScope.GetNode",
)
defer done()

nodeRef := s.AzureMachinePoolMachine.Status.NodeRef

if nodeRef == nil || nodeRef.Name == "" {
node, err = s.workloadNodeGetter.GetNodeByProviderID(ctx, s.ProviderID())
if err != nil {
return nil, false, errors.Wrap(err, "failed to get node by provider ID")
}
if node == nil {
return nil, false, nil
}
} else {
node, err = s.workloadNodeGetter.GetNodeByObjectReference(ctx, *nodeRef)
if err != nil {
return nil, false, errors.Wrap(err, "failed to get node by object reference")
}
}

return node, true, nil
}

// GetNodeByObjectReference will fetch a *corev1.Node via a node object reference.
func (np *workloadClusterProxy) GetNodeByObjectReference(ctx context.Context, nodeRef corev1.ObjectReference) (*corev1.Node, error) {
workloadClient, err := getWorkloadClient(ctx, np.Client, np.Cluster)
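Both UpdateNodeStatus and CordonAndDrain above now funnel node lookup through the new GetNode helper, which separates a failed lookup from a node that simply does not exist yet. The hypothetical isNodeReady helper below (not part of this commit; it assumes the file's existing imports, including cluster-api's noderefutil) sketches the resulting three-outcome contract:

	// Hypothetical helper, not in this commit: shows the three outcomes
	// that GetNode callers branch on.
	func isNodeReady(ctx context.Context, s *scope.MachinePoolMachineScope) (bool, error) {
		node, found, err := s.GetNode(ctx)
		switch {
		case err != nil:
			// The lookup itself failed; a 404 when fetching by nodeRef also surfaces here.
			return false, errors.Wrap(err, "failed to get node")
		case !found:
			// No error, but no node matched the provider ID (or none is set yet).
			return false, nil
		default:
			// node is non-nil and safe to inspect.
			return noderefutil.IsNodeReady(node), nil
		}
	}
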
2 changes: 1 addition & 1 deletion exp/controllers/azuremachinepoolmachine_controller.go
@@ -369,7 +369,7 @@ func (r *azureMachinePoolMachineReconciler) Delete(ctx context.Context) error {

defer func() {
if err := r.Scope.UpdateStatus(ctx); err != nil {
log.V(4).Info("failed tup update vmss vm status during delete")
log.V(4).Info("failed to update vmss vm status during delete")
}
}()
