chore: remove v1alpha5 utils (#658)
njtran committed Nov 2, 2023
1 parent fa801b9 commit 079e3ce
Showing 8 changed files with 20 additions and 1,400 deletions.
98 changes: 4 additions & 94 deletions pkg/controllers/disruption/events/events.go
@@ -24,20 +24,9 @@ import (
 
 	"github.com/aws/karpenter-core/pkg/apis/v1beta1"
 	"github.com/aws/karpenter-core/pkg/events"
-	machineutil "github.com/aws/karpenter-core/pkg/utils/machine"
 )
 
 func Launching(nodeClaim *v1beta1.NodeClaim, reason string) events.Event {
-	if nodeClaim.IsMachine {
-		machine := machineutil.NewFromNodeClaim(nodeClaim)
-		return events.Event{
-			InvolvedObject: machine,
-			Type:           v1.EventTypeNormal,
-			Reason:         "DeprovisioningLaunching",
-			Message:        fmt.Sprintf("Launching Machine: %s", cases.Title(language.Und, cases.NoLower).String(reason)),
-			DedupeValues:   []string{string(machine.UID), reason},
-		}
-	}
 	return events.Event{
 		InvolvedObject: nodeClaim,
 		Type:           v1.EventTypeNormal,
@@ -48,16 +37,6 @@ func Launching(nodeClaim *v1beta1.NodeClaim, reason string) events.Event {
 }
 
 func WaitingOnReadiness(nodeClaim *v1beta1.NodeClaim) events.Event {
-	if nodeClaim.IsMachine {
-		machine := machineutil.NewFromNodeClaim(nodeClaim)
-		return events.Event{
-			InvolvedObject: machine,
-			Type:           v1.EventTypeNormal,
-			Reason:         "DeprovisioningWaitingReadiness",
-			Message:        "Waiting on readiness to continue deprovisioning",
-			DedupeValues:   []string{string(machine.UID)},
-		}
-	}
 	return events.Event{
 		InvolvedObject: nodeClaim,
 		Type:           v1.EventTypeNormal,
@@ -68,16 +47,6 @@ func WaitingOnReadiness(nodeClaim *v1beta1.NodeClaim) events.Event {
 }
 
 func WaitingOnDeletion(nodeClaim *v1beta1.NodeClaim) events.Event {
-	if nodeClaim.IsMachine {
-		machine := machineutil.NewFromNodeClaim(nodeClaim)
-		return events.Event{
-			InvolvedObject: machine,
-			Type:           v1.EventTypeNormal,
-			Reason:         "DeprovisioningWaitingDeletion",
-			Message:        "Waiting on deletion to continue deprovisioning",
-			DedupeValues:   []string{string(machine.UID)},
-		}
-	}
 	return events.Event{
 		InvolvedObject: nodeClaim,
 		Type:           v1.EventTypeNormal,
@@ -88,25 +57,6 @@ func WaitingOnDeletion(nodeClaim *v1beta1.NodeClaim) events.Event {
 }
 
 func Terminating(node *v1.Node, nodeClaim *v1beta1.NodeClaim, reason string) []events.Event {
-	if nodeClaim.IsMachine {
-		machine := machineutil.NewFromNodeClaim(nodeClaim)
-		return []events.Event{
-			{
-				InvolvedObject: node,
-				Type:           v1.EventTypeNormal,
-				Reason:         "DeprovisioningTerminating",
-				Message:        fmt.Sprintf("Deprovisioning Node: %s", cases.Title(language.Und, cases.NoLower).String(reason)),
-				DedupeValues:   []string{string(node.UID), reason},
-			},
-			{
-				InvolvedObject: machine,
-				Type:           v1.EventTypeNormal,
-				Reason:         "DeprovisioningTerminating",
-				Message:        fmt.Sprintf("Deprovisioning Machine: %s", cases.Title(language.Und, cases.NoLower).String(reason)),
-				DedupeValues:   []string{string(machine.UID), reason},
-			},
-		}
-	}
 	return []events.Event{
 		{
 			InvolvedObject: node,
@@ -125,30 +75,9 @@ func Terminating(node *v1.Node, nodeClaim *v1beta1.NodeClaim, reason string) []e
 	}
 }
 
-// Unconsolidatable is an event that informs the user that a Machine/Node combination cannot be consolidated
-// due to the state of the Machine/Node or due to some state of the pods that are scheduled to the Machine/Node
+// Unconsolidatable is an event that informs the user that a NodeClaim/Node combination cannot be consolidated
+// due to the state of the NodeClaim/Node or due to some state of the pods that are scheduled to the NodeClaim/Node
 func Unconsolidatable(node *v1.Node, nodeClaim *v1beta1.NodeClaim, reason string) []events.Event {
-	if nodeClaim.IsMachine {
-		machine := machineutil.NewFromNodeClaim(nodeClaim)
-		return []events.Event{
-			{
-				InvolvedObject: node,
-				Type:           v1.EventTypeNormal,
-				Reason:         "Unconsolidatable",
-				Message:        reason,
-				DedupeValues:   []string{string(node.UID)},
-				DedupeTimeout:  time.Minute * 15,
-			},
-			{
-				InvolvedObject: machine,
-				Type:           v1.EventTypeNormal,
-				Reason:         "Unconsolidatable",
-				Message:        reason,
-				DedupeValues:   []string{string(machine.UID)},
-				DedupeTimeout:  time.Minute * 15,
-			},
-		}
-	}
 	return []events.Event{
 		{
 			InvolvedObject: node,
@@ -169,28 +98,9 @@ func Unconsolidatable(node *v1.Node, nodeClaim *v1beta1.NodeClaim, reason string
 	}
 }
 
-// Blocked is an event that informs the user that a Machine/Node combination is blocked on deprovisioning
-// due to the state of the Machine/Node or due to some state of the pods that are scheduled to the Machine/Node
+// Blocked is an event that informs the user that a NodeClaim/Node combination is blocked on deprovisioning
+// due to the state of the NodeClaim/Node or due to some state of the pods that are scheduled to the NodeClaim/Node
 func Blocked(node *v1.Node, nodeClaim *v1beta1.NodeClaim, reason string) []events.Event {
-	if nodeClaim.IsMachine {
-		machine := machineutil.NewFromNodeClaim(nodeClaim)
-		return []events.Event{
-			{
-				InvolvedObject: node,
-				Type:           v1.EventTypeNormal,
-				Reason:         "DeprovisioningBlocked",
-				Message:        fmt.Sprintf("Cannot deprovision Node: %s", reason),
-				DedupeValues:   []string{string(node.UID)},
-			},
-			{
-				InvolvedObject: machine,
-				Type:           v1.EventTypeNormal,
-				Reason:         "DeprovisioningBlocked",
-				Message:        fmt.Sprintf("Cannot deprovision Machine: %s", reason),
-				DedupeValues:   []string{string(machine.UID)},
-			},
-		}
-	}
 	return []events.Event{
 		{
 			InvolvedObject: node,
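
For reference, a minimal sketch of what one of these helpers reduces to after the change. The surviving NodeClaim branch is collapsed in the diff view above, so the Reason, Message, and DedupeValues here are assumptions that mirror the deleted Machine branch with NodeClaim substituted in; the actual values live in the collapsed lines.

package events

import (
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
	v1 "k8s.io/api/core/v1"

	"github.com/aws/karpenter-core/pkg/apis/v1beta1"
	"github.com/aws/karpenter-core/pkg/events"
)

// Launching after the IsMachine branch is removed: the event always targets
// the v1beta1 NodeClaim directly, with no machineutil conversion step.
func Launching(nodeClaim *v1beta1.NodeClaim, reason string) events.Event {
	return events.Event{
		InvolvedObject: nodeClaim,
		Type:           v1.EventTypeNormal,
		Reason:         "DeprovisioningLaunching", // assumed: mirrors the deleted Machine branch
		Message:        fmt.Sprintf("Launching NodeClaim: %s", cases.Title(language.Und, cases.NoLower).String(reason)),
		DedupeValues:   []string{string(nodeClaim.UID), reason},
	}
}
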
11 changes: 0 additions & 11 deletions pkg/controllers/nodeclaim/consistency/events.go
@@ -19,20 +19,9 @@ import (
 
 	"github.com/aws/karpenter-core/pkg/apis/v1beta1"
 	"github.com/aws/karpenter-core/pkg/events"
-	machineutil "github.com/aws/karpenter-core/pkg/utils/machine"
 )
 
 func FailedConsistencyCheckEvent(nodeClaim *v1beta1.NodeClaim, message string) events.Event {
-	if nodeClaim.IsMachine {
-		machine := machineutil.NewFromNodeClaim(nodeClaim)
-		return events.Event{
-			InvolvedObject: machine,
-			Type:           v1.EventTypeWarning,
-			Reason:         "FailedConsistencyCheck",
-			Message:        message,
-			DedupeValues:   []string{string(machine.UID), message},
-		}
-	}
	return events.Event{
 		InvolvedObject: nodeClaim,
 		Type:           v1.EventTypeWarning,
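
Same pattern here: with the Machine path gone, the helper becomes a single event construction. A sketch under the same assumptions as above (the collapsed NodeClaim branch is presumed to mirror the deleted one; imports as in the previous sketch):

func FailedConsistencyCheckEvent(nodeClaim *v1beta1.NodeClaim, message string) events.Event {
	// The Warning event is deduped on the NodeClaim UID plus the message,
	// matching the deleted Machine branch.
	return events.Event{
		InvolvedObject: nodeClaim,
		Type:           v1.EventTypeWarning,
		Reason:         "FailedConsistencyCheck",
		Message:        message,
		DedupeValues:   []string{string(nodeClaim.UID), message},
	}
}
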
21 changes: 0 additions & 21 deletions pkg/controllers/nodeclaim/lifecycle/events.go
@@ -21,20 +21,9 @@ import (
 
 	"github.com/aws/karpenter-core/pkg/apis/v1beta1"
 	"github.com/aws/karpenter-core/pkg/events"
-	machineutil "github.com/aws/karpenter-core/pkg/utils/machine"
 )
 
 func InsufficientCapacityErrorEvent(nodeClaim *v1beta1.NodeClaim, err error) events.Event {
-	if nodeClaim.IsMachine {
-		machine := machineutil.NewFromNodeClaim(nodeClaim)
-		return events.Event{
-			InvolvedObject: machine,
-			Type:           v1.EventTypeWarning,
-			Reason:         "InsufficientCapacityError",
-			Message:        fmt.Sprintf("Machine %s event: %s", machine.Name, truncateMessage(err.Error())),
-			DedupeValues:   []string{string(machine.UID)},
-		}
-	}
 	return events.Event{
 		InvolvedObject: nodeClaim,
 		Type:           v1.EventTypeWarning,
@@ -45,16 +34,6 @@ func InsufficientCapacityErrorEvent(nodeClaim *v1beta1.NodeClaim, err error) eve
 }
 
 func NodeClassNotReadyEvent(nodeClaim *v1beta1.NodeClaim, err error) events.Event {
-	if nodeClaim.IsMachine {
-		machine := machineutil.NewFromNodeClaim(nodeClaim)
-		return events.Event{
-			InvolvedObject: machine,
-			Type:           v1.EventTypeWarning,
-			Reason:         "ProviderNotReady",
-			Message:        fmt.Sprintf("Machine %s event: %s", machine.Name, truncateMessage(err.Error())),
-			DedupeValues:   []string{string(machine.UID)},
-		}
-	}
 	return events.Event{
 		InvolvedObject: nodeClaim,
 		Type:           v1.EventTypeWarning,
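
The lifecycle events collapse the same way. A sketch of NodeClassNotReadyEvent under the same assumptions; truncateMessage is the file's existing helper, visible in the deleted lines, and the NodeClaim-flavored Message string is assumed rather than copied:

func NodeClassNotReadyEvent(nodeClaim *v1beta1.NodeClaim, err error) events.Event {
	// Assumed to mirror the deleted Machine branch, substituting NodeClaim.
	return events.Event{
		InvolvedObject: nodeClaim,
		Type:           v1.EventTypeWarning,
		Reason:         "ProviderNotReady",
		Message:        fmt.Sprintf("NodeClaim %s event: %s", nodeClaim.Name, truncateMessage(err.Error())),
		DedupeValues:   []string{string(nodeClaim.UID)},
	}
}
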
116 changes: 9 additions & 107 deletions pkg/test/expectations/expectations.go
@@ -58,8 +58,6 @@ import (
 	"github.com/aws/karpenter-core/pkg/operator/scheme"
 	pscheduling "github.com/aws/karpenter-core/pkg/scheduling"
 	"github.com/aws/karpenter-core/pkg/test"
-	machineutil "github.com/aws/karpenter-core/pkg/utils/machine"
-	nodeclaimutil "github.com/aws/karpenter-core/pkg/utils/nodeclaim"
 )
 
 const (
@@ -277,28 +275,14 @@ func ExpectProvisionedNoBinding(ctx context.Context, c client.Client, cluster *s
 		if err != nil {
 			return bindings
 		}
-		if key.IsMachine {
-			machine := &v1alpha5.Machine{}
-			Expect(c.Get(ctx, types.NamespacedName{Name: key.Name}, machine)).To(Succeed())
-			machine, node := ExpectMachineDeployed(ctx, c, cluster, cloudProvider, machine)
-			if machine != nil && node != nil {
-				for _, pod := range m.Pods {
-					bindings[pod] = &Binding{
-						Machine: machine,
-						Node:    node,
-					}
-				}
-			}
-		} else {
-			nodeClaim := &v1beta1.NodeClaim{}
-			Expect(c.Get(ctx, types.NamespacedName{Name: key.Name}, nodeClaim)).To(Succeed())
-			nodeClaim, node := ExpectNodeClaimDeployed(ctx, c, cluster, cloudProvider, nodeClaim)
-			if nodeClaim != nil && node != nil {
-				for _, pod := range m.Pods {
-					bindings[pod] = &Binding{
-						NodeClaim: nodeClaim,
-						Node:      node,
-					}
+		nodeClaim := &v1beta1.NodeClaim{}
+		Expect(c.Get(ctx, types.NamespacedName{Name: key.Name}, nodeClaim)).To(Succeed())
+		nodeClaim, node := ExpectNodeClaimDeployed(ctx, c, cluster, cloudProvider, nodeClaim)
+		if nodeClaim != nil && node != nil {
+			for _, pod := range m.Pods {
+				bindings[pod] = &Binding{
+					NodeClaim: nodeClaim,
+					Node:      node,
+				}
 			}
 		}
@@ -309,51 +293,13 @@
 				Node: node.Node,
 			}
 			if node.NodeClaim != nil {
-				if node.NodeClaim.IsMachine {
-					bindings[pod].Machine = machineutil.NewFromNodeClaim(node.NodeClaim)
-				} else {
-					bindings[pod].NodeClaim = node.NodeClaim
-				}
+				bindings[pod].NodeClaim = node.NodeClaim
 			}
 		}
 	}
 	return bindings
 }

-func ExpectMachineDeployedNoNode(ctx context.Context, c client.Client, cluster *state.Cluster, cloudProvider cloudprovider.CloudProvider, m *v1alpha5.Machine) (*v1alpha5.Machine, error) {
-	GinkgoHelper()
-	resolved, err := cloudProvider.Create(ctx, nodeclaimutil.New(m))
-	// TODO @joinnis: Check this error rather than swallowing it. This is swallowed right now due to how we are doing some testing in the cloudprovider
-	if err != nil {
-		return m, err
-	}
-	Expect(err).To(Succeed())
-
-	// Make the machine ready in the status conditions
-	m = machineutil.NewFromNodeClaim(lifecycle.PopulateNodeClaimDetails(nodeclaimutil.New(m), resolved))
-	m.StatusConditions().MarkTrue(v1alpha5.MachineLaunched)
-	ExpectApplied(ctx, c, m)
-	cluster.UpdateNodeClaim(nodeclaimutil.New(m))
-	return m, nil
-}
-
-func ExpectMachineDeployed(ctx context.Context, c client.Client, cluster *state.Cluster, cloudProvider cloudprovider.CloudProvider, m *v1alpha5.Machine) (*v1alpha5.Machine, *v1.Node) {
-	GinkgoHelper()
-	m, err := ExpectMachineDeployedNoNode(ctx, c, cluster, cloudProvider, m)
-	if err != nil {
-		return m, nil
-	}
-	m.StatusConditions().MarkTrue(v1alpha5.MachineRegistered)
-
-	// Mock the machine launch and node joining at the apiserver
-	node := test.MachineLinkedNode(m)
-	node.Labels = lo.Assign(node.Labels, map[string]string{v1alpha5.LabelNodeRegistered: "true"})
-	ExpectApplied(ctx, c, m, node)
-	Expect(cluster.UpdateNode(ctx, node)).To(Succeed())
-	cluster.UpdateNodeClaim(nodeclaimutil.New(m))
-	return m, node
-}
-
 func ExpectNodeClaimDeployedNoNode(ctx context.Context, c client.Client, cluster *state.Cluster, cloudProvider cloudprovider.CloudProvider, nc *v1beta1.NodeClaim) (*v1beta1.NodeClaim, error) {
 	GinkgoHelper()
 	resolved, err := cloudProvider.Create(ctx, nc)
@@ -388,24 +334,6 @@ func ExpectNodeClaimDeployed(ctx context.Context, c client.Client, cluster *stat
 	return nc, node
 }
 
-func ExpectMachinesCascadeDeletion(ctx context.Context, c client.Client, machines ...*v1alpha5.Machine) {
-	GinkgoHelper()
-	nodes := ExpectNodes(ctx, c)
-	for _, machine := range machines {
-		err := c.Get(ctx, client.ObjectKeyFromObject(machine), &v1alpha5.Machine{})
-		if !errors.IsNotFound(err) {
-			continue
-		}
-		for _, node := range nodes {
-			if node.Spec.ProviderID == machine.Status.ProviderID {
-				Expect(c.Delete(ctx, node))
-				ExpectFinalizersRemoved(ctx, c, node)
-				ExpectNotFound(ctx, c, node)
-			}
-		}
-	}
-}
-
 func ExpectNodeClaimsCascadeDeletion(ctx context.Context, c client.Client, nodeClaims ...*v1beta1.NodeClaim) {
 	GinkgoHelper()
 	nodes := ExpectNodes(ctx, c)
@@ -435,17 +363,6 @@ func ExpectMakeNodeClaimsInitialized(ctx context.Context, c client.Client, nodeC
 	}
 }
 
-func ExpectMakeMachinesInitialized(ctx context.Context, c client.Client, machines ...*v1alpha5.Machine) {
-	GinkgoHelper()
-	for i := range machines {
-		machines[i] = ExpectExists(ctx, c, machines[i])
-		machines[i].StatusConditions().MarkTrue(v1alpha5.MachineLaunched)
-		machines[i].StatusConditions().MarkTrue(v1alpha5.MachineRegistered)
-		machines[i].StatusConditions().MarkTrue(v1alpha5.MachineInitialized)
-		ExpectApplied(ctx, c, machines[i])
-	}
-}
-
 func ExpectMakeNodesInitialized(ctx context.Context, c client.Client, nodes ...*v1.Node) {
 	GinkgoHelper()
 	ExpectMakeNodesReady(ctx, c, nodes...)
@@ -618,21 +535,6 @@ func ExpectNodeClaims(ctx context.Context, c client.Client) []*v1beta1.NodeClaim
 	return lo.ToSlicePtr(nodeClaims.Items)
 }
 
-func ExpectMakeNodesAndMachinesInitializedAndStateUpdated(ctx context.Context, c client.Client, nodeStateController, machineStateController controller.Controller, nodes []*v1.Node, machines []*v1alpha5.Machine) {
-	GinkgoHelper()
-
-	ExpectMakeNodesInitialized(ctx, c, nodes...)
-	ExpectMakeMachinesInitialized(ctx, c, machines...)
-
-	// Inform cluster state about node and machine readiness
-	for _, n := range nodes {
-		ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(n))
-	}
-	for _, m := range machines {
-		ExpectReconcileSucceeded(ctx, machineStateController, client.ObjectKeyFromObject(m))
-	}
-}
-
 func ExpectMakeNodesAndNodeClaimsInitializedAndStateUpdated(ctx context.Context, c client.Client, nodeStateController, nodeClaimStateController controller.Controller, nodes []*v1.Node, nodeClaims []*v1beta1.NodeClaim) {
 	GinkgoHelper()
 
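
With the Machine-based expectation helpers gone, test code goes through the NodeClaim equivalents that remain in this file. A hedged usage sketch: env.Client, cluster, cloudProvider, the two state controllers, and the test.NodeClaim fixture are assumed to be in scope from the usual suite setup and are not shown in this diff.

// Hypothetical test snippet; the identifiers noted above are assumptions.
nodeClaim := test.NodeClaim() // assumed fixture constructor from pkg/test
ExpectApplied(ctx, env.Client, nodeClaim)

// Deploy and initialize through the NodeClaim helpers kept by this commit.
nodeClaim, node := ExpectNodeClaimDeployed(ctx, env.Client, cluster, cloudProvider, nodeClaim)
ExpectMakeNodesAndNodeClaimsInitializedAndStateUpdated(ctx, env.Client,
	nodeStateController, nodeClaimStateController,
	[]*v1.Node{node}, []*v1beta1.NodeClaim{nodeClaim})

// Cleanup path that previously had an ExpectMachinesCascadeDeletion twin.
ExpectNodeClaimsCascadeDeletion(ctx, env.Client, nodeClaim)
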
