chore: Remove remaining alpha fields from codebase #819

Merged
10 changes: 1 addition & 9 deletions pkg/apis/apis.go
@@ -22,35 +22,27 @@ import (
"k8s.io/apimachinery/pkg/runtime"

"github.com/aws/karpenter-core/pkg/apis/settings"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/apis/v1beta1"
"github.com/aws/karpenter-core/pkg/utils/functional"
)

var (
// Builder includes all types within the apis package
Builder = runtime.NewSchemeBuilder(
- v1alpha5.SchemeBuilder.AddToScheme,
v1beta1.SchemeBuilder.AddToScheme,
)
// AddToScheme may be used to add all resources defined in the project to a Scheme
AddToScheme = Builder.AddToScheme
Settings = []settings.Injectable{&settings.Settings{}}
)

- //go:generate controller-gen crd:generateEmbeddedObjectMeta=true object:headerFile="../../hack/boilerplate.go.txt" paths="./..." output:crd:artifacts:config=crds
+ //go:generate controller-gen crd object:headerFile="../../hack/boilerplate.go.txt" paths="./..." output:crd:artifacts:config=crds
var (
- //go:embed crds/karpenter.sh_provisioners.yaml
- ProvisionerCRD []byte
- //go:embed crds/karpenter.sh_machines.yaml
- MachineCRD []byte
//go:embed crds/karpenter.sh_nodepools.yaml
NodePoolCRD []byte
//go:embed crds/karpenter.sh_nodeclaims.yaml
NodeClaimCRD []byte
CRDs = []*v1.CustomResourceDefinition{
- lo.Must(functional.Unmarshal[v1.CustomResourceDefinition](ProvisionerCRD)),
- lo.Must(functional.Unmarshal[v1.CustomResourceDefinition](MachineCRD)),
lo.Must(functional.Unmarshal[v1.CustomResourceDefinition](NodePoolCRD)),
lo.Must(functional.Unmarshal[v1.CustomResourceDefinition](NodeClaimCRD)),
}
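Note: with the v1alpha5 scheme builder and the Provisioner/Machine CRDs removed, the package's exported surface shrinks to the v1beta1 types. A minimal sketch of how a consumer typically uses these exports (illustrative only, not code from this PR):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	"github.com/aws/karpenter-core/pkg/apis"
)

func main() {
	scheme := runtime.NewScheme()
	// After this PR, apis.AddToScheme registers only the v1beta1 types;
	// the v1alpha5 scheme builder is no longer part of apis.Builder.
	if err := apis.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// apis.CRDs now holds just the remaining definitions (NodePool and
	// NodeClaim), e.g. for installation into an envtest API server.
	fmt.Println(len(apis.CRDs))
}
```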
354 changes: 0 additions & 354 deletions pkg/apis/crds/karpenter.sh_machines.yaml

This file was deleted.

381 changes: 0 additions & 381 deletions pkg/apis/crds/karpenter.sh_provisioners.yaml

This file was deleted.

1 change: 1 addition & 0 deletions pkg/apis/v1alpha5/doc.go
@@ -12,6 +12,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

+ // +kubebuilder:skip
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package,register
// +k8s:defaulter-gen=TypeMeta
2 changes: 0 additions & 2 deletions pkg/apis/v1alpha5/provisioner.go
@@ -118,8 +118,6 @@ func ProviderAnnotation(p *Provider) map[string]string {
return map[string]string{ProviderCompatabilityAnnotationKey: string(raw)}
}

- // TODO @joinnis: Mark this version as deprecated when v1beta1 APIs are formally released
-
// Provisioner is the Schema for the Provisioners API
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=provisioners,scope=Cluster,categories=karpenter
12 changes: 0 additions & 12 deletions pkg/apis/v1beta1/nodeclaim.go
@@ -50,9 +50,6 @@ type NodeClaimSpec struct {
// NodeClassRef is a reference to an object that defines provider specific configuration
// +required
NodeClassRef *NodeClassReference `json:"nodeClassRef"`
- // Provider stores CloudProvider-specific details from a conversion from a v1alpha5.Provisioner
- // TODO @joinnis: Remove this field when v1alpha5 is unsupported in a future version of Karpenter
- Provider *Provider `json:"-"`
}

// ResourceRequirements models the required resources for the NodeClaim to launch
@@ -73,10 +70,6 @@ type KubeletConfiguration struct {
// Note that not all providers may use all addresses.
//+optional
ClusterDNS []string `json:"clusterDNS,omitempty"`
- // TODO @joinnis: Remove this field when v1alpha5 is unsupported in a future version of Karpenter
- // ContainerRuntime is the container runtime to be used with your worker nodes.
- // +optional
- ContainerRuntime *string `json:"-"`
// MaxPods is an override for the maximum number of pods that can run on
// a worker node instance.
// +kubebuilder:validation:Minimum:=0
@@ -170,11 +163,6 @@ type NodeClaim struct {

Spec NodeClaimSpec `json:"spec,omitempty"`
Status NodeClaimStatus `json:"status,omitempty"`

- // IsMachine tells Karpenter whether the in-memory representation of this object
- // is actually referring to a NodeClaim object. This value is not actually part of the v1beta1 public-facing API
- // TODO @joinnis: Remove this field when v1alpha5 is unsupported in a future version of Karpenter
- IsMachine bool `json:"-"`
}

// NodeClaimList contains a list of NodeClaims
10 changes: 0 additions & 10 deletions pkg/apis/v1beta1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

1 change: 0 additions & 1 deletion pkg/cloudprovider/fake/cloudprovider.go
@@ -135,7 +135,6 @@ func (c *CloudProvider) Create(ctx context.Context, nodeClaim *v1beta1.NodeClaim
Capacity: functional.FilterMap(instanceType.Capacity, func(_ v1.ResourceName, v resource.Quantity) bool { return !resources.IsZero(v) }),
Allocatable: functional.FilterMap(instanceType.Allocatable(), func(_ v1.ResourceName, v resource.Quantity) bool { return !resources.IsZero(v) }),
},
- IsMachine: nodeClaim.IsMachine,
}
c.CreatedNodeClaims[created.Status.ProviderID] = created
return created, nil
6 changes: 3 additions & 3 deletions pkg/controllers/disruption/consolidation.go
@@ -180,9 +180,9 @@ func (c *consolidation) computeConsolidation(ctx context.Context, candidates ...
// assumption, that the spot variant will launch. We also need to add a requirement to the node to ensure that if
// spot capacity is insufficient we don't replace the node with a more expensive on-demand node. Instead the launch
// should fail and we'll just leave the node alone.
- ctReq := results.NewNodeClaims[0].Requirements.Get(v1alpha5.LabelCapacityType)
- if ctReq.Has(v1alpha5.CapacityTypeSpot) && ctReq.Has(v1alpha5.CapacityTypeOnDemand) {
- results.NewNodeClaims[0].Requirements.Add(scheduling.NewRequirement(v1alpha5.LabelCapacityType, v1.NodeSelectorOpIn, v1alpha5.CapacityTypeSpot))
+ ctReq := results.NewNodeClaims[0].Requirements.Get(v1beta1.CapacityTypeLabelKey)
+ if ctReq.Has(v1beta1.CapacityTypeSpot) && ctReq.Has(v1beta1.CapacityTypeOnDemand) {
+ results.NewNodeClaims[0].Requirements.Add(scheduling.NewRequirement(v1beta1.CapacityTypeLabelKey, v1.NodeSelectorOpIn, v1beta1.CapacityTypeSpot))
}

return Command{
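Note: the comment block above is the heart of this hunk: when a consolidation replacement could launch as either spot or on-demand, the requirement is narrowed to spot so that an out-of-capacity spot launch fails rather than silently upgrading to pricier on-demand capacity. A standalone restatement of that rule, with invented helper names and plain strings in place of Karpenter's requirement types:

```go
package main

import "fmt"

// narrowCapacityType pins a mixed spot/on-demand candidate set to spot only,
// mirroring the guard above: a failed spot launch leaves the old node alone
// instead of replacing it with a more expensive on-demand node.
func narrowCapacityType(allowed []string) []string {
	hasSpot, hasOnDemand := false, false
	for _, ct := range allowed {
		switch ct {
		case "spot":
			hasSpot = true
		case "on-demand":
			hasOnDemand = true
		}
	}
	if hasSpot && hasOnDemand {
		return []string{"spot"}
	}
	return allowed
}

func main() {
	fmt.Println(narrowCapacityType([]string{"spot", "on-demand"})) // [spot]
}
```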
19 changes: 9 additions & 10 deletions pkg/controllers/disruption/suite_test.go
@@ -37,7 +37,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"

coreapis "github.com/aws/karpenter-core/pkg/apis"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/apis/v1beta1"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
@@ -114,7 +113,7 @@ var _ = BeforeEach(func() {

onDemandInstances = lo.Filter(cloudProvider.InstanceTypes, func(i *cloudprovider.InstanceType, _ int) bool {
for _, o := range i.Offerings.Available() {
- if o.CapacityType == v1alpha5.CapacityTypeOnDemand {
+ if o.CapacityType == v1beta1.CapacityTypeOnDemand {
return true
}
}
@@ -279,7 +278,7 @@ var _ = Describe("Disruption Taints", func() {
Name: "current-on-demand",
Offerings: []cloudprovider.Offering{
{
- CapacityType: v1alpha5.CapacityTypeOnDemand,
+ CapacityType: v1beta1.CapacityTypeOnDemand,
Zone: "test-zone-1a",
Price: 1.5,
Available: false,
@@ -290,19 +289,19 @@
Name: "spot-replacement",
Offerings: []cloudprovider.Offering{
{
- CapacityType: v1alpha5.CapacityTypeSpot,
+ CapacityType: v1beta1.CapacityTypeSpot,
Zone: "test-zone-1a",
Price: 1.0,
Available: true,
},
{
- CapacityType: v1alpha5.CapacityTypeSpot,
+ CapacityType: v1beta1.CapacityTypeSpot,
Zone: "test-zone-1b",
Price: 0.2,
Available: true,
},
{
- CapacityType: v1alpha5.CapacityTypeSpot,
+ CapacityType: v1beta1.CapacityTypeSpot,
Zone: "test-zone-1c",
Price: 0.4,
Available: true,
@@ -313,10 +312,10 @@
nodeClaim, node = test.NodeClaimAndNode(v1beta1.NodeClaim{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
- v1.LabelInstanceTypeStable: currentInstance.Name,
- v1alpha5.LabelCapacityType: currentInstance.Offerings[0].CapacityType,
- v1.LabelTopologyZone: currentInstance.Offerings[0].Zone,
- v1beta1.NodePoolLabelKey: nodePool.Name,
+ v1.LabelInstanceTypeStable: currentInstance.Name,
+ v1beta1.CapacityTypeLabelKey: currentInstance.Offerings[0].CapacityType,
+ v1.LabelTopologyZone: currentInstance.Offerings[0].Zone,
+ v1beta1.NodePoolLabelKey: nodePool.Name,
},
},
Status: v1beta1.NodeClaimStatus{
5 changes: 0 additions & 5 deletions pkg/controllers/metrics/node/suite_test.go
@@ -24,11 +24,9 @@ import (

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"

"github.com/aws/karpenter-core/pkg/apis"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/controllers/metrics/node"
"github.com/aws/karpenter-core/pkg/controllers/state/informer"
"github.com/aws/karpenter-core/pkg/operator/controller"
@@ -53,7 +51,6 @@ var cluster *state.Cluster
var nodeController controller.Controller
var metricsStateController controller.Controller
var cloudProvider *fake.CloudProvider
- var provisioner *v1alpha5.Provisioner

func TestAPIs(t *testing.T) {
ctx = TestContextWithLogger(t)
@@ -69,10 +66,8 @@ var _ = BeforeSuite(func() {
cloudProvider.InstanceTypes = fake.InstanceTypesAssorted()
fakeClock = clock.NewFakeClock(time.Now())
cluster = state.NewCluster(fakeClock, env.Client, cloudProvider)
- provisioner = test.Provisioner(test.ProvisionerOptions{ObjectMeta: metav1.ObjectMeta{Name: "default"}})
nodeController = informer.NewNodeController(env.Client, cluster)
metricsStateController = node.NewController(cluster)
- ExpectApplied(ctx, env.Client, provisioner)
})

var _ = AfterSuite(func() {
7 changes: 2 additions & 5 deletions pkg/controllers/node/termination/controller.go
@@ -32,7 +32,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/apis/v1beta1"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/controllers/node/termination/terminator"
@@ -126,13 +125,11 @@ func (c *Controller) removeFinalizer(ctx context.Context, n *v1.Node) error {
return client.IgnoreNotFound(fmt.Errorf("patching node, %w", err))
}
metrics.NodesTerminatedCounter.With(prometheus.Labels{
- metrics.NodePoolLabel: n.Labels[v1beta1.NodePoolLabelKey],
- metrics.ProvisionerLabel: n.Labels[v1alpha5.ProvisionerNameLabelKey],
+ metrics.NodePoolLabel: n.Labels[v1beta1.NodePoolLabelKey],
}).Inc()
// We use stored.DeletionTimestamp since the api-server may give back a node after the patch without a deletionTimestamp
TerminationSummary.With(prometheus.Labels{
- metrics.ProvisionerLabel: n.Labels[v1alpha5.ProvisionerNameLabelKey],
- metrics.NodePoolLabel: n.Labels[v1beta1.NodePoolLabelKey],
+ metrics.NodePoolLabel: n.Labels[v1beta1.NodePoolLabelKey],
}).Observe(time.Since(stored.DeletionTimestamp.Time).Seconds())
logging.FromContext(ctx).Infof("deleted node")
}
2 changes: 1 addition & 1 deletion pkg/controllers/node/termination/metrics.go
@@ -30,7 +30,7 @@ var (
Help: "The time taken between a node's deletion request and the removal of its finalizer",
Objectives: metrics.SummaryObjectives(),
},
- []string{metrics.ProvisionerLabel, metrics.NodePoolLabel},
+ []string{metrics.NodePoolLabel},
)
)

2 changes: 1 addition & 1 deletion pkg/controllers/node/termination/suite_test.go
@@ -63,7 +63,7 @@ func TestAPIs(t *testing.T) {

var _ = BeforeSuite(func() {
fakeClock = clock.NewFakeClock(time.Now())
- env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...), test.WithFieldIndexers(test.MachineFieldIndexer(ctx), test.NodeClaimFieldIndexer(ctx)))
+ env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...), test.WithFieldIndexers(test.NodeClaimFieldIndexer(ctx)))

cloudProvider = fake.NewCloudProvider()
recorder = test.NewEventRecorder()
19 changes: 3 additions & 16 deletions pkg/controllers/nodeclaim/disruption/drift.go
@@ -26,7 +26,6 @@ import (

"github.com/samber/lo"

"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/apis/v1beta1"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/metrics"
@@ -112,24 +111,12 @@ func (d *Drift) isDrifted(ctx context.Context, nodePool *v1beta1.NodePool, nodeC
// Eligible fields for static drift are described in the docs
// https://karpenter.sh/docs/concepts/deprovisioning/#drift
func areStaticFieldsDrifted(nodePool *v1beta1.NodePool, nodeClaim *v1beta1.NodeClaim) cloudprovider.DriftReason {
- var ownerHashKey string
- if nodeClaim.IsMachine {
- ownerHashKey = v1alpha5.ProvisionerHashAnnotationKey
- } else {
- ownerHashKey = v1beta1.NodePoolHashAnnotationKey
- }
- nodePoolHash, foundHashNodePool := nodePool.Annotations[ownerHashKey]
- nodeClaimHash, foundHashNodeClaim := nodeClaim.Annotations[ownerHashKey]
+ nodePoolHash, foundHashNodePool := nodePool.Annotations[v1beta1.NodePoolHashAnnotationKey]
+ nodeClaimHash, foundHashNodeClaim := nodeClaim.Annotations[v1beta1.NodePoolHashAnnotationKey]
if !foundHashNodePool || !foundHashNodeClaim {
return ""
}
- if nodePoolHash != nodeClaimHash {
- if nodeClaim.IsMachine {
- return ProvisionerDrifted
- }
- return NodePoolDrifted
- }
- return ""
+ return lo.Ternary(nodePoolHash != nodeClaimHash, NodePoolDrifted, "")
}

func areRequirementsDrifted(nodePool *v1beta1.NodePool, nodeClaim *v1beta1.NodeClaim) cloudprovider.DriftReason {
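Note: with the IsMachine branch gone, static drift reduces to comparing one NodePool hash annotation on both objects. A self-contained sketch of that check using plain maps; the annotation key string below is an assumption, not necessarily the real constant's value:

```go
package main

import (
	"fmt"

	"github.com/samber/lo"
)

// nodePoolHashKey is an assumed stand-in for v1beta1.NodePoolHashAnnotationKey.
const nodePoolHashKey = "karpenter.sh/nodepool-hash"

// staticDrift mirrors areStaticFieldsDrifted above: no conclusion unless both
// hashes are present, and drift only when the recorded hashes disagree.
func staticDrift(nodePoolAnn, nodeClaimAnn map[string]string) string {
	poolHash, okPool := nodePoolAnn[nodePoolHashKey]
	claimHash, okClaim := nodeClaimAnn[nodePoolHashKey]
	if !okPool || !okClaim {
		return ""
	}
	return lo.Ternary(poolHash != claimHash, "NodePoolDrifted", "")
}

func main() {
	fmt.Println(staticDrift(
		map[string]string{nodePoolHashKey: "1234"},
		map[string]string{nodePoolHashKey: "5678"},
	)) // prints: NodePoolDrifted
}
```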
2 changes: 1 addition & 1 deletion pkg/controllers/nodeclaim/garbagecollection/controller.go
@@ -86,7 +86,7 @@ func (c *Controller) Reconcile(ctx context.Context, _ reconcile.Request) (reconc
"provider-id", nodeClaims[i].Status.ProviderID,
"nodepool", nodeClaims[i].Labels[v1beta1.NodePoolLabelKey],
).
- Debugf("garbage collecting %s with no cloudprovider representation", lo.Ternary(nodeClaims[i].IsMachine, "machine", "nodeclaim"))
+ Debugf("garbage collecting nodeclaim with no cloudprovider representation")
nodeclaimutil.TerminatedCounter(nodeClaims[i], "garbage_collected").Inc()
})
if err = multierr.Combine(errs...); err != nil {
2 changes: 1 addition & 1 deletion pkg/controllers/nodeclaim/lifecycle/initialization.go
@@ -79,7 +79,7 @@ func (i *Initialization) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeC
return reconcile.Result{}, err
}
}
- logging.FromContext(ctx).Infof("initialized %s", lo.Ternary(nodeClaim.IsMachine, "machine", "nodeclaim"))
+ logging.FromContext(ctx).Infof("initialized nodeclaim")
nodeClaim.StatusConditions().MarkTrue(v1beta1.Initialized)
nodeclaimutil.InitializedCounter(nodeClaim).Inc()
return reconcile.Result{}, nil
37 changes: 4 additions & 33 deletions pkg/controllers/nodeclaim/lifecycle/launch.go
@@ -25,7 +25,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/apis/v1beta1"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/events"
@@ -51,14 +50,10 @@ func (l *Launch) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeClaim) (r
// One of the following scenarios can happen with a NodeClaim that isn't marked as launched:
// 1. It was already launched by the CloudProvider but the client-go cache wasn't updated quickly enough or
// patching failed on the status. In this case, we use the in-memory cached value for the created NodeClaim.
- // 2. It is a "linked" NodeClaim, which implies that the CloudProvider NodeClaim already exists for the NodeClaim CR, but we
- // need to grab info from the CloudProvider to get details on the NodeClaim.
- // 3. It is a standard NodeClaim launch where we should call CloudProvider Create() and fill in details of the launched
+ // 2. It is a standard NodeClaim launch where we should call CloudProvider Create() and fill in details of the launched
// NodeClaim into the NodeClaim CR.
if ret, ok := l.cache.Get(string(nodeClaim.UID)); ok {
created = ret.(*v1beta1.NodeClaim)
- } else if _, ok := nodeClaim.Annotations[v1alpha5.MachineLinkedAnnotationKey]; ok {
- created, err = l.linkNodeClaim(ctx, nodeClaim)
} else {
created, err = l.launchNodeClaim(ctx, nodeClaim)
}
@@ -77,30 +72,6 @@ func (l *Launch) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeClaim) (r
return reconcile.Result{}, nil
}

- func (l *Launch) linkNodeClaim(ctx context.Context, nodeClaim *v1beta1.NodeClaim) (*v1beta1.NodeClaim, error) {
- ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("provider-id", nodeClaim.Annotations[v1alpha5.MachineLinkedAnnotationKey]))
- created, err := l.cloudProvider.Get(ctx, nodeClaim.Annotations[v1alpha5.MachineLinkedAnnotationKey])
- if err != nil {
- if !cloudprovider.IsNodeClaimNotFoundError(err) {
- nodeClaim.StatusConditions().MarkFalse(v1beta1.Launched, "LinkFailed", truncateMessage(err.Error()))
- return nil, fmt.Errorf("linking, %w", err)
- }
- if err = nodeclaimutil.Delete(ctx, l.kubeClient, nodeClaim); err != nil {
- return nil, client.IgnoreNotFound(err)
- }
- logging.FromContext(ctx).Debugf("garbage collected with no cloudprovider representation")
- nodeclaimutil.TerminatedCounter(nodeClaim, "garbage_collected").Inc()
- return nil, nil
- }
- logging.FromContext(ctx).With(
- "provider-id", created.Status.ProviderID,
- "instance-type", created.Labels[v1.LabelInstanceTypeStable],
- "zone", created.Labels[v1.LabelTopologyZone],
- "capacity-type", created.Labels[v1alpha5.LabelCapacityType],
- "allocatable", created.Status.Allocatable).Infof("linked %s", lo.Ternary(nodeClaim.IsMachine, "machine", "nodeclaim"))
- return created, nil
- }
-
func (l *Launch) launchNodeClaim(ctx context.Context, nodeClaim *v1beta1.NodeClaim) (*v1beta1.NodeClaim, error) {
created, err := l.cloudProvider.Create(ctx, nodeClaim)
if err != nil {
@@ -116,18 +87,18 @@ func (l *Launch) launchNodeClaim(ctx context.Context, nodeClaim *v1beta1.NodeCla
case cloudprovider.IsNodeClassNotReadyError(err):
l.recorder.Publish(NodeClassNotReadyEvent(nodeClaim, err))
nodeClaim.StatusConditions().MarkFalse(v1beta1.Launched, "LaunchFailed", truncateMessage(err.Error()))
- return nil, fmt.Errorf("launching %s, %w", lo.Ternary(nodeClaim.IsMachine, "machine", "nodeclaim"), err)
+ return nil, fmt.Errorf("launching nodeclaim, %w", err)
default:
nodeClaim.StatusConditions().MarkFalse(v1beta1.Launched, "LaunchFailed", truncateMessage(err.Error()))
- return nil, fmt.Errorf("launching %s, %w", lo.Ternary(nodeClaim.IsMachine, "machine", "nodeclaim"), err)
+ return nil, fmt.Errorf("launching nodeclaim, %w", err)
}
}
logging.FromContext(ctx).With(
"provider-id", created.Status.ProviderID,
"instance-type", created.Labels[v1.LabelInstanceTypeStable],
"zone", created.Labels[v1.LabelTopologyZone],
"capacity-type", created.Labels[v1beta1.CapacityTypeLabelKey],
"allocatable", created.Status.Allocatable).Infof("launched %s", lo.Ternary(nodeClaim.IsMachine, "machine", "nodeclaim"))
"allocatable", created.Status.Allocatable).Infof("launched nodeclaim")
return created, nil
}

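Note: scenario 1 in the comment above depends on an in-memory cache keyed by the object's UID, so a reconcile retry after a failed status patch reuses the already-created NodeClaim instead of calling Create() twice. A hedged sketch of that dedup pattern; the cache API follows patrickmn/go-cache, and whether that matches l.cache here, along with the TTL values, is assumed:

```go
package main

import (
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

// NodeClaim is a stub standing in for v1beta1.NodeClaim to keep this self-contained.
type NodeClaim struct{ ProviderID string }

// launchCache holds created NodeClaims by UID; the TTL values here are invented.
var launchCache = cache.New(time.Minute, 10*time.Second)

// getOrCreate returns the cached launch result for uid if present, so a retry
// after a failed status patch does not create a second cloud instance.
func getOrCreate(uid string, create func() (*NodeClaim, error)) (*NodeClaim, error) {
	if v, ok := launchCache.Get(uid); ok {
		return v.(*NodeClaim), nil
	}
	created, err := create()
	if err != nil {
		return nil, err
	}
	launchCache.Set(uid, created, cache.DefaultExpiration)
	return created, nil
}

func main() {
	nc, _ := getOrCreate("uid-1", func() (*NodeClaim, error) {
		return &NodeClaim{ProviderID: "fake://instance-1"}, nil
	})
	fmt.Println(nc.ProviderID)
}
```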