Commit

chore: Handle Provisioners with provider field for karpenter-convert
jonathan-innis committed Oct 30, 2023
1 parent 7023602 commit 23e58f2
Showing 9 changed files with 209 additions and 56 deletions.
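
Condensed from the testdata added in this commit: a Provisioner that embeds its AWS settings inline under spec.provider (rather than referencing an AWSNodeTemplate through providerRef) now converts into two objects: a NodePool plus an EC2NodeClass that inherits the Provisioner's name. This is also why convert() in convert.go now returns a slice and RunConvert prints every object it gets back. An abridged before/after, assuming a Provisioner named "default":

# Input: Provisioner with an inline provider block
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
  name: default
spec:
  provider:
    subnetSelector:
      karpenter.sh/discovery: karpenter-demo
    securityGroupSelector:
      karpenter.sh/discovery: karpenter-demo

# Output: a NodePool and a generated EC2NodeClass, both named "default"
apiVersion: karpenter.sh/v1beta1
kind: NodePool
metadata:
  name: default
spec:
  template:
    spec:
      nodeClassRef:
        name: default
---
apiVersion: karpenter.k8s.aws/v1beta1
kind: EC2NodeClass
metadata:
  name: default
spec:
  amiFamily: AL2 # defaulted when the provider block does not set it
  subnetSelectorTerms:
    - tags:
        karpenter.sh/discovery: karpenter-demo
  securityGroupSelectorTerms:
    - tags:
        karpenter.sh/discovery: karpenter-demo
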
7 changes: 3 additions & 4 deletions tools/karpenter-convert/cmd/karpenter-convert/main.go
@@ -17,17 +17,16 @@ package main
import (
"os"

"github.com/aws/karpenter/tools/karpenter-convert/pkg/convert"

"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/genericiooptions"
"k8s.io/component-base/cli"
cmdutil "k8s.io/kubectl/pkg/cmd/util"

"k8s.io/component-base/cli"
"github.com/aws/karpenter/tools/karpenter-convert/pkg/convert"
)

func main() {
-kubeConfigFlags := genericclioptions.NewConfigFlags(false).WithDeprecatedPasswordFlag()
+kubeConfigFlags := genericclioptions.NewConfigFlags(false)
f := cmdutil.NewFactory(kubeConfigFlags)
cmd := convert.NewCmd(f, genericiooptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr})
os.Exit(cli.Run(cmd))
110 changes: 68 additions & 42 deletions tools/karpenter-convert/pkg/convert/convert.go
@@ -75,7 +75,6 @@ func NewCmd(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Comm
rootCmd.Flags().BoolVar(&o.IgnoreDefaults, "ignore-defaults", o.IgnoreDefaults, "Ignore defining default requirements when migrating Provisioners to NodePool.")
cmdutil.AddJsonFilenameFlag(rootCmd.Flags(), &o.Filenames, "Filename, directory, or URL to files to need to get converted.")
rootCmd.Flags().BoolVarP(&o.Recursive, "recursive", "R", o.Recursive, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.")

o.PrintFlags.AddFlags(rootCmd)
return rootCmd
}
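
For context, --ignore-defaults controls whether the Provisioner is run through its v1alpha5 defaulting before conversion. Judging from the expected NodePool in this commit's testdata, that defaulting fills in requirements the Provisioner leaves unset, for example:

requirements:
  - key: kubernetes.io/os
    operator: In
    values: ["linux"]
  - key: karpenter.k8s.aws/instance-category
    operator: In
    values: ["c", "m", "r"]
  - key: karpenter.k8s.aws/instance-generation
    operator: Gt
    values: ["2"]
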
@@ -132,18 +131,21 @@ func (o *Context) RunConvert() error {
if info.Object == nil {
continue
}
-obj, err := convert(info.Object, o)
+
+objs, err := convert(info.Object, o)
if err != nil {
return err
}
-var buffer bytes.Buffer
-writer := io.Writer(&buffer)
-if err = o.Printer.PrintObj(obj, writer); err != nil {
-return err
-}
-output := dropFields(buffer)
-if _, err = o.Out.Write([]byte(output)); err != nil {
-return err
+for _, obj := range objs {
+var buffer bytes.Buffer
+writer := io.Writer(&buffer)
+if err = o.Printer.PrintObj(obj, writer); err != nil {
+return err
+}
+output := dropFields(buffer)
+if _, err = o.Out.Write([]byte(output)); err != nil {
+return err
+}
+}
}
return nil
@@ -154,48 +156,62 @@ func dropFields(buffer bytes.Buffer) string {
output = strings.Replace(output, "status: {}\n", "", -1)
output = strings.Replace(output, " creationTimestamp: null\n", "", -1)
output = strings.Replace(output, " resources: {}\n", "", -1)

return output
}

// Convert a Provisioner into a NodePool and an AWSNodeTemplate into a NodeClass.
// If the input is of a different kind, returns an error
-func convert(resource runtime.Object, o *Context) (runtime.Object, error) {
+func convert(resource runtime.Object, o *Context) ([]runtime.Object, error) {
kind := resource.GetObjectKind().GroupVersionKind().Kind
switch kind {
case "Provisioner":
-return convertProvisioner(resource, o), nil
+provisioner := resource.(*corev1alpha5.Provisioner)
+
+var providerObj *v1beta1.EC2NodeClass
+var err error
+if provider := provisioner.Spec.Provider; provider != nil {
+providerObj, err = convertProvider(provider.Raw, provisioner.Name)
+if err != nil {
+return nil, fmt.Errorf("converting spec.provider for Provisioner, %w", err)
+}
+provisioner.Spec.ProviderRef = &corev1alpha5.MachineTemplateRef{
+Name: providerObj.Name,
+}
+}
+return lo.WithoutEmpty([]runtime.Object{convertProvisioner(provisioner, o), providerObj}), nil
case "AWSNodeTemplate":
-return convertNodeTemplate(resource)
+nodeTemplate := resource.(*v1alpha1.AWSNodeTemplate)
+nodeClass, err := convertNodeTemplate(nodeTemplate)
+if err != nil {
+return nil, fmt.Errorf("converting AWSNodeTemplate, %w", err)
+}
+return []runtime.Object{nodeClass}, nil
default:
return nil, fmt.Errorf("unknown kind. expected one of Provisioner, AWSNodeTemplate. got %s", kind)
}
}

-func convertNodeTemplate(resource runtime.Object) (runtime.Object, error) {
-nodetemplate := resource.(*v1alpha1.AWSNodeTemplate)
-
-if nodetemplate.Spec.LaunchTemplateName != nil {
-return nil, fmt.Errorf(`cannot convert AWSNodeTemplate with "spec.launchTemplate"`)
+func convertNodeTemplate(nodeTemplate *v1alpha1.AWSNodeTemplate) (*v1beta1.EC2NodeClass, error) {
+if nodeTemplate.Spec.LaunchTemplateName != nil {
+return nil, fmt.Errorf(`cannot convert with "spec.launchTemplate"`)
}

// If the AMIFamily wasn't specified, then we know that it should be AL2 for the conversion
-if nodetemplate.Spec.AMIFamily == nil {
-nodetemplate.Spec.AMIFamily = &v1beta1.AMIFamilyAL2
+if nodeTemplate.Spec.AMIFamily == nil {
+nodeTemplate.Spec.AMIFamily = &v1beta1.AMIFamilyAL2
}

-nodeclass := nodeclassutil.New(nodetemplate)
+nodeclass := nodeclassutil.New(nodeTemplate)
nodeclass.TypeMeta = metav1.TypeMeta{
Kind: "EC2NodeClass",
APIVersion: v1beta1.SchemeGroupVersion.String(),
}

// From the input NodeTemplate, keep only name, labels, annotations and finalizers
nodeclass.ObjectMeta = metav1.ObjectMeta{
-Name: nodetemplate.Name,
-Labels: nodetemplate.Labels,
-Annotations: nodetemplate.Annotations,
-Finalizers: nodetemplate.Finalizers,
+Name: nodeTemplate.Name,
+Labels: nodeTemplate.Labels,
+Annotations: nodeTemplate.Annotations,
+Finalizers: nodeTemplate.Finalizers,
}

// Cleanup the status provided in input
@@ -206,31 +222,41 @@ func convertNodeTemplate(resource runtime.Object) (runtime.Object, error) {
return nodeclass, nil
}

-func convertProvisioner(resource runtime.Object, o *Context) runtime.Object {
-coreprovisioner := resource.(*corev1alpha5.Provisioner)
-
+func convertProvisioner(coreProvisioner *corev1alpha5.Provisioner, o *Context) *corev1beta1.NodePool {
if !o.IgnoreDefaults {
-provisioner := lo.ToPtr(v1alpha5.Provisioner(lo.FromPtr(coreprovisioner)))
+provisioner := lo.ToPtr(v1alpha5.Provisioner(lo.FromPtr(coreProvisioner)))
provisioner.SetDefaults(context.Background())
-coreprovisioner = lo.ToPtr(corev1alpha5.Provisioner(lo.FromPtr(provisioner)))
+coreProvisioner = lo.ToPtr(corev1alpha5.Provisioner(lo.FromPtr(provisioner)))
}

-nodepool := nodepoolutil.New(coreprovisioner)
-nodepool.TypeMeta = metav1.TypeMeta{
+nodePool := nodepoolutil.New(coreProvisioner)
+nodePool.TypeMeta = metav1.TypeMeta{
Kind: "NodePool",
APIVersion: corev1beta1.SchemeGroupVersion.String(),
}

// From the input Provisioner, keep only name, labels, annotations and finalizers
-nodepool.ObjectMeta = metav1.ObjectMeta{
-Name: coreprovisioner.Name,
-Labels: coreprovisioner.Labels,
-Annotations: coreprovisioner.Annotations,
-Finalizers: coreprovisioner.Finalizers,
+nodePool.ObjectMeta = metav1.ObjectMeta{
+Name: coreProvisioner.Name,
+Labels: coreProvisioner.Labels,
+Annotations: coreProvisioner.Annotations,
+Finalizers: coreProvisioner.Finalizers,
}

// Cleanup the status provided in input
-nodepool.Status = corev1beta1.NodePoolStatus{}
+nodePool.Status = corev1beta1.NodePoolStatus{}
+return nodePool
}

-return nodepool
+func convertProvider(provider []byte, provisionerName string) (*v1beta1.EC2NodeClass, error) {
+aws, err := v1alpha1.DeserializeProvider(provider)
+if err != nil {
+return nil, fmt.Errorf("converting provider, %w", err)
+}
+nodeTemplate := &v1alpha1.AWSNodeTemplate{
+ObjectMeta: metav1.ObjectMeta{
+Name: provisionerName,
+},
+}
+nodeTemplate.Spec.AWS = *aws
+return convertNodeTemplate(nodeTemplate)
}
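
Put differently, convertProvider deserializes the legacy inline block into an AWSNodeTemplate that borrows the Provisioner's name, and the Provisioner is rewritten to reference it through providerRef before being handed to nodepoolutil.New; that reference is what surfaces as nodeClassRef in the NodePool testdata below. A rough sketch of the rewrite for a Provisioner named "default":

# as read from the input manifest
spec:
  provider:
    securityGroupSelector:
      karpenter.sh/discovery: karpenter-demo

# as rewritten in memory before NodePool conversion
spec:
  providerRef:
    name: default
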
4 changes: 2 additions & 2 deletions tools/karpenter-convert/pkg/convert/testdata/nodeclass.yaml
@@ -34,10 +34,10 @@ spec:
role: $KARPENTER_NODE_ROLE
securityGroupSelectorTerms:
- tags:
-karpenter.sh/discovery: $MY_CLUSTER_NAME
+karpenter.sh/discovery: karpenter-demo
subnetSelectorTerms:
- tags:
-karpenter.sh/discovery: $MY_CLUSTER_NAME
+karpenter.sh/discovery: karpenter-demo
tags:
MyBackupTag: "yes"
MyTag: "1234"
@@ -7,10 +7,10 @@ spec:
role: $KARPENTER_NODE_ROLE
securityGroupSelectorTerms:
- tags:
-karpenter.sh/discovery: eks-workshop-camigration
+karpenter.sh/discovery: karpenter-demo
subnetSelectorTerms:
- tags:
-karpenter.sh/discovery: eks-workshop-camigration
+karpenter.sh/discovery: karpenter-demo
tags:
-app.kubernetes.io/created-by: eks-workshop
+app.kubernetes.io/created-by: karpenter-demo
aws-node-termination-handler/managed: "true"
@@ -0,0 +1,79 @@
apiVersion: karpenter.sh/v1beta1
kind: NodePool
metadata:
name: default
spec:
disruption:
consolidationPolicy: WhenUnderutilized
expireAfter: Never
template:
metadata:
annotations:
example.com/owner: my-team
labels:
billing-team: my-team
spec:
nodeClassRef:
name: default
requirements:
- key: kubernetes.io/arch
operator: Exists
- key: karpenter.sh/capacity-type
operator: In
values:
- spot
- on-demand
- key: kubernetes.io/os
operator: In
values:
- linux
- key: karpenter.k8s.aws/instance-category
operator: In
values:
- c
- m
- r
- key: karpenter.k8s.aws/instance-generation
operator: Gt
values:
- "2"
startupTaints:
- effect: NoSchedule
key: example.com/another-taint
taints:
- effect: NoSchedule
key: example.com/special-taint
---
apiVersion: karpenter.k8s.aws/v1beta1
kind: EC2NodeClass
metadata:
name: default
spec:
amiFamily: AL2
blockDeviceMappings:
- deviceName: /dev/xvda
ebs:
deleteOnTermination: true
encrypted: true
iops: 10000
kmsKeyID: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
snapshotID: snap-0123456789
throughput: 125
volumeSize: 100Gi
volumeType: gp3
context: test-context-id
metadataOptions:
httpEndpoint: disabled
httpProtocolIPv6: enabled
httpPutResponseHopLimit: 2
httpTokens: required
role: $KARPENTER_NODE_ROLE
securityGroupSelectorTerms:
- tags:
karpenter.sh/discovery: karpenter-demo
subnetSelectorTerms:
- tags:
karpenter.sh/discovery: karpenter-demo
tags:
app.kubernetes.io/created-by: karpenter-demo
aws-node-termination-handler/managed: "true"
@@ -10,9 +10,9 @@ spec:
aws::name: "test name"
MyTag: '*'
subnetSelector:
-karpenter.sh/discovery: $MY_CLUSTER_NAME
+karpenter.sh/discovery: karpenter-demo
securityGroupSelector:
-karpenter.sh/discovery: $MY_CLUSTER_NAME
+karpenter.sh/discovery: karpenter-demo
blockDeviceMappings:
- deviceName: /dev/xvdb
ebs:
@@ -8,11 +8,11 @@ metadata:
uid: 1da65734-30f3-47c2-9bec-8b671a71fb67
spec:
securityGroupSelector:
-karpenter.sh/discovery: eks-workshop-camigration
+karpenter.sh/discovery: karpenter-demo
subnetSelector:
-karpenter.sh/discovery: eks-workshop-camigration
+karpenter.sh/discovery: karpenter-demo
tags:
-app.kubernetes.io/created-by: eks-workshop
+app.kubernetes.io/created-by: karpenter-demo
aws-node-termination-handler/managed: "true"
status:
securityGroups:
@@ -0,0 +1,49 @@
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
name: default
spec:
consolidation:
enabled: true
taints:
- key: example.com/special-taint
effect: NoSchedule
startupTaints:
- key: example.com/another-taint
effect: NoSchedule
labels:
billing-team: my-team
annotations:
example.com/owner: "my-team"
requirements:
- key: "kubernetes.io/arch"
operator: Exists
- key: "karpenter.sh/capacity-type" # If not included, the webhook for the AWS cloud provider will default to on-demand
operator: In
values: ["spot", "on-demand"]
provider:
subnetSelector:
karpenter.sh/discovery: karpenter-demo
securityGroupSelector:
karpenter.sh/discovery: karpenter-demo
context: test-context-id
instanceProfile: "KarpenterInstanceProfile-karpenter-demo"
tags:
app.kubernetes.io/created-by: karpenter-demo
aws-node-termination-handler/managed: "true"
metadataOptions:
httpEndpoint: disabled
httpProtocolIPv6: enabled
httpPutResponseHopLimit: 2
httpTokens: required
blockDeviceMappings:
- deviceName: /dev/xvda
ebs:
volumeSize: 100Gi
volumeType: gp3
iops: 10000
encrypted: true
kmsKeyID: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
deleteOnTermination: true
throughput: 125
snapshotID: snap-0123456789
