Skip to content

Commit

Permalink
Handle Provisioners with provider field
Browse files Browse the repository at this point in the history
  • Loading branch information
jonathan-innis committed Oct 27, 2023
1 parent de4388d commit 7fc98c7
Show file tree
Hide file tree
Showing 9 changed files with 203 additions and 59 deletions.
7 changes: 3 additions & 4 deletions tools/karpenter-convert/cmd/karpenter-convert/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,17 +17,16 @@ package main
import (
"os"

"github.com/aws/karpenter/tools/karpenter-convert/pkg/convert"

"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/genericiooptions"
"k8s.io/component-base/cli"
cmdutil "k8s.io/kubectl/pkg/cmd/util"

"k8s.io/component-base/cli"
"github.com/aws/karpenter/tools/karpenter-convert/pkg/convert"
)

func main() {
kubeConfigFlags := genericclioptions.NewConfigFlags(false).WithDeprecatedPasswordFlag()
kubeConfigFlags := genericclioptions.NewConfigFlags(false)
f := cmdutil.NewFactory(kubeConfigFlags)
cmd := convert.NewCmd(f, genericiooptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr})
code := cli.Run(cmd)
Expand Down
107 changes: 62 additions & 45 deletions tools/karpenter-convert/pkg/convert/convert.go
Original file line number Diff line number Diff line change
Expand Up @@ -65,21 +65,17 @@ func NewCmd(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Comm
PrintFlags: genericclioptions.NewPrintFlags("converted").WithDefaultOutput("yaml"),
IOStreams: ioStreams,
}

var rootCmd = &cobra.Command{
Use: "karpenter-convert",
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(o.Complete(f, cmd))
cmdutil.CheckErr(o.RunConvert())
},
}

rootCmd.Flags().BoolVar(&o.IgnoreDefaults, "ignore-defaults", o.IgnoreDefaults, "Ignore defining default requirements when migrating Provisioners to NodePool.")
cmdutil.AddJsonFilenameFlag(rootCmd.Flags(), &o.Filenames, "Filename, directory, or URL to files to need to get converted.")
rootCmd.Flags().BoolVarP(&o.Recursive, "recursive", "R", o.Recursive, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.")

o.PrintFlags.AddFlags(rootCmd)

return rootCmd
}

Expand Down Expand Up @@ -143,21 +139,20 @@ func (o *Context) RunConvert() error {
continue
}

obj, err := convert(info.Object, o)
objs, err := convert(info.Object, o)
if err != nil {
fmt.Fprintln(o.IOStreams.ErrOut, err.Error())
lo.Must(fmt.Fprintln(o.IOStreams.ErrOut, err.Error()))
} else {
var buffer bytes.Buffer
writer := io.Writer(&buffer)

if err := o.Printer.PrintObj(obj, writer); err != nil {
fmt.Fprintln(o.IOStreams.ErrOut, err.Error())
}

output := dropFields(buffer)

if _, err := o.Out.Write([]byte(output)); err != nil {
fmt.Fprintln(o.IOStreams.ErrOut, err.Error())
for _, obj := range objs {
var buffer bytes.Buffer
writer := io.Writer(&buffer)
if err = o.Printer.PrintObj(obj, writer); err != nil {
lo.Must(fmt.Fprintln(o.IOStreams.ErrOut, err.Error()))
}
output := dropFields(buffer)
if _, err = o.Out.Write([]byte(output)); err != nil {
lo.Must(fmt.Fprintln(o.IOStreams.ErrOut, err.Error()))
}
}
}
}
Expand All @@ -169,43 +164,55 @@ func dropFields(buffer bytes.Buffer) string {
output = strings.Replace(output, "status: {}\n", "", -1)
output = strings.Replace(output, " creationTimestamp: null\n", "", -1)
output = strings.Replace(output, " resources: {}\n", "", -1)

return output
}

// Convert a Provisioner into a NodePool and an AWSNodeTemplate into a NodeClass.
// If the input is of a different kind, returns an error
func convert(resource runtime.Object, o *Context) (runtime.Object, error) {
func convert(resource runtime.Object, o *Context) ([]runtime.Object, error) {
kind := resource.GetObjectKind().GroupVersionKind().Kind
switch kind {
case "Provisioner":
return convertProvisioner(resource, o), nil
provisioner := resource.(*corev1alpha5.Provisioner)

var providerObj runtime.Object
var err error
if provider := provisioner.Spec.Provider; provider != nil {
providerObj, err = convertProvider(provider.Raw, provisioner.Name)
if err != nil {
return nil, fmt.Errorf("converting spec.provider for Provisioner, %w", err)
}
provisioner.Spec.ProviderRef = &corev1alpha5.MachineTemplateRef{
Name: provisioner.Name,
}
}
return lo.WithoutEmpty([]runtime.Object{convertProvisioner(provisioner, o), providerObj}), nil
case "AWSNodeTemplate":
return convertNodeTemplate(resource), nil
nodeTemplate := resource.(*v1alpha1.AWSNodeTemplate)
return []runtime.Object{convertNodeTemplate(nodeTemplate)}, nil
default:
return nil, fmt.Errorf("unknown kind. expected one of Provisioner, AWSNodeTemplate. got %s", kind)
}
}

func convertNodeTemplate(resource runtime.Object) runtime.Object {
nodetemplate := resource.(*v1alpha1.AWSNodeTemplate)
func convertNodeTemplate(nodeTemplate *v1alpha1.AWSNodeTemplate) runtime.Object {
// If the AMIFamily wasn't specified, then we know that it should be AL2 for the conversion
if nodetemplate.Spec.AMIFamily == nil {
nodetemplate.Spec.AMIFamily = &v1beta1.AMIFamilyAL2
if nodeTemplate.Spec.AMIFamily == nil {
nodeTemplate.Spec.AMIFamily = &v1beta1.AMIFamilyAL2
}

nodeclass := nodeclassutil.New(nodetemplate)
nodeclass := nodeclassutil.New(nodeTemplate)
nodeclass.TypeMeta = metav1.TypeMeta{
Kind: "EC2NodeClass",
APIVersion: v1beta1.SchemeGroupVersion.String(),
}

// From the input NodeTemplate, keep only name, labels, annotations and finalizers
nodeclass.ObjectMeta = metav1.ObjectMeta{
Name: nodetemplate.Name,
Labels: nodetemplate.Labels,
Annotations: nodetemplate.Annotations,
Finalizers: nodetemplate.Finalizers,
Name: nodeTemplate.Name,
Labels: nodeTemplate.Labels,
Annotations: nodeTemplate.Annotations,
Finalizers: nodeTemplate.Finalizers,
}

// Cleanup the status provided in input
Expand All @@ -216,31 +223,41 @@ func convertNodeTemplate(resource runtime.Object) runtime.Object {
return nodeclass
}

func convertProvisioner(resource runtime.Object, o *Context) runtime.Object {
coreprovisioner := resource.(*corev1alpha5.Provisioner)

func convertProvisioner(coreProvisioner *corev1alpha5.Provisioner, o *Context) runtime.Object {
if !o.IgnoreDefaults {
provisioner := lo.ToPtr(v1alpha5.Provisioner(lo.FromPtr(coreprovisioner)))
provisioner := lo.ToPtr(v1alpha5.Provisioner(lo.FromPtr(coreProvisioner)))
provisioner.SetDefaults(context.Background())
coreprovisioner = lo.ToPtr(corev1alpha5.Provisioner(lo.FromPtr(provisioner)))
coreProvisioner = lo.ToPtr(corev1alpha5.Provisioner(lo.FromPtr(provisioner)))
}

nodepool := nodepoolutil.New(coreprovisioner)
nodepool.TypeMeta = metav1.TypeMeta{
nodePool := nodepoolutil.New(coreProvisioner)
nodePool.TypeMeta = metav1.TypeMeta{
Kind: "NodePool",
APIVersion: corev1beta1.SchemeGroupVersion.String(),
}

// From the input Provisioner, keep only name, labels, annotations and finalizers
nodepool.ObjectMeta = metav1.ObjectMeta{
Name: coreprovisioner.Name,
Labels: coreprovisioner.Labels,
Annotations: coreprovisioner.Annotations,
Finalizers: coreprovisioner.Finalizers,
nodePool.ObjectMeta = metav1.ObjectMeta{
Name: coreProvisioner.Name,
Labels: coreProvisioner.Labels,
Annotations: coreProvisioner.Annotations,
Finalizers: coreProvisioner.Finalizers,
}

// Cleanup the status provided in input
nodepool.Status = corev1beta1.NodePoolStatus{}
nodePool.Status = corev1beta1.NodePoolStatus{}
return nodePool
}

return nodepool
// convertProvider converts a legacy inline Provisioner spec.provider payload
// into a node class object named after the owning Provisioner, so the
// generated NodePool can reference it by name.
//
// provider is the raw provider bytes (presumably the RawExtension payload of
// spec.provider — confirm against the caller in convert); provisionerName
// names the resulting object. Returns an error if the payload cannot be
// deserialized into the AWS provider spec.
func convertProvider(provider []byte, provisionerName string) (runtime.Object, error) {
	aws, err := v1alpha1.DeserializeProvider(provider)
	if err != nil {
		return nil, fmt.Errorf("converting provider, %w", err)
	}
	// Wrap the deserialized AWS spec in an AWSNodeTemplate shell so the
	// standard template-to-nodeclass conversion path can be reused.
	nodeTemplate := &v1alpha1.AWSNodeTemplate{
		ObjectMeta: metav1.ObjectMeta{
			Name: provisionerName,
		},
	}
	nodeTemplate.Spec.AWS = *aws
	return convertNodeTemplate(nodeTemplate), nil
}
4 changes: 2 additions & 2 deletions tools/karpenter-convert/pkg/convert/testdata/nodeclass.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,10 @@ spec:
role: $KARPENTER_NODE_ROLE
securityGroupSelectorTerms:
- tags:
karpenter.sh/discovery: $MY_CLUSTER_NAME
karpenter.sh/discovery: karpenter-demo
subnetSelectorTerms:
- tags:
karpenter.sh/discovery: $MY_CLUSTER_NAME
karpenter.sh/discovery: karpenter-demo
tags:
MyBackupTag: "yes"
MyTag: "1234"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,10 @@ spec:
role: $KARPENTER_NODE_ROLE
securityGroupSelectorTerms:
- tags:
karpenter.sh/discovery: eks-workshop-camigration
karpenter.sh/discovery: karpenter-demo
subnetSelectorTerms:
- tags:
karpenter.sh/discovery: eks-workshop-camigration
karpenter.sh/discovery: karpenter-demo
tags:
app.kubernetes.io/created-by: eks-workshop
app.kubernetes.io/created-by: karpenter-demo
aws-node-termination-handler/managed: "true"
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
apiVersion: karpenter.sh/v1beta1
kind: NodePool
metadata:
name: default
spec:
disruption:
consolidationPolicy: WhenUnderutilized
expireAfter: Never
template:
metadata:
annotations:
example.com/owner: my-team
labels:
billing-team: my-team
spec:
nodeClassRef:
name: default
requirements:
- key: kubernetes.io/arch
operator: Exists
- key: karpenter.sh/capacity-type
operator: In
values:
- spot
- on-demand
- key: kubernetes.io/os
operator: In
values:
- linux
- key: karpenter.k8s.aws/instance-category
operator: In
values:
- c
- m
- r
- key: karpenter.k8s.aws/instance-generation
operator: Gt
values:
- "2"
startupTaints:
- effect: NoSchedule
key: example.com/another-taint
taints:
- effect: NoSchedule
key: example.com/special-taint
---
apiVersion: karpenter.k8s.aws/v1beta1
kind: EC2NodeClass
metadata:
name: default
spec:
amiFamily: AL2
blockDeviceMappings:
- deviceName: /dev/xvda
ebs:
deleteOnTermination: true
encrypted: true
iops: 10000
kmsKeyID: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
snapshotID: snap-0123456789
throughput: 125
volumeSize: 100Gi
volumeType: gp3
context: test-context-id
metadataOptions:
httpEndpoint: disabled
httpProtocolIPv6: enabled
httpPutResponseHopLimit: 2
httpTokens: required
role: $KARPENTER_NODE_ROLE
securityGroupSelectorTerms:
- tags:
karpenter.sh/discovery: karpenter-demo
subnetSelectorTerms:
- tags:
karpenter.sh/discovery: karpenter-demo
tags:
app.kubernetes.io/created-by: karpenter-demo
aws-node-termination-handler/managed: "true"
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,9 @@ spec:
aws::name: "test name"
MyTag: '*'
subnetSelector:
karpenter.sh/discovery: $MY_CLUSTER_NAME
karpenter.sh/discovery: karpenter-demo
securityGroupSelector:
karpenter.sh/discovery: $MY_CLUSTER_NAME
karpenter.sh/discovery: karpenter-demo
blockDeviceMappings:
- deviceName: /dev/xvdb
ebs:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,11 @@ metadata:
uid: 1da65734-30f3-47c2-9bec-8b671a71fb67
spec:
securityGroupSelector:
karpenter.sh/discovery: eks-workshop-camigration
karpenter.sh/discovery: karpenter-demo
subnetSelector:
karpenter.sh/discovery: eks-workshop-camigration
karpenter.sh/discovery: karpenter-demo
tags:
app.kubernetes.io/created-by: eks-workshop
app.kubernetes.io/created-by: karpenter-demo
aws-node-termination-handler/managed: "true"
status:
securityGroups:
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
name: default
spec:
consolidation:
enabled: true
taints:
- key: example.com/special-taint
effect: NoSchedule
startupTaints:
- key: example.com/another-taint
effect: NoSchedule
labels:
billing-team: my-team
annotations:
example.com/owner: "my-team"
requirements:
- key: "kubernetes.io/arch"
operator: Exists
- key: "karpenter.sh/capacity-type" # If not included, the webhook for the AWS cloud provider will default to on-demand
operator: In
values: ["spot", "on-demand"]
provider:
subnetSelector:
karpenter.sh/discovery: karpenter-demo
securityGroupSelector:
karpenter.sh/discovery: karpenter-demo
context: test-context-id
instanceProfile: "KarpenterInstanceProfile-karpenter-demo"
tags:
app.kubernetes.io/created-by: karpenter-demo
aws-node-termination-handler/managed: "true"
metadataOptions:
httpEndpoint: disabled
httpProtocolIPv6: enabled
httpPutResponseHopLimit: 2
httpTokens: required
blockDeviceMappings:
- deviceName: /dev/xvda
ebs:
volumeSize: 100Gi
volumeType: gp3
iops: 10000
encrypted: true
kmsKeyID: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
deleteOnTermination: true
throughput: 125
snapshotID: snap-0123456789

0 comments on commit 7fc98c7

Please sign in to comment.