From 1ffac51633ce8896ee60564897b8029ce1c84193 Mon Sep 17 00:00:00 2001
From: Enxebre
Date: Wed, 8 Aug 2018 18:43:57 +0200
Subject: [PATCH 1/3] use ignition

---
 examples/machine.yaml |  14 ++---
 machineactuator.go    | 128 +++++++++++++++++++++++-------------------
 2 files changed, 77 insertions(+), 65 deletions(-)

diff --git a/examples/machine.yaml b/examples/machine.yaml
index 04770953..8aa82828 100644
--- a/examples/machine.yaml
+++ b/examples/machine.yaml
@@ -17,7 +17,7 @@ spec:
       value:
         apiVersion: awsproviderconfig/v1alpha1
         kind: AWSClusterProviderConfig
-        clusterId: test
+        clusterId: meh.tectonic.kuwit.rocks
         clusterVersionRef:
           namespace: test
           name: test
@@ -31,7 +31,7 @@ spec:
          sslSecret:
            name: test-certs
          region: eu-west-1
-         keyPairName: test
+         keyPairName: tectonic
          defaultHardwareSpec:
            aws:
              instanceType: t1.micro
@@ -49,7 +49,7 @@
 apiVersion: "cluster.k8s.io/v1alpha1"
 kind: Machine
 metadata:
-  name: test
+  name: extra-worker
   namespace: test
   generateName: vs-master-
  labels:
@@ -59,12 +59,12 @@ spec:
       value:
         apiVersion: awsproviderconfig/v1alpha1
         kind: AWSMachineProviderConfig
-        clusterId: test
+        clusterId: meh.tectonic.kuwit.rocks
         clusterHardware:
           aws:
             accountSecret:
               name: test-aws-creds
-            keyPairName: test
+            keyPairName: tectonic
             region: eu-west-1
             sshSecret:
               name: test-ssh-key
@@ -76,8 +76,8 @@ spec:
           instanceType: t1.micro
       infra: false
       vmImage:
-        # CoreOS-stable-1520.5.0
-        awsImage: ami-03f6257a
+        # CoreOS-beta-1828.3.0-hvm
+        awsImage: ami-0518e1ac70d8a3389
       versions:
         kubelet: 1.10.1
         controlPlane: 1.10.1
diff --git a/machineactuator.go b/machineactuator.go
index 599305fe..b2553f0c 100644
--- a/machineactuator.go
+++ b/machineactuator.go
@@ -27,8 +27,8 @@ import (
 	"k8s.io/apimachinery/pkg/api/equality"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/kubernetes"
 
 	capicommon "sigs.k8s.io/cluster-api/pkg/apis/cluster/common"
 	clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
 
@@ -37,10 +37,10 @@ import (
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
+	awsconfigv1 "github.com/enxebre/cluster-api-provider-aws/awsproviderconfig/v1alpha1"
 	cov1 "github.com/enxebre/cluster-api-provider-aws/awsproviderconfig/v1alpha1"
 	"github.com/openshift/cluster-operator/pkg/controller"
 	clustoplog "github.com/openshift/cluster-operator/pkg/logging"
-	awsconfigv1 "github.com/enxebre/cluster-api-provider-aws/awsproviderconfig/v1alpha1"
 )
 
 const (
@@ -79,8 +79,8 @@ var stateMask int64 = 0xFF
 
 // Actuator is the AWS-specific actuator for the Cluster API machine controller
 type Actuator struct {
-	kubeClient    kubernetes.Interface
-	clusterClient clusterclient.Interface
+	kubeClient              kubernetes.Interface
+	clusterClient           clusterclient.Interface
 	//codecFactory serializer.CodecFactory
 	defaultAvailabilityZone string
 	logger                  *log.Entry
@@ -103,15 +103,15 @@ func NewActuator(kubeClient kubernetes.Interface, clusterClient clusterclient.In
 	}
 
 	actuator := &Actuator{
-		kubeClient:    kubeClient,
-		clusterClient: clusterClient,
+		kubeClient:              kubeClient,
+		clusterClient:           clusterClient,
 		//codecFactory: coapi.Codecs,
 		defaultAvailabilityZone: defaultAvailabilityZone,
 		logger:                  logger,
 		clientBuilder:           NewClient,
 		userDataGenerator:       generateUserData,
 		awsProviderConfigCodec:  codec,
-		scheme: scheme,
+		scheme:                  scheme,
 	}
 	return actuator
 }
@@ -185,7 +185,7 @@ func (a *Actuator) CreateMachine(cluster *clusterv1.Cluster, machine *clusterv1.
 	}
 
 	// Describe VPC
-	vpcName := awsClusterProviderConfig.ClusterDeploymentSpec.ClusterID
+	vpcName := "meh.tectonic.kuwit.rocks"
 	vpcNameFilter := "tag:Name"
 	describeVpcsRequest := ec2.DescribeVpcsInput{
 		Filters: []*ec2.Filter{{Name: &vpcNameFilter, Values: []*string{&vpcName}}},
@@ -259,10 +259,12 @@ func (a *Actuator) CreateMachine(cluster *clusterv1.Cluster, machine *clusterv1.
 
 	// Add tags to the created machine
 	tagList := []*ec2.Tag{
-		{Key: aws.String("clusterid"), Value: aws.String(awsClusterProviderConfig.ClusterDeploymentSpec.ClusterID)},
-		{Key: aws.String("host-type"), Value: aws.String(hostType)},
-		{Key: aws.String("sub-host-type"), Value: aws.String(subHostType)},
-		{Key: aws.String("kubernetes.io/cluster/" + awsClusterProviderConfig.ClusterDeploymentSpec.ClusterID), Value: aws.String(awsClusterProviderConfig.ClusterDeploymentSpec.ClusterID)},
+		//{Key: aws.String("clusterid"), Value: aws.String(awsClusterProviderConfig.ClusterDeploymentSpec.ClusterID)},
+		//{Key: aws.String("host-type"), Value: aws.String(hostType)},
+		//{Key: aws.String("sub-host-type"), Value: aws.String(subHostType)},
+		////{Key: aws.String("kubernetes.io/cluster/" + awsClusterProviderConfig.ClusterDeploymentSpec.ClusterID), Value: aws.String(awsClusterProviderConfig.ClusterDeploymentSpec.ClusterID)},
+		{Key: aws.String("kubernetes.io/cluster/meh"), Value: aws.String("owned")},
+		{Key: aws.String("tectonicClusterID"), Value: aws.String("447c6a4c-92a9-0266-3a23-9e3495006e24")},
 		{Key: aws.String("Name"), Value: aws.String(machine.Name)},
 	}
 	tagInstance := &ec2.TagSpecification{
@@ -275,24 +277,24 @@ func (a *Actuator) CreateMachine(cluster *clusterv1.Cluster, machine *clusterv1.
 	}
 
 	// For now, these are fixed
-	blkDeviceMappings := []*ec2.BlockDeviceMapping{
-		{
-			DeviceName: aws.String("/dev/sda"),
-			Ebs: &ec2.EbsBlockDevice{
-				DeleteOnTermination: aws.Bool(true),
-				VolumeSize:          aws.Int64(100),
-				VolumeType:          aws.String("gp2"),
-			},
-		},
-		//{
-		//	DeviceName: aws.String("/dev/sdb"),
-		//	Ebs: &ec2.EbsBlockDevice{
-		//		DeleteOnTermination: aws.Bool(true),
-		//		VolumeSize:          aws.Int64(100),
-		//		VolumeType:          aws.String("gp2"),
-		//	},
-		//},
-	}
+	//blkDeviceMappings := []*ec2.BlockDeviceMapping{
+	//	{
+	//		DeviceName: aws.String("/dev/sda"),
+	//		Ebs: &ec2.EbsBlockDevice{
+	//			DeleteOnTermination: aws.Bool(true),
+	//			VolumeSize:          aws.Int64(100),
+	//			VolumeType:          aws.String("gp2"),
+	//		},
+	//	},
+	//	//{
+	//	//	DeviceName: aws.String("/dev/sdb"),
+	//	//	Ebs: &ec2.EbsBlockDevice{
+	//	//		DeleteOnTermination: aws.Bool(true),
+	//	//		VolumeSize:          aws.Int64(100),
+	//	//		VolumeType:          aws.String("gp2"),
+	//	//	},
+	//	//},
+	//}
 
 	// Only compute nodes should get user data, and it's quite important that masters do not as the
 	// AWS actuator for these is running on the root CO cluster currently, and we do not want to leak
@@ -301,7 +303,11 @@ func (a *Actuator) CreateMachine(cluster *clusterv1.Cluster, machine *clusterv1.
 	//if err != nil {
 	//	return nil, err
 	//}
-	userDataEnc := base64.StdEncoding.EncodeToString([]byte(""))
+	//userData, err := GenerateIgnConfig()
+	//if err != nil {
+	//	return nil, err
+	//}
+	userDataEnc := base64.StdEncoding.EncodeToString([]byte(userDataTemplate))
 
 	inputConfig := ec2.RunInstancesInput{
 		ImageId: describeAMIResult.Images[0].ImageId,
@@ -312,10 +318,10 @@ func (a *Actuator) CreateMachine(cluster *clusterv1.Cluster, machine *clusterv1.
 		IamInstanceProfile: &ec2.IamInstanceProfileSpecification{
 			Name: aws.String(iamRole(machine)),
 		},
-		BlockDeviceMappings: blkDeviceMappings,
-		TagSpecifications:   []*ec2.TagSpecification{tagInstance, tagVolume},
-		NetworkInterfaces:   networkInterfaces,
-		UserData:            &userDataEnc,
+		//BlockDeviceMappings: blkDeviceMappings,
+		TagSpecifications: []*ec2.TagSpecification{tagInstance, tagVolume},
+		NetworkInterfaces: networkInterfaces,
+		UserData:          &userDataEnc,
 		InstanceInitiatedShutdownBehavior: aws.String(shutdownBehavior),
 	}
 
@@ -538,35 +544,41 @@ func getClusterID(machine *clusterv1.Machine) (string, error) {
 	//}
 	//return coMachineSetSpec.ClusterID, nil
 	//TODO: get this dynamically
-	return "test", nil
+	return "meh.tectonic.kuwit.rocks", nil
 }
 
 // template for user data
 // takes the following parameters:
 // 1 - type of machine (infra/compute)
 // 2 - base64-encoded bootstrap.kubeconfig
-const userDataTemplate = `#cloud-config
-write_files:
-- path: /root/openshift_bootstrap/openshift_settings.yaml
-  owner: 'root:root'
-  permissions: '0640'
-  content: |
-    openshift_group_type: {{ .NodeType }}
-{{- if .IsNode }}
-- path: /etc/origin/node/bootstrap.kubeconfig
-  owner: 'root:root'
-  permissions: '0640'
-  encoding: b64
-  content: {{ .BootstrapKubeconfig }}
-{{- end }}
-runcmd:
-- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml]
-{{- if .IsNode }}
-- [ systemctl, restart, systemd-hostnamed]
-- [ systemctl, restart, NetworkManager]
-- [ systemctl, enable, origin-node]
-- [ systemctl, start, origin-node]
-{{- end }}`
+const userDataTemplate = `{
+  "ignition": {
+    "config": {
+      "append": [
+        {
+          "source": "https://meh-tnc.tectonic.kuwit.rocks:80/config/worker",
+          "verification": {}
+        }
+      ]
+    },
+    "security": {
+      "tls": {
+        "certificateAuthorities": [
+          {
+            "source": "data:text/plain;charset=utf-8;base64,LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURDVENDQWZHZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFtTVJJd0VBWURWUVFMRXdsdmNHVnUKYzJocFpuUXhFREFPQmdOVkJBTVRCM0p2YjNRdFkyRXdIaGNOTVRnd09EQTRNVEl4T1RJeVdoY05Namd3T0RBMQpNVEl4T1RJeVdqQW1NUkl3RUFZRFZRUUxFd2x2Y0dWdWMyaHBablF4RURBT0JnTlZCQU1UQjNKdmIzUXRZMkV3CmdnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM3VHF6NWJ5Y0xEYjI4SkhWeUV2VWEKVHNldjUyKzdpay9zbitlelZFTFZZMXc5ODJCdTdDVnFKR01uR09pWGl4RVZCVW1qenBVUTJaektCaU8xbWIyNwpwM0Mza0lHZS9vUVRRT3pQRUVKY2o1WFpUM1lTMmhSNWtKQ3FZMm1QTE1iaGllMFBEbUh5NG00Q28yNG1vRGx1CkE3Y1BKV0lrd2NxMUZvL1JMbVdveXpjaWJRdjJzeWNCRjNpUFdJeFZ1ZzdyWDRYQ3lIQnVjaGZwYytQdGxIVkgKc1A3WGxDYVJGcFM4OTRrdnFGcXp1dnoway9aM3V2R2VsbHl1QktIWWN1UjUzcTJjVno1UUpmMFFQVjhBVFpHcwo5UEpWcVgycmNpMUtrZ0phVDVISElYVTY1N0RvTlpHWnBqZVNNekVsV0dJeHdQWFJDc3c5YUNyVGFibFhpeFNmCkFnTUJBQUdqUWpCQU1BNEdBMVVkRHdFQi93UUVBd0lDcERBUEJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWQKRGdRV0JCUW1vSGhnYkQvaXZ0NEtKSWo3WlFkei9JQzBKREFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBS1ZYawpkSmNvdXpERnllRlFuNVN0VjI4dzBiRDJLcm1UVG1HYnFRV2J3NEt3amtiaEwrRHRxSUtsRmlGaWxyelBGdTdDCjU1aWRxeU9IYVh0VWQ1b05yYzhZbDQxME4vSUlsNUh1Y280TXhVUjBIUnFqTkhZS3VDWmF1NHMxYUFWanRoRVMKM2s2ZkVQTy9lTzBaMGwwOW1ZekhwenZZWWtrQ2RwOVROUDk0eHBMZTVvaC85OEMrODRncFliWnpISmY4NzNwTgpCRG5zUUJvVXZVTkxwSSt2YmZ2UEFoUU9STDFzMGdPRGpBQ2psQ0NJSGxlYTJqUXdYRy9EWEd2bjRPVUI4Y1lKCmIwMkdaMWxrRWp1WmkwTS92USs0bEhuUy8xT3RoYTh5dmZETTNkaGMrTGNQbDh2aGxoazU0L3VkOURoZkFDWVEKMlV0UkZ1UkZKUmhJdmVJZ1lRPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=",
+            "verification": {}
+          }
+        ]
+      }
+    },
+    "timeouts": {},
+    "version": "2.2.0"
+  },
+  "networkd": {},
+  "passwd": {},
+  "storage": {},
+  "systemd": {}
+}`
 
 type userDataParams struct {
 	NodeType string
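Patch 1 swaps the cloud-config template for a static Ignition 2.2 document that appends the worker config from the cluster's TNC endpoint. A minimal standalone sketch (not part of the series; the abbreviated workerIgn constant below is a stand-in for the full userDataTemplate) that sanity-checks such a payload before it is base64-encoded into EC2 user data:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// Abbreviated stand-in for the userDataTemplate constant added in patch 1.
const workerIgn = `{
  "ignition": {
    "config": {
      "append": [{"source": "https://meh-tnc.tectonic.kuwit.rocks:80/config/worker", "verification": {}}]
    },
    "version": "2.2.0"
  }
}`

func main() {
	// Ignition rejects malformed JSON at first boot, so catch that much up front.
	var cfg struct {
		Ignition struct {
			Version string `json:"version"`
			Config  struct {
				Append []struct {
					Source string `json:"source"`
				} `json:"append"`
			} `json:"config"`
		} `json:"ignition"`
	}
	if err := json.Unmarshal([]byte(workerIgn), &cfg); err != nil {
		panic(fmt.Sprintf("ignition config is not valid JSON: %v", err))
	}
	fmt.Println("version:", cfg.Ignition.Version)                 // "2.2.0"
	fmt.Println("appends:", cfg.Ignition.Config.Append[0].Source) // the TNC worker endpoint
	// This is exactly what CreateMachine puts into RunInstancesInput.UserData.
	fmt.Println("user data:", base64.StdEncoding.EncodeToString([]byte(workerIgn))[:24], "...")
}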
From 2f804a73d92c2fc130c7f90cf238594e923357a7 Mon Sep 17 00:00:00 2001
From: Enxebre
Date: Fri, 10 Aug 2018 08:50:53 +0200
Subject: [PATCH 2/3] ign from configMap

---
 client.go          | 12 ++++++++++++
 machineactuator.go | 43 ++++++++++++++++++++++++++++++-------------
 2 files changed, 42 insertions(+), 13 deletions(-)

diff --git a/client.go b/client.go
index a640c3a8..452ed65f 100644
--- a/client.go
+++ b/client.go
@@ -129,3 +129,15 @@ func NewClient(kubeClient kubernetes.Interface, mSpec *cov1.MachineSetSpec, name
 		elbClient: elb.New(s),
 	}, nil
 }
+
+func getIgn(kubeClient kubernetes.Interface) (string, error) {
+	ignConfig, err := kubeClient.CoreV1().ConfigMaps("kube-system").Get("ign-config", metav1.GetOptions{})
+	if err != nil {
+		return "", err
+	}
+	ignConfigWorker, ok := ignConfig.Data["worker"]
+	if !ok {
+		return "", nil
+	}
+	return ignConfigWorker, nil
+}
diff --git a/machineactuator.go b/machineactuator.go
index b2553f0c..f6a6820e 100644
--- a/machineactuator.go
+++ b/machineactuator.go
@@ -37,6 +37,7 @@ import (
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/aws/aws-sdk-go/service/elb"
 	awsconfigv1 "github.com/enxebre/cluster-api-provider-aws/awsproviderconfig/v1alpha1"
 	cov1 "github.com/enxebre/cluster-api-provider-aws/awsproviderconfig/v1alpha1"
 	"github.com/openshift/cluster-operator/pkg/controller"
@@ -88,6 +89,7 @@ type Actuator struct {
 	userDataGenerator func(master, infra bool) (string, error)
 	awsProviderConfigCodec *awsconfigv1.AWSProviderConfigCodec
 	scheme *runtime.Scheme
+	ignConfig func(kubeClient kubernetes.Interface) (string, error)
 }
 
 // NewActuator returns a new AWS Actuator
@@ -112,6 +114,7 @@ func NewActuator(kubeClient kubernetes.Interface, clusterClient clusterclient.In
 		userDataGenerator: generateUserData,
 		awsProviderConfigCodec: codec,
 		scheme: scheme,
+		ignConfig: getIgn,
 	}
 	return actuator
 }
@@ -263,6 +266,7 @@ func (a *Actuator) CreateMachine(cluster *clusterv1.Cluster, machine *clusterv1.
 	//{Key: aws.String("host-type"), Value: aws.String(hostType)},
 	//{Key: aws.String("sub-host-type"), Value: aws.String(subHostType)},
 	////{Key: aws.String("kubernetes.io/cluster/" + awsClusterProviderConfig.ClusterDeploymentSpec.ClusterID), Value: aws.String(awsClusterProviderConfig.ClusterDeploymentSpec.ClusterID)},
+	{Key: aws.String("clusterid"), Value: aws.String("meh.tectonic.kuwit.rocks")},
 	{Key: aws.String("kubernetes.io/cluster/meh"), Value: aws.String("owned")},
 	{Key: aws.String("tectonicClusterID"), Value: aws.String("447c6a4c-92a9-0266-3a23-9e3495006e24")},
 	{Key: aws.String("Name"), Value: aws.String(machine.Name)},
@@ -307,7 +311,14 @@ func (a *Actuator) CreateMachine(cluster *clusterv1.Cluster, machine *clusterv1.
 	//if err != nil {
 	//	return nil, err
 	//}
-	userDataEnc := base64.StdEncoding.EncodeToString([]byte(userDataTemplate))
+
+	//userDataEnc := base64.StdEncoding.EncodeToString([]byte(userDataTemplate))
+
+	ignConfig, err := a.ignConfig(a.kubeClient)
+	if err != nil {
+		return nil, fmt.Errorf("unable to obtain ignition config: %v", err)
+	}
+	userDataEnc := base64.StdEncoding.EncodeToString([]byte(ignConfig))
 
 	inputConfig := ec2.RunInstancesInput{
 		ImageId: describeAMIResult.Images[0].ImageId,
@@ -334,6 +345,8 @@ func (a *Actuator) CreateMachine(cluster *clusterv1.Cluster, machine *clusterv1.
 		mLog.Errorf("unexpected reservation creating instances: %v", runResult)
 		return nil, fmt.Errorf("unexpected reservation creating instance")
 	}
+
+	//addInstanceToELB(runResult.Instances[0], "", client)
 	return runResult.Instances[0], nil
 }
 
@@ -647,18 +660,7 @@ func iamRole(machine *clusterv1.Machine) string {
 
 func buildDescribeSecurityGroupsInput(vpcID, vpcName string, isMaster, isInfra bool) *ec2.DescribeSecurityGroupsInput {
 	groupNames := []*string{aws.String(vpcName)}
-	if isMaster {
-		groupNames = append(groupNames, aws.String(vpcName+"_master"))
-		groupNames = append(groupNames, aws.String(vpcName+"_master_k8s"))
-	}
-	if isInfra {
-		groupNames = append(groupNames, aws.String(vpcName+"_infra"))
-		groupNames = append(groupNames, aws.String(vpcName+"_infra_k8s"))
-	}
-	if !isMaster && !isInfra {
-		groupNames = append(groupNames, aws.String(vpcName+"_compute"))
-		groupNames = append(groupNames, aws.String(vpcName+"_compute_k8s"))
-	}
+	groupNames = append(groupNames, aws.String(vpcName+"_worker_sg"))
 
 	return &ec2.DescribeSecurityGroupsInput{
 		Filters: []*ec2.Filter{
@@ -667,3 +669,18 @@ func buildDescribeSecurityGroupsInput(vpcID, vpcName string, isMaster, isInfra b
 		},
 	}
 }
+
+func addInstanceToELB(instance *ec2.Instance, elbName string, client Client) error {
+	registerInput := elb.RegisterInstancesWithLoadBalancerInput{
+		Instances:        []*elb.Instance{{InstanceId: instance.InstanceId}},
+		LoadBalancerName: aws.String(elbName),
+	}
+
+	// This API call appears to be idempotent, so for now no need to check if the instance is
+	// registered first, we can just request that it be added.
+	_, err := client.RegisterInstancesWithLoadBalancer(&registerInput)
+	if err != nil {
+		return err
+	}
+	return nil
+}
From 2163e3f509a9d44b8978453caad9e81ba8a77882 Mon Sep 17 00:00:00 2001
From: Enxebre
Date: Fri, 10 Aug 2018 12:56:36 +0200
Subject: [PATCH 3/3] cleanup

---
 machineactuator.go | 217 +++++----------------------------------------
 utils.go           |  46 +++++-----
 2 files changed, 47 insertions(+), 216 deletions(-)

diff --git a/machineactuator.go b/machineactuator.go
index f6a6820e..1b4bf5a8 100644
--- a/machineactuator.go
+++ b/machineactuator.go
@@ -17,11 +17,9 @@ limitations under the License.
 package aws
 
 import (
-	"bytes"
 	"encoding/base64"
 	"fmt"
-	"io/ioutil"
-	"text/template"
+	"strings"
 
 	log "github.com/sirupsen/logrus"
 
@@ -37,7 +35,6 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/aws/aws-sdk-go/service/elb"
 	awsconfigv1 "github.com/enxebre/cluster-api-provider-aws/awsproviderconfig/v1alpha1"
 	cov1 "github.com/enxebre/cluster-api-provider-aws/awsproviderconfig/v1alpha1"
 	"github.com/openshift/cluster-operator/pkg/controller"
@@ -45,23 +42,8 @@ import (
 )
 
 const (
-	// Path to bootstrap kubeconfig. This needs to be mounted to the controller pod
-	// as a secret when running this controller.
-	bootstrapKubeConfig = "/etc/origin/master/bootstrap.kubeconfig"
-
-	// IAM role for infra/compute
-	defaultIAMRole = "openshift_node_describe_instances"
-
-	// IAM role for master
-	masterIAMRole = "openshift_master_launch_instances"
-
-	// Instance ID annotation
-	instanceIDAnnotation = "cluster-operator.openshift.io/aws-instance-id"
-
 	awsCredsSecretIDKey     = "awsAccessKeyId"
 	awsCredsSecretAccessKey = "awsSecretAccessKey"
-
-	ec2InstanceIDNotFoundCode = "InvalidInstanceID.NotFound"
 )
 
 // Instance tag constants
@@ -86,10 +68,14 @@ type Actuator struct {
 	defaultAvailabilityZone string
 	logger                  *log.Entry
 	clientBuilder           func(kubeClient kubernetes.Interface, mSpec *cov1.MachineSetSpec, namespace, region string) (Client, error)
-	userDataGenerator func(master, infra bool) (string, error)
-	awsProviderConfigCodec *awsconfigv1.AWSProviderConfigCodec
-	scheme *runtime.Scheme
-	ignConfig func(kubeClient kubernetes.Interface) (string, error)
+	//userDataGenerator func(master, infra bool) (string, error)
+	awsProviderConfigCodec *awsconfigv1.AWSProviderConfigCodec
+	scheme                 *runtime.Scheme
+	ignConfig              func(kubeClient kubernetes.Interface) (string, error)
+}
+
+func getWorkerRole() {
+
 }
 
 // NewActuator returns a new AWS Actuator
@@ -111,10 +97,10 @@ func NewActuator(kubeClient kubernetes.Interface, clusterClient clusterclient.In
 		defaultAvailabilityZone: defaultAvailabilityZone,
 		logger:                  logger,
 		clientBuilder:           NewClient,
-		userDataGenerator: generateUserData,
-		awsProviderConfigCodec: codec,
-		scheme: scheme,
-		ignConfig: getIgn,
+		//userDataGenerator: generateUserData,
+		awsProviderConfigCodec: codec,
+		scheme:                 scheme,
+		ignConfig:              getIgn,
 	}
 	return actuator
 }
@@ -188,7 +174,8 @@ func (a *Actuator) CreateMachine(cluster *clusterv1.Cluster, machine *clusterv1.
 	}
 
 	// Describe VPC
-	vpcName := "meh.tectonic.kuwit.rocks"
+	vpcName := awsProviderConfig.ClusterID
+	clusterName := strings.Split(vpcName, ".")[0]
 	vpcNameFilter := "tag:Name"
 	describeVpcsRequest := ec2.DescribeVpcsInput{
 		Filters: []*ec2.Filter{{Name: &vpcNameFilter, Values: []*string{&vpcName}}},
@@ -262,12 +249,8 @@ func (a *Actuator) CreateMachine(cluster *clusterv1.Cluster, machine *clusterv1.
 
 	// Add tags to the created machine
 	tagList := []*ec2.Tag{
-		//{Key: aws.String("clusterid"), Value: aws.String(awsClusterProviderConfig.ClusterDeploymentSpec.ClusterID)},
-		//{Key: aws.String("host-type"), Value: aws.String(hostType)},
-		//{Key: aws.String("sub-host-type"), Value: aws.String(subHostType)},
-		////{Key: aws.String("kubernetes.io/cluster/" + awsClusterProviderConfig.ClusterDeploymentSpec.ClusterID), Value: aws.String(awsClusterProviderConfig.ClusterDeploymentSpec.ClusterID)},
-		{Key: aws.String("clusterid"), Value: aws.String("meh.tectonic.kuwit.rocks")},
-		{Key: aws.String("kubernetes.io/cluster/meh"), Value: aws.String("owned")},
+		{Key: aws.String("clusterid"), Value: aws.String(vpcName)},
+		{Key: aws.String(fmt.Sprintf("kubernetes.io/cluster/%s", clusterName)), Value: aws.String("owned")},
 		{Key: aws.String("tectonicClusterID"), Value: aws.String("447c6a4c-92a9-0266-3a23-9e3495006e24")},
 		{Key: aws.String("Name"), Value: aws.String(machine.Name)},
 	}
@@ -280,40 +263,6 @@ func (a *Actuator) CreateMachine(cluster *clusterv1.Cluster, machine *clusterv1.
 		Tags: tagList[0:1],
 	}
 
-	// For now, these are fixed
-	//blkDeviceMappings := []*ec2.BlockDeviceMapping{
-	//	{
-	//		DeviceName: aws.String("/dev/sda"),
-	//		Ebs: &ec2.EbsBlockDevice{
-	//			DeleteOnTermination: aws.Bool(true),
-	//			VolumeSize:          aws.Int64(100),
-	//			VolumeType:          aws.String("gp2"),
-	//		},
-	//	},
-	//	//{
-	//	//	DeviceName: aws.String("/dev/sdb"),
-	//	//	Ebs: &ec2.EbsBlockDevice{
-	//	//		DeleteOnTermination: aws.Bool(true),
-	//	//		VolumeSize:          aws.Int64(100),
-	//	//		VolumeType:          aws.String("gp2"),
-	//	//	},
-	//	//},
-	//}
-
-	// Only compute nodes should get user data, and it's quite important that masters do not as the
-	// AWS actuator for these is running on the root CO cluster currently, and we do not want to leak
-	// root CO cluster bootstrap kubeconfigs to the target cluster.
-	//userData, err := a.userDataGenerator(controller.MachineHasRole(machine, capicommon.MasterRole), coMachineSetSpec.Infra)
-	//if err != nil {
-	//	return nil, err
-	//}
-	//userData, err := GenerateIgnConfig()
-	//if err != nil {
-	//	return nil, err
-	//}
-
-	//userDataEnc := base64.StdEncoding.EncodeToString([]byte(userDataTemplate))
-
 	ignConfig, err := a.ignConfig(a.kubeClient)
 	if err != nil {
 		return nil, fmt.Errorf("unable to obtain ignition config: %v", err)
 	}
@@ -327,7 +276,7 @@ func (a *Actuator) CreateMachine(cluster *clusterv1.Cluster, machine *clusterv1.
 		MaxCount: aws.Int64(1),
 		KeyName:  aws.String(awsClusterProviderConfig.ClusterDeploymentSpec.Hardware.AWS.KeyPairName),
 		IamInstanceProfile: &ec2.IamInstanceProfileSpecification{
-			Name: aws.String(iamRole(machine)),
+			Name: aws.String(iamRole(clusterName)),
 		},
 		//BlockDeviceMappings: blkDeviceMappings,
 		TagSpecifications: []*ec2.TagSpecification{tagInstance, tagVolume},
@@ -379,7 +328,8 @@ func (a *Actuator) DeleteMachine(machine *clusterv1.Machine) error {
 		return fmt.Errorf("error getting EC2 client: %v", err)
 	}
 
-	instances, err := GetRunningInstances(machine, client)
+	clusterId := awsProviderConfig.ClusterID
+	instances, err := GetRunningInstances(machine, client, clusterId)
 	if err != nil {
 		return err
 	}
@@ -419,7 +369,7 @@ func (a *Actuator) Update(cluster *clusterv1.Cluster, machine *clusterv1.Machine
 		return fmt.Errorf("unable to obtain EC2 client: %v", err)
 	}
 
-	instances, err := GetRunningInstances(machine, client)
+	instances, err := GetRunningInstances(machine, client, awsProviderConfig.ClusterID)
 	mLog.Debugf("found %d instances for machine", len(instances))
 	if err != nil {
 		return err
@@ -475,7 +425,7 @@ func (a *Actuator) Exists(cluster *clusterv1.Cluster, machine *clusterv1.Machine
 		return false, fmt.Errorf("error getting EC2 client: %v", err)
 	}
 
-	instances, err := GetRunningInstances(machine, client)
+	instances, err := GetRunningInstances(machine, client, awsProviderConfig.ClusterID)
 	if err != nil {
 		return false, err
 	}
@@ -550,112 +500,8 @@ func (a *Actuator) updateStatus(machine *clusterv1.Machine, instance *ec2.Instan
 	return nil
 }
 
-func getClusterID(machine *clusterv1.Machine) (string, error) {
-	//coMachineSetSpec, err := controller.MachineSetSpecFromClusterAPIMachineSpec(&machine.Spec)
-	//if err != nil {
-	//	return "", err
-	//}
-	//return coMachineSetSpec.ClusterID, nil
-	//TODO: get this dynamically
-	return "meh.tectonic.kuwit.rocks", nil
-}
-
-// template for user data
-// takes the following parameters:
-// 1 - type of machine (infra/compute)
-// 2 - base64-encoded bootstrap.kubeconfig
-const userDataTemplate = `{
-  "ignition": {
-    "config": {
-      "append": [
-        {
-          "source": "https://meh-tnc.tectonic.kuwit.rocks:80/config/worker",
-          "verification": {}
-        }
-      ]
-    },
-    "security": {
-      "tls": {
-        "certificateAuthorities": [
-          {
-            "source": "data:text/plain;charset=utf-8;base64,LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURDVENDQWZHZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFtTVJJd0VBWURWUVFMRXdsdmNHVnUKYzJocFpuUXhFREFPQmdOVkJBTVRCM0p2YjNRdFkyRXdIaGNOTVRnd09EQTRNVEl4T1RJeVdoY05Namd3T0RBMQpNVEl4T1RJeVdqQW1NUkl3RUFZRFZRUUxFd2x2Y0dWdWMyaHBablF4RURBT0JnTlZCQU1UQjNKdmIzUXRZMkV3CmdnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM3VHF6NWJ5Y0xEYjI4SkhWeUV2VWEKVHNldjUyKzdpay9zbitlelZFTFZZMXc5ODJCdTdDVnFKR01uR09pWGl4RVZCVW1qenBVUTJaektCaU8xbWIyNwpwM0Mza0lHZS9vUVRRT3pQRUVKY2o1WFpUM1lTMmhSNWtKQ3FZMm1QTE1iaGllMFBEbUh5NG00Q28yNG1vRGx1CkE3Y1BKV0lrd2NxMUZvL1JMbVdveXpjaWJRdjJzeWNCRjNpUFdJeFZ1ZzdyWDRYQ3lIQnVjaGZwYytQdGxIVkgKc1A3WGxDYVJGcFM4OTRrdnFGcXp1dnoway9aM3V2R2VsbHl1QktIWWN1UjUzcTJjVno1UUpmMFFQVjhBVFpHcwo5UEpWcVgycmNpMUtrZ0phVDVISElYVTY1N0RvTlpHWnBqZVNNekVsV0dJeHdQWFJDc3c5YUNyVGFibFhpeFNmCkFnTUJBQUdqUWpCQU1BNEdBMVVkRHdFQi93UUVBd0lDcERBUEJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWQKRGdRV0JCUW1vSGhnYkQvaXZ0NEtKSWo3WlFkei9JQzBKREFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBS1ZYawpkSmNvdXpERnllRlFuNVN0VjI4dzBiRDJLcm1UVG1HYnFRV2J3NEt3amtiaEwrRHRxSUtsRmlGaWxyelBGdTdDCjU1aWRxeU9IYVh0VWQ1b05yYzhZbDQxME4vSUlsNUh1Y280TXhVUjBIUnFqTkhZS3VDWmF1NHMxYUFWanRoRVMKM2s2ZkVQTy9lTzBaMGwwOW1ZekhwenZZWWtrQ2RwOVROUDk0eHBMZTVvaC85OEMrODRncFliWnpISmY4NzNwTgpCRG5zUUJvVXZVTkxwSSt2YmZ2UEFoUU9STDFzMGdPRGpBQ2psQ0NJSGxlYTJqUXdYRy9EWEd2bjRPVUI4Y1lKCmIwMkdaMWxrRWp1WmkwTS92USs0bEhuUy8xT3RoYTh5dmZETTNkaGMrTGNQbDh2aGxoazU0L3VkOURoZkFDWVEKMlV0UkZ1UkZKUmhJdmVJZ1lRPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=",
-            "verification": {}
-          }
-        ]
-      }
-    },
-    "timeouts": {},
-    "version": "2.2.0"
-  },
-  "networkd": {},
-  "passwd": {},
-  "storage": {},
-  "systemd": {}
-}`
-
-type userDataParams struct {
-	NodeType            string
-	BootstrapKubeconfig string
-	IsNode              bool
-}
-
-func executeTemplate(isMaster, isInfra bool, bootstrapKubeconfig string) (string, error) {
-	var nodeType string
-	if isMaster {
-		nodeType = "master"
-	} else if isInfra {
-		nodeType = "infra"
-	} else {
-		nodeType = "compute"
-	}
-	params := userDataParams{
-		NodeType:            nodeType,
-		BootstrapKubeconfig: bootstrapKubeconfig,
-		IsNode:              !isMaster,
-	}
-
-	t, err := template.New("userdata").Parse(userDataTemplate)
-	if err != nil {
-		return "", err
-	}
-	var buf bytes.Buffer
-	err = t.Execute(&buf, params)
-	if err != nil {
-		return "", err
-	}
-	return buf.String(), nil
-}
-
-// generateUserData is a generator function used in the actuator to create the user data for a
-// specific type of machine.
-func generateUserData(isMaster, isInfra bool) (string, error) {
-	var bootstrapKubeconfig string
-	var err error
-	if !isMaster {
-		bootstrapKubeconfig, err = getBootstrapKubeconfig()
-		if err != nil {
-			return "", fmt.Errorf("cannot get bootstrap kubeconfig: %v", err)
-		}
-	}
-
-	return executeTemplate(isMaster, isInfra, bootstrapKubeconfig)
-}
-
-// getBootstrapKubeconfig reads the bootstrap kubeconfig expected to be mounted into the pod. This assumes
-// the actuator runs on a master which has such a kubeconfig for joining nodes to the cluster.
-func getBootstrapKubeconfig() (string, error) {
-	content, err := ioutil.ReadFile(bootstrapKubeConfig)
-	if err != nil {
-		return "", err
-	}
-	return base64.StdEncoding.EncodeToString(content), nil
-}
-
-func iamRole(machine *clusterv1.Machine) string {
-	if controller.MachineHasRole(machine, capicommon.MasterRole) {
-		return masterIAMRole
-	}
-	return defaultIAMRole
+func iamRole(clusterName string) string {
+	return fmt.Sprintf("%s-master-profile", clusterName)
 }
 
 func buildDescribeSecurityGroupsInput(vpcID, vpcName string, isMaster, isInfra bool) *ec2.DescribeSecurityGroupsInput {
@@ -669,18 +515,3 @@ func buildDescribeSecurityGroupsInput(vpcID, vpcName string, isMaster, isInfra b
 		},
 	}
 }
-
-func addInstanceToELB(instance *ec2.Instance, elbName string, client Client) error {
-	registerInput := elb.RegisterInstancesWithLoadBalancerInput{
-		Instances:        []*elb.Instance{{InstanceId: instance.InstanceId}},
-		LoadBalancerName: aws.String(elbName),
-	}
-
-	// This API call appears to be idempotent, so for now no need to check if the instance is
-	// registered first, we can just request that it be added.
-	_, err := client.RegisterInstancesWithLoadBalancer(&registerInput)
-	if err != nil {
-		return err
-	}
-	return nil
-}
diff --git a/utils.go b/utils.go
index 610f0949..594e29c4 100644
--- a/utils.go
+++ b/utils.go
@@ -73,43 +73,43 @@ func chooseNewest(instance1, instance2 *ec2.Instance) *ec2.Instance {
 
 // GetInstance returns the AWS instance for a given machine. If multiple instances match our machine,
 // the most recently launched will be returned. If no instance exists, an error will be returned.
-func GetInstance(machine *clusterv1.Machine, client Client) (*ec2.Instance, error) {
-	instances, err := GetRunningInstances(machine, client)
-	if err != nil {
-		return nil, err
-	}
-	if len(instances) == 0 {
-		return nil, fmt.Errorf("no instance found for machine: %s", machine.Name)
-	}
-
-	instance, _ := SortInstances(instances)
-	return instance, nil
-}
+//func GetInstance(machine *clusterv1.Machine, client Client) (*ec2.Instance, error) {
+//	instances, err := GetRunningInstances(machine, client)
+//	if err != nil {
+//		return nil, err
+//	}
+//	if len(instances) == 0 {
+//		return nil, fmt.Errorf("no instance found for machine: %s", machine.Name)
+//	}
+//
+//	instance, _ := SortInstances(instances)
+//	return instance, nil
+//}
 
 // GetRunningInstances returns all running instances that have a tag matching our machine name,
 // and cluster ID.
-func GetRunningInstances(machine *clusterv1.Machine, client Client) ([]*ec2.Instance, error) {
+func GetRunningInstances(machine *clusterv1.Machine, client Client, clusterId string) ([]*ec2.Instance, error) {
 	runningInstanceStateFilter := []*string{aws.String(ec2.InstanceStateNameRunning), aws.String(ec2.InstanceStateNamePending)}
-	return GetInstances(machine, client, runningInstanceStateFilter)
+	return GetInstances(machine, client, runningInstanceStateFilter, clusterId)
 }
 
 // GetStoppedInstances returns all stopped instances that have a tag matching our machine name,
 // and cluster ID.
-func GetStoppedInstances(machine *clusterv1.Machine, client Client) ([]*ec2.Instance, error) {
-	stoppedInstanceStateFilter := []*string{aws.String(ec2.InstanceStateNameStopped), aws.String(ec2.InstanceStateNameStopping)}
-	return GetInstances(machine, client, stoppedInstanceStateFilter)
-}
+//func GetStoppedInstances(machine *clusterv1.Machine, client Client) ([]*ec2.Instance, error) {
+//	stoppedInstanceStateFilter := []*string{aws.String(ec2.InstanceStateNameStopped), aws.String(ec2.InstanceStateNameStopping)}
+//	return GetInstances(machine, client, stoppedInstanceStateFilter)
+//}
 
 // GetInstances returns all instances that have a tag matching our machine name,
 // and cluster ID.
-func GetInstances(machine *clusterv1.Machine, client Client, instanceStateFilter []*string) ([]*ec2.Instance, error) {
+func GetInstances(machine *clusterv1.Machine, client Client, instanceStateFilter []*string, clusterId string) ([]*ec2.Instance, error) {
 	machineName := machine.Name
-	clusterID, err := getClusterID(machine)
-	if err != nil {
-		return []*ec2.Instance{}, fmt.Errorf("unable to get cluster ID for machine %q: %v", machine.Name, err)
-	}
+	clusterID := clusterId
+	//if err != nil {
+	//	return []*ec2.Instance{}, fmt.Errorf("unable to get cluster ID for machine %q: %v", machine.Name, err)
+	//}
 	requestFilters := []*ec2.Filter{
 		{
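Taken together, patch 3 derives every AWS-side name from the domain-style cluster ID instead of hardcoding it. A small standalone sketch of that convention (the values mirror the examples used throughout the series; the suffixes come from iamRole and buildDescribeSecurityGroupsInput):

package main

import (
	"fmt"
	"strings"
)

func main() {
	clusterID := "meh.tectonic.kuwit.rocks"
	// The first label of the cluster domain becomes the short cluster name.
	clusterName := strings.Split(clusterID, ".")[0] // "meh"

	fmt.Println("clusterid tag:        ", clusterID)
	fmt.Printf("ownership tag:         kubernetes.io/cluster/%s=owned\n", clusterName)
	fmt.Printf("IAM instance profile:  %s-master-profile\n", clusterName)
	fmt.Printf("worker security group: %s_worker_sg\n", clusterID)
}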