From da73f9da608abf2c0959c558908fecf5d563aed2 Mon Sep 17 00:00:00 2001 From: Gianluca Arbezzano Date: Thu, 7 May 2020 12:59:37 +0200 Subject: [PATCH] fix device tags The tags generated by the machine controller were not merged with the ones passed from the MachineSpec. This PR fixes the issue. Now we can use those labels to filter devices via packet api. --- api/v1alpha3/packetcluster_types.go | 11 ++++++++++ api/v1alpha3/zz_generated.deepcopy.go | 22 ++++++++++++++++++- controllers/packetcluster_controller.go | 9 ++++++++ controllers/packetmachine_controller.go | 3 ++- pkg/cloud/packet/client.go | 5 +++-- templates/cluster-template.yaml | 28 +++++++++++++++++++++---- 6 files changed, 70 insertions(+), 8 deletions(-) diff --git a/api/v1alpha3/packetcluster_types.go b/api/v1alpha3/packetcluster_types.go index 30ad3da0c..821a1c8b9 100644 --- a/api/v1alpha3/packetcluster_types.go +++ b/api/v1alpha3/packetcluster_types.go @@ -32,6 +32,14 @@ type PacketClusterSpec struct { ProjectID string `json:"projectID"` } +// APIEndpoint represents a reachable Kubernetes API endpoint. +type APIEndpoint struct { + // The hostname on which the API server is serving. + Host string `json:"host"` + // The port on which the API server is serving. + Port int `json:"port"` +} + // PacketClusterStatus defines the observed state of PacketCluster type PacketClusterStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Ready denotes that the cluster (infrastructure) is ready. // +optional Ready bool `json:"ready"` + // APIEndpoints represents the endpoints to communicate with the control plane. 
+ // +optional + APIEndpoints []APIEndpoint `json:"apiEndpoints,omitempty"` } // +kubebuilder:subresource:status diff --git a/api/v1alpha3/zz_generated.deepcopy.go b/api/v1alpha3/zz_generated.deepcopy.go index 1accfdf40..55efedfa1 100644 --- a/api/v1alpha3/zz_generated.deepcopy.go +++ b/api/v1alpha3/zz_generated.deepcopy.go @@ -26,13 +26,28 @@ import ( "sigs.k8s.io/cluster-api/errors" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIEndpoint) DeepCopyInto(out *APIEndpoint) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIEndpoint. +func (in *APIEndpoint) DeepCopy() *APIEndpoint { + if in == nil { + return nil + } + out := new(APIEndpoint) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PacketCluster) DeepCopyInto(out *PacketCluster) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketCluster. @@ -103,6 +118,11 @@ func (in *PacketClusterSpec) DeepCopy() *PacketClusterSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PacketClusterStatus) DeepCopyInto(out *PacketClusterStatus) { *out = *in + if in.APIEndpoints != nil { + in, out := &in.APIEndpoints, &out.APIEndpoints + *out = make([]APIEndpoint, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketClusterStatus. 
diff --git a/controllers/packetcluster_controller.go b/controllers/packetcluster_controller.go index 722103d6e..19404e4d4 100644 --- a/controllers/packetcluster_controller.go +++ b/controllers/packetcluster_controller.go @@ -18,6 +18,7 @@ package controllers import ( "context" + "time" "github.com/go-logr/logr" "github.com/pkg/errors" @@ -68,6 +69,14 @@ func (r *PacketClusterReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, re return ctrl.Result{}, err } + if cluster == nil { + logger.Info("OwnerCluster is not set yet. Requeuing...") + return ctrl.Result{ + Requeue: true, + RequeueAfter: 2 * time.Second, + }, nil + } + // Create the cluster scope clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{ Logger: logger, diff --git a/controllers/packetmachine_controller.go b/controllers/packetmachine_controller.go index 9b17881f9..617022e28 100644 --- a/controllers/packetmachine_controller.go +++ b/controllers/packetmachine_controller.go @@ -208,8 +208,9 @@ func (r *PacketMachineReconciler) reconcile(ctx context.Context, machineScope *s } else { tags = append(tags, infrastructurev1alpha3.WorkerTag) } + name := machineScope.Name() - dev, err = r.PacketClient.NewDevice(name, clusterScope.PacketCluster.Spec.ProjectID, machineScope.PacketMachine.Spec) + dev, err = r.PacketClient.NewDevice(name, clusterScope.PacketCluster.Spec.ProjectID, machineScope.PacketMachine.Spec, tags) if err != nil { errs := fmt.Errorf("failed to create machine %s: %v", name, err) machineScope.SetErrorReason(capierrors.CreateMachineError) diff --git a/pkg/cloud/packet/client.go b/pkg/cloud/packet/client.go index 4743b2ceb..c3da8b43a 100644 --- a/pkg/cloud/packet/client.go +++ b/pkg/cloud/packet/client.go @@ -43,7 +43,8 @@ func (p *PacketClient) GetDevice(deviceID string) (*packngo.Device, error) { return dev, err } -func (p *PacketClient) NewDevice(hostname, project string, spec infrav1.PacketMachineSpec) (*packngo.Device, error) { +func (p *PacketClient) NewDevice(hostname, 
project string, spec infrav1.PacketMachineSpec, extraTags []string) (*packngo.Device, error) { + tags := append(spec.Tags, extraTags...) serverCreateOpts := &packngo.DeviceCreateRequest{ Hostname: hostname, ProjectID: project, @@ -51,7 +52,7 @@ func (p *PacketClient) NewDevice(hostname, project string, spec infrav1.PacketMa BillingCycle: spec.BillingCycle, Plan: spec.MachineType, OS: spec.OS, - Tags: spec.Tags, + Tags: tags, } dev, _, err := p.Client.Devices.Create(serverCreateOpts) diff --git a/templates/cluster-template.yaml b/templates/cluster-template.yaml index da37f3e59..743c62099 100644 --- a/templates/cluster-template.yaml +++ b/templates/cluster-template.yaml @@ -2,7 +2,7 @@ kind: KubeadmConfig apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 metadata: - name: my-control-plane1-config + name: "${CLUSTER_NAME}-control-plane1-config" spec: initConfiguration: nodeRegistration: @@ -46,7 +46,7 @@ spec: configRef: kind: KubeadmConfig apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 - name: my-control-plane1-config + name: "${CLUSTER_NAME}-control-plane1-config" infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 kind: PacketMachine @@ -78,7 +78,7 @@ spec: configRef: kind: KubeadmConfig apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 - name: my-control-plane1-config + name: "${CLUSTER_NAME}-worker0-config" infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 kind: PacketMachine @@ -110,7 +110,7 @@ spec: configRef: kind: KubeadmConfig apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 - name: my-control-plane1-config + name: "${CLUSTER_NAME}-worker1-config" infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 kind: PacketMachine @@ -129,3 +129,23 @@ spec: sshKeys: - "${SSH_KEY}" tags: [] +--- +kind: KubeadmConfig +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${CLUSTER_NAME}-worker1-config" +spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + eviction-hard: 
nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% +--- +kind: KubeadmConfig +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${CLUSTER_NAME}-worker0-config" +spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%