Skip to content

Commit

Permalink
added support for EKS cluster import
Browse files Browse the repository at this point in the history
Signed-off-by: Asutosh Palai <asupalai@gmail.com>
  • Loading branch information
asutoshpalai committed Jul 25, 2023
1 parent 6a3bda9 commit 6dc94a4
Show file tree
Hide file tree
Showing 6 changed files with 185 additions and 21 deletions.
37 changes: 37 additions & 0 deletions internal/client/ekscluster/cluster_resource.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,13 @@ SPDX-License-Identifier: MPL-2.0
package ekscluster

import (
"fmt"
"net/http"
"net/url"

"github.com/pkg/errors"

clienterrors "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/errors"
"github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/transport"
"github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper"
eksmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/ekscluster"
Expand Down Expand Up @@ -40,6 +45,8 @@ type ClientService interface {

EksClusterResourceServiceGet(fn *eksmodel.VmwareTanzuManageV1alpha1EksclusterFullName) (*eksmodel.VmwareTanzuManageV1alpha1EksclusterGetEksClusterResponse, error)

EksClusterResourceServiceGetByID(id string) (*eksmodel.VmwareTanzuManageV1alpha1EksclusterGetEksClusterResponse, error)

EksClusterResourceServiceUpdate(request *eksmodel.VmwareTanzuManageV1alpha1EksclusterCreateUpdateEksClusterRequest) (*eksmodel.VmwareTanzuManageV1alpha1EksclusterCreateUpdateEksClusterResponse, error)
}

Expand Down Expand Up @@ -94,6 +101,36 @@ func (c *Client) EksClusterResourceServiceGet(fn *eksmodel.VmwareTanzuManageV1al
return clusterResponse, err
}

/*
EksClusterResourceServiceGetByID gets an eks cluster by its ID.

It issues a list request filtered on the cluster UID and requires the result
to contain exactly one cluster: zero matches map to a 404, multiple matches
to a 417, both wrapped as client errors.
*/
func (c *Client) EksClusterResourceServiceGetByID(id string) (*eksmodel.VmwareTanzuManageV1alpha1EksclusterGetEksClusterResponse, error) {
	params := url.Values{
		"query": []string{fmt.Sprintf("uid=\"%s\"", id)},
	}

	listResp := &eksmodel.VmwareTanzuManageV1alpha1EksclusterListEksClustersResponse{}
	reqURL := helper.ConstructRequestURL(apiVersionAndGroup).AppendQueryParams(params).String()

	if err := c.Get(reqURL, listResp); err != nil {
		return nil, err
	}

	// The UID is unique, so anything other than exactly one match is an error.
	switch len(listResp.EksClusters) {
	case 0:
		return nil, clienterrors.ErrorWithHTTPCode(http.StatusNotFound, errors.New("cluster list by ID was empty"))
	case 1:
		return &eksmodel.VmwareTanzuManageV1alpha1EksclusterGetEksClusterResponse{
			EksCluster: listResp.EksClusters[0],
		}, nil
	default:
		return nil, clienterrors.ErrorWithHTTPCode(http.StatusExpectationFailed, errors.New("cluster list by ID returned more than one cluster"))
	}
}

/*
EksClusterResourceServiceUpdate overwrites an existing eks cluster.
*/
Expand Down
43 changes: 43 additions & 0 deletions internal/models/ekscluster/method_list.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
/*
Copyright © 2023 VMware, Inc. All Rights Reserved.
SPDX-License-Identifier: MPL-2.0
*/

package models

import (
"github.com/go-openapi/swag"
)

// VmwareTanzuManageV1alpha1EksclusterListEksClustersResponse Response from listing EksClusters.
//
// swagger:model vmware.tanzu.manage.v1alpha1.ekscluster.ListEksClustersResponse
type VmwareTanzuManageV1alpha1EksclusterListEksClustersResponse struct {

	// List of eksclusters matching the list request's filter.
	EksClusters []*VmwareTanzuManageV1alpha1EksclusterEksCluster `json:"eksClusters"`

	// Total count of matching clusters, as reported by the API
	// (a string in the wire format, not an integer).
	TotalCount string `json:"totalCount,omitempty"`
}

// MarshalBinary interface implementation. Serializes the response to JSON
// bytes; a nil receiver marshals to nil without error, per go-openapi
// convention.
func (m *VmwareTanzuManageV1alpha1EksclusterListEksClustersResponse) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}

	return nil, nil
}

// UnmarshalBinary interface implementation. Decodes JSON bytes into the
// receiver; on decode failure the receiver is left untouched.
func (m *VmwareTanzuManageV1alpha1EksclusterListEksClustersResponse) UnmarshalBinary(b []byte) error {
	var decoded VmwareTanzuManageV1alpha1EksclusterListEksClustersResponse

	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}

	*m = decoded

	return nil
}
27 changes: 18 additions & 9 deletions internal/resources/ekscluster/data_source.go
Original file line number Diff line number Diff line change
Expand Up @@ -115,18 +115,27 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m
// always run
d.SetId(resp.EksCluster.Meta.UID)

err = setResourceData(d, resp.EksCluster, npresp.Nodepools)
if err != nil {
return diag.FromErr(errors.Wrap(err, "failed to set resource data for cluster read"))
}

return diags
}

func setResourceData(d *schema.ResourceData, eksCluster *eksmodel.VmwareTanzuManageV1alpha1EksclusterEksCluster, remoteNodepools []*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolNodepool) error {
status := map[string]interface{}{
// TODO: add condition
"platform_version": resp.EksCluster.Status.PlatformVersion,
"phase": resp.EksCluster.Status.Phase,
"platform_version": eksCluster.Status.PlatformVersion,
"phase": eksCluster.Status.Phase,
}

if err := d.Set(StatusKey, status); err != nil {
return diag.FromErr(err)
return errors.Wrapf(err, "Failed to set status for the cluster %s", eksCluster.FullName.Name)
}

if err := d.Set(common.MetaKey, common.FlattenMeta(resp.EksCluster.Meta)); err != nil {
return diag.FromErr(err)
if err := d.Set(common.MetaKey, common.FlattenMeta(eksCluster.Meta)); err != nil {
return errors.Wrap(err, "Failed to set meta for the cluster")
}

_, tfNodepools := constructEksClusterSpec(d)
Expand All @@ -136,7 +145,7 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m

nodepools := make([]*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolDefinition, len(tfNodepools))

for _, np := range npresp.Nodepools {
for _, np := range remoteNodepools {
npDef := &eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolDefinition{
Info: &eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolInfo{
Description: np.Meta.Description,
Expand All @@ -159,11 +168,11 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m
}
}

if err := d.Set(specKey, flattenClusterSpec(resp.EksCluster.Spec, nodepools)); err != nil {
return diag.FromErr(err)
if err := d.Set(specKey, flattenClusterSpec(eksCluster.Spec, nodepools)); err != nil {
return errors.Wrapf(err, "Failed to set the spec for cluster %s", eksCluster.FullName.Name)
}

return diags
return nil
}

// Returns mapping of nodepool names to their positions in the array.
Expand Down
19 changes: 16 additions & 3 deletions internal/resources/ekscluster/helpers.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,13 +15,13 @@ func nodepoolSpecEqual(spec1 *eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepo
spec1.CapacityType == spec2.CapacityType &&
setEquality(spec1.InstanceTypes, spec2.InstanceTypes) &&
structEqual(spec1.LaunchTemplate, spec2.LaunchTemplate) &&
reflect.DeepEqual(spec1.NodeLabels, spec2.NodeLabels) &&
mapEqual(spec1.NodeLabels, spec2.NodeLabels) &&
nodepoolRemoteAccessEqual(spec1.RemoteAccess, spec2.RemoteAccess) &&
spec1.RoleArn == spec2.RoleArn &&
spec1.RootDiskSize == spec2.RootDiskSize &&
structEqual(spec1.ScalingConfig, spec2.ScalingConfig) &&
setEquality(spec1.SubnetIds, spec2.SubnetIds) &&
reflect.DeepEqual(spec1.Tags, spec2.Tags) &&
mapEqual(spec1.Tags, spec2.Tags) &&
nodepoolTaintsEqual(spec1.Taints, spec2.Taints) &&
structEqual(spec1.UpdateConfig, spec2.UpdateConfig)
}
Expand All @@ -44,7 +44,7 @@ func clusterConfigEqual(config1, config2 *eksmodel.VmwareTanzuManageV1alpha1Eksc
return structEqual(config1.KubernetesNetworkConfig, config2.KubernetesNetworkConfig) &&
structEqual(config1.Logging, config2.Logging) &&
config1.RoleArn == config2.RoleArn &&
reflect.DeepEqual(config1.Tags, config2.Tags) &&
mapEqual(config1.Tags, config2.Tags) &&
config1.Version == config2.Version &&
clusterVPCConfigEqual(config1.Vpc, config2.Vpc)
}
Expand Down Expand Up @@ -77,6 +77,19 @@ func structEqual[T any](a, b *T) bool {
return reflect.DeepEqual(a, b)
}

// mapEqual reports whether two maps hold the same entries, treating a nil map
// and an empty map as equal — a case reflect.DeepEqual deliberately reports
// as unequal.
func mapEqual[K comparable, V any](a, b map[K]V) bool {
	switch {
	case len(a) != len(b):
		// Different sizes can never match; also cheap short-circuit.
		return false
	case len(a) == 0:
		// Both empty (nil or not) — considered equal.
		return true
	default:
		return reflect.DeepEqual(a, b)
	}
}

func nodepoolTaintsEqual(taints1, taints2 []*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolTaint) bool {
if len(taints1) != len(taints2) {
return false
Expand Down
66 changes: 57 additions & 9 deletions internal/resources/ekscluster/resource_ekscluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,13 +33,6 @@ var ignoredTagsPrefix = "tmc.cloud.vmware.com/"

const defaultTimeout = 3 * time.Minute

var TMCGeneratedTags []string = []string{
"tmc.cloud.vmware.com/tmc-creator",
"tmc.cloud.vmware.com/tmc-credential",
"tmc.cloud.vmware.com/tmc-managed",
"tmc.cloud.vmware.com/tmc-org",
}

func ResourceTMCEKSCluster() *schema.Resource {
return &schema.Resource{
Schema: clusterSchema,
Expand All @@ -49,7 +42,10 @@ func ResourceTMCEKSCluster() *schema.Resource {
},
UpdateContext: resourceClusterInPlaceUpdate,
DeleteContext: resourceClusterDelete,
Description: "Tanzu Mission Control EKS Cluster Resource",
Importer: &schema.ResourceImporter{
StateContext: resourceClusterImporter,
},
Description: "Tanzu Mission Control EKS Cluster Resource",
}
}

Expand Down Expand Up @@ -85,6 +81,9 @@ var clusterSchema = map[string]*schema.Schema{
Description: "Wait timeout duration until cluster resource reaches READY state. Accepted timeout duration values like 5s, 45m, or 3h, higher than zero",
Default: "default",
Optional: true,
DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool {
return true
},
},
}

Expand Down Expand Up @@ -169,6 +168,17 @@ var configSchema = &schema.Schema{
Optional: true,
ForceNew: false,
MaxItems: 1,
// Suppress the diff between not being declared and all the values being false
DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
lastDotIndex := strings.LastIndex(k, ".")
if lastDotIndex == -1 {
return false
}

k = k[:lastDotIndex]
v1, v2 := d.GetChange(k)
return reflect.DeepEqual(v1, v2)
},
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
apiServerKey: {
Expand Down Expand Up @@ -571,11 +581,49 @@ func resourceClusterInPlaceUpdate(ctx context.Context, d *schema.ResourceData, m
return dataSourceTMCEKSClusterRead(ctx, d, m)
}

// resourceClusterImporter implements `terraform import` for a TMC EKS
// cluster. The import ID is the cluster's TMC UID; the cluster and its
// nodepools are fetched by that UID and flattened into Terraform state.
func resourceClusterImporter(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) {
	config := m.(authctx.TanzuContext)

	id := d.Id()
	if id == "" {
		// Fixed article: "a TMC", not "an TMC".
		return nil, errors.New("ID is needed to import a TMC EKS cluster")
	}

	// Resolve the UID to a full cluster object (credential/region/name).
	resp, err := config.TMCConnection.EKSClusterResourceService.EksClusterResourceServiceGetByID(id)
	if err != nil {
		return nil, errors.Wrapf(err, "Unable to get Tanzu Mission Control EKS cluster entry for id %s", id)
	}

	// Nodepools are a separate API object but part of this resource's state.
	npresp, err := config.TMCConnection.EKSNodePoolResourceService.EksNodePoolResourceServiceList(resp.EksCluster.FullName)
	if err != nil {
		return nil, errors.Wrapf(err, "Unable to get Tanzu Mission Control EKS nodepools for cluster %s", resp.EksCluster.FullName.Name)
	}

	// The full-name components are top-level schema keys and must be set
	// explicitly before the shared state-flattening helper runs.
	if err = d.Set(CredentialNameKey, resp.EksCluster.FullName.CredentialName); err != nil {
		return nil, errors.Wrapf(err, "Failed to set credential name for the cluster %s", resp.EksCluster.FullName.Name)
	}

	if err = d.Set(RegionKey, resp.EksCluster.FullName.Region); err != nil {
		return nil, errors.Wrapf(err, "Failed to set region for the cluster %s", resp.EksCluster.FullName.Name)
	}

	if err = d.Set(NameKey, resp.EksCluster.FullName.Name); err != nil {
		return nil, errors.Wrapf(err, "Failed to set name for the cluster %s", resp.EksCluster.FullName.Name)
	}

	// Reuse the read-path flattening so imported state matches a normal read.
	err = setResourceData(d, resp.EksCluster, npresp.Nodepools)
	if err != nil {
		return nil, errors.Wrapf(err, "Failed to set resource data during import for %s", resp.EksCluster.FullName.Name)
	}

	return []*schema.ResourceData{d}, nil
}

func handleClusterDiff(config authctx.TanzuContext, tmcCluster *eksmodel.VmwareTanzuManageV1alpha1EksclusterEksCluster, meta *objectmetamodel.VmwareTanzuCoreV1alpha1ObjectMeta, clusterSpec *eksmodel.VmwareTanzuManageV1alpha1EksclusterSpec) error {
updateCluster := false

if meta.Description != tmcCluster.Meta.Description ||
!reflect.DeepEqual(meta.Labels, tmcCluster.Meta.Labels) {
!mapEqual(meta.Labels, tmcCluster.Meta.Labels) {
updateCluster = true
tmcCluster.Meta.Description = meta.Description
tmcCluster.Meta.Labels = meta.Labels
Expand Down
14 changes: 14 additions & 0 deletions internal/resources/ekscluster/resource_ekscluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,11 @@ func setupHTTPMocks(t *testing.T, clusterName string) {
EksCluster: getModel,
}

listResponse := eksmodel.VmwareTanzuManageV1alpha1EksclusterListEksClustersResponse{
EksClusters: []*eksmodel.VmwareTanzuManageV1alpha1EksclusterEksCluster{getModel},
TotalCount: "1",
}

// GET Nodepools mock setup
nodepools := make([]*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolNodepool, 0)
nodepoolRequests := make([]*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolAPIRequest, 0)
Expand Down Expand Up @@ -186,6 +191,7 @@ func setupHTTPMocks(t *testing.T, clusterName string) {
// Setup HTTP Responders
postEndpoint := fmt.Sprintf("https://%s/v1alpha1/eksclusters", endpoint)
getClusterEndpoint := fmt.Sprintf("https://%s/v1alpha1/eksclusters/%s", endpoint, clusterName)
listClusterEndpoint := fmt.Sprintf("https://%s/v1alpha1/eksclusters?query=uid%%3D%%22%s%%22", endpoint, postResponseModel.Meta.UID)
postNodepoolsEndpoint := fmt.Sprintf("https://%s/v1alpha1/eksclusters/%s/nodepools", endpoint, clusterName)
getClusterNodepoolsEndpoint := fmt.Sprintf("https://%s/v1alpha1/eksclusters/%s/nodepools", endpoint, clusterName)
deleteEndpoint := getClusterEndpoint
Expand All @@ -196,6 +202,9 @@ func setupHTTPMocks(t *testing.T, clusterName string) {
httpmock.RegisterResponder("GET", getClusterEndpoint,
bodyInspectingResponder(t, nil, 200, getResponse))

httpmock.RegisterResponder("GET", listClusterEndpoint,
bodyInspectingResponder(t, nil, 200, listResponse))

httpmock.RegisterResponder("POST", postNodepoolsEndpoint,
nodepoolsBodyInspectingResponder(t, nodepoolRequests, 200, nodepoolResponses))

Expand Down Expand Up @@ -294,6 +303,11 @@ func TestAcceptanceForMkpClusterResource(t *testing.T) {
checkResourceAttributes(provider, clusterConfig["CreateEksCluster"]...),
),
},
{
ResourceName: testhelper.EksClusterResourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
t.Log("cluster resource acceptance test complete!")
Expand Down

0 comments on commit 6dc94a4

Please sign in to comment.