Skip to content

Commit

Permalink
OCM-6318 | ci: Build up deprovision step
Browse files Browse the repository at this point in the history
  • Loading branch information
jameszwang committed May 8, 2024
1 parent e232b45 commit a1b23ac
Show file tree
Hide file tree
Showing 4 changed files with 123 additions and 64 deletions.
5 changes: 3 additions & 2 deletions tests/e2e/e2e_tear_down_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,12 +9,13 @@ import (
)

var _ = Describe("ROSA CLI Test", func() {
It("Deprovision cluster",
It("DestroyClusterByProfile",
labels.Critical,
labels.Destroy,
func() {
client := rosacli.NewClient()
var errs = PH.DestroyClusterByProfile(client, true)
profile := PH.LoadProfileYamlFileByENV()
var errs = PH.DestroyClusterByProfile(profile, client, true)
Expect(errs).To(BeEmpty())
})
})
29 changes: 29 additions & 0 deletions tests/utils/profilehandler/data_cleaner.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
package profilehandler

import (
"github.com/openshift-online/ocm-common/pkg/aws/aws_client"
"github.com/openshift-online/ocm-common/pkg/test/kms_key"
"github.com/openshift-online/ocm-common/pkg/test/vpc_client"
)

// DeleteVPCChain resolves the VPC identified by vpcID in the given AWS
// region and deletes it by delegating to the vpc_client helper's
// DeleteVPCChain. Returns the lookup error if the VPC cannot be resolved.
func DeleteVPCChain(vpcID string, region string) error {
	client, err := vpc_client.GenerateVPCByID(vpcID, region)
	if err != nil {
		return err
	}
	return client.DeleteVPCChain()
}

// ScheduleKMSDesiable schedules deletion of the given KMS key in the
// specified region by delegating to kms_key.ScheduleKeyDeletion.
// NOTE(review): the name misspells "Disable"; kept as-is because existing
// callers reference it — rename in a coordinated change.
func ScheduleKMSDesiable(kmsKey string, region string) error {
	return kms_key.ScheduleKeyDeletion(kmsKey, region)
}

// DeleteAuditLogRoleArn deletes the audit-log IAM role identified by arn,
// using a freshly created AWS client for the given region.
// NOTE(review): the second argument to DeleteRoleAndPolicy is false —
// presumably controls managed-policy handling; confirm against aws_client.
func DeleteAuditLogRoleArn(arn string, region string) error {
	awsClient, err := aws_client.CreateAWSClient("", region)
	if err != nil {
		return err
	}
	return awsClient.DeleteRoleAndPolicy(arn, false)
}
40 changes: 40 additions & 0 deletions tests/utils/profilehandler/parse_info.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
package profilehandler

import (
"encoding/json"
"github.com/openshift/rosa/tests/ci/config"
"github.com/openshift/rosa/tests/utils/common"
"github.com/openshift/rosa/tests/utils/log"
)

// ParseUserData Get user data from resources.json file
func ParseUserData() (*UserData, error) {
var ud *UserData
udContent, err := common.ReadFileContent(config.Test.UserDataFile)
if err != nil {
log.Logger.Errorf("Error happened when read user data: %s", err.Error())
return nil, err
}
err = json.Unmarshal([]byte(udContent), &ud)
if err != nil {
log.Logger.Errorf("Error happend when parse resource file data to UserData struct: %s", err.Error())
return nil, err
}
return ud, err
}

// ParserClusterDetail Get the cluster info from cluster-detail.json file
func ParserClusterDetail() (*ClusterDetail, error) {
var cd *ClusterDetail
cdContent, err := common.ReadFileContent(config.Test.ClusterDetailFile)
if err != nil {
log.Logger.Errorf("Error happened when read cluster detail: %s", err.Error())
return nil, err
}
err = json.Unmarshal([]byte(cdContent), &cd)
if err != nil {
log.Logger.Errorf("Error happend when parse cluster detail file to ClusterDetail struct: %s", err.Error())
return nil, err
}
return cd, err
}
113 changes: 51 additions & 62 deletions tests/utils/profilehandler/profile_handler.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
package profilehandler

import (
"encoding/json"
"fmt"
"strings"
"time"
Expand Down Expand Up @@ -686,67 +685,50 @@ func CreateClusterByProfile(profile *Profile, client *rosacli.Client, waitForClu
func WaitForClusterUninstalled(client *rosacli.Client, cluster string, timeoutMin int) error {

endTime := time.Now().Add(time.Duration(timeoutMin) * time.Minute)
sleepTime := 0
for time.Now().Before(endTime) {
output, err := client.Cluster.DescribeClusterAndReflect(cluster)
output, err := client.Cluster.DescribeCluster(cluster)
desc, err := client.Cluster.ReflectClusterDescription(output)

if err != nil {
outputInfo, err := client.Cluster.DescribeCluster(cluster)
if strings.Contains(outputInfo.String(), "There is no cluster with identifier or name '"+cluster+"'") {
if strings.Contains(output.String(), fmt.Sprintf("There is no cluster with identifier or name '%s'", cluster)) {
log.Logger.Infof("Cluster %s has been deleted.", cluster)
return nil
} else {
return err
}
}

switch output.State {
default:
if strings.Contains(output.State, CON.Uninstalling) {
time.Sleep(2 * time.Minute)
continue
}
if strings.Contains(output.State, CON.Ready) {
log.Logger.Infof("Cluster is in status of %v, wait for uninstalling", CON.Ready)
if sleepTime >= 6 {
return fmt.Errorf("cluster stuck to %s status for more than 6 mins.", output.State)
}
sleepTime += 2
time.Sleep(2 * time.Minute)
continue
}
if strings.Contains(output.State, CON.Error) {
log.Logger.Errorf("Cluster is in %s status now. Recording the installation log", CON.Error)
RecordClusterInstallationLog(client, cluster)
return fmt.Errorf("cluster %s is in %s state with reason: %s",
cluster, CON.Error, output.State)
}
return fmt.Errorf("unknown cluster state %s", output.State)
return err
}

if strings.Contains(desc.State, con.Uninstalling) {
time.Sleep(2 * time.Minute)
continue
}
return fmt.Errorf("Cluster %s is in status of %s which won't be deleted, stop waiting", cluster, desc.State)
}
return fmt.Errorf("timeout for cluster ready waiting after %d mins", timeoutMin)
return fmt.Errorf("timeout for waiting for cluster deletion finished after %d mins", timeoutMin)
}

func DestroyClusterByProfile(client *rosacli.Client, waitForClusterUninstall bool) (errors []error) {
func DestroyClusterByProfile(profile *Profile, client *rosacli.Client, waitForClusterUninstall bool) (errors []error) {
var (
ud UserData
cd ClusterDetail
ud *UserData
cd *ClusterDetail
clusterService rosacli.ClusterService
errDeleteCluster error
rosaClient *rosacli.Client
ocmResourceService rosacli.OCMResourceService
)
region := profile.Region
rosaClient = rosacli.NewClient()
ocmResourceService = rosaClient.OCMResource

// get cluster info from cluster detail file
cdContent, err := common.ReadFileContent(config.Test.ClusterDetailFile)
cd, err := ParserClusterDetail()
if err != nil {
log.Logger.Errorf("Error happened when read cluster detail: %s", err.Error())
errors = append(errors, err)
return
}
fmt.Println(cdContent)
err = json.Unmarshal([]byte(cdContent), &cd)
// get user data from resource file
ud, err = ParseUserData()
if err != nil {
log.Logger.Errorf("Error happend when parse cluster detail file to ClusterDetail struct: %s", err.Error())
errors = append(errors, err)
return
}
Expand All @@ -757,6 +739,7 @@ func DestroyClusterByProfile(client *rosacli.Client, waitForClusterUninstall boo
if errDeleteCluster != nil {
log.Logger.Errorf("Error happened when delete cluster: %s", output.String())
errors = append(errors, errDeleteCluster)
return
}
if waitForClusterUninstall {
log.Logger.Infof("Waiting for the cluster %s to uninstalled", cd.ClusterID)
Expand All @@ -768,37 +751,43 @@ func DestroyClusterByProfile(client *rosacli.Client, waitForClusterUninstall boo
log.Logger.Infof("Delete cluster %s successfully.", cd.ClusterID)
}
}

// get user data from resource file
udContent, err := common.ReadFileContent(config.Test.UserDataFile)
if err != nil {
log.Logger.Errorf("Error happened when read user data: %s", err.Error())
errors = append(errors, err)
return
}
err = json.Unmarshal([]byte(udContent), &ud)
if err != nil {
log.Logger.Errorf("Error happend when parse resource file data to UserData struct: %s", err.Error())
}

// delete KMS key
// schedule KMS key
if ud.KMSKey != "" {
fmt.Printf("kms key: %s", ud.KMSKey)
// DeleteKMSKeyDummy()
log.Logger.Infof("kms key: %s", ud.KMSKey)
err = ScheduleKMSDesiable(ud.KMSKey, region)
if err != nil {
log.Logger.Errorf("Error happened when schedule kms key: %s", err.Error())
errors = append(errors, err)
} else {
log.Logger.Infof("Schedule kms key successfully for cluster: %s", cd.ClusterID)
}
}
// delete audit log arn
if ud.AuditLogArn != "" {
fmt.Printf("audit log arn: %s", ud.AuditLogArn)
// DeleteAuditlogDummy()
log.Logger.Infof("audit log arn: %s", ud.AuditLogArn)
auditLogArnName := strings.Split(ud.AuditLogArn, "/")[1]
err = DeleteAuditLogRoleArn(auditLogArnName, region)
if err != nil {
log.Logger.Errorf("Error happened when delete audit log arn: %s", err.Error())
errors = append(errors, err)
} else {
log.Logger.Infof("Delete audit log arn successfully for cluster: %s", cd.ClusterID)
}
}
// delete vpc chain
if ud.VpcID != "" {
fmt.Printf("vpc id: %s", ud.VpcID)
// DeleteVPCChainDummy()
log.Logger.Infof("vpc id: %s", ud.VpcID)
err = DeleteVPCChain(ud.VpcID, region)
if err != nil {
log.Logger.Errorf("Error happened when delete vpc chain: %s", err.Error())
errors = append(errors, err)
} else {
log.Logger.Infof("Delete vpc chain successfully for cluster: %s", cd.ClusterID)
}
}
// delete operator roles
if ud.OperatorRolesPrefix != "" {
fmt.Printf("operator role prefix: %s", ud.OperatorRolesPrefix)
log.Logger.Infof("operator role prefix: %s", ud.OperatorRolesPrefix)
_, err := ocmResourceService.DeleteOperatorRoles("--prefix", ud.OperatorRolesPrefix, "--mode", "auto", "-y")
if err != nil {
log.Logger.Errorf("Error happened when delete operator role: %s", err.Error())
Expand All @@ -809,7 +798,7 @@ func DestroyClusterByProfile(client *rosacli.Client, waitForClusterUninstall boo
}
// delete oidc config id
if ud.OIDCConfigID != "" {
fmt.Printf("oidc config id: %s", ud.OIDCConfigID)
log.Logger.Infof("oidc config id: %s", ud.OIDCConfigID)
_, err := ocmResourceService.DeleteOIDCConfig("--oidc-config-id", ud.OIDCConfigID, "--mode", "auto", "-y")
if err != nil {
log.Logger.Errorf("Error happened when delete oidc config id: %s", err.Error())
Expand All @@ -820,7 +809,7 @@ func DestroyClusterByProfile(client *rosacli.Client, waitForClusterUninstall boo
}
// delete account roles
if ud.AccountRolesPrefix != "" && errDeleteCluster == nil {
fmt.Printf("accout role prefix: %s", ud.AccountRolesPrefix)
log.Logger.Infof("accout role prefix: %s", ud.AccountRolesPrefix)
_, err := ocmResourceService.DeleteAccountRole("--mode", "auto", "--prefix", ud.AccountRolesPrefix, "-y")
if err != nil {
log.Logger.Errorf("Error happened when delete account roles: %s", err.Error())
Expand Down

0 comments on commit a1b23ac

Please sign in to comment.