Commit

Merge pull request #217 from richardcase/azure_resource_group

refactor: change azure resource group
alexander-demicev committed Oct 19, 2023
2 parents ee02b86 + faa12fb commit 7cb2be1
Showing 10 changed files with 83 additions and 167 deletions.
9 changes: 9 additions & 0 deletions test/e2e/const.go
@@ -62,6 +62,15 @@ var (

//go:embed data/rancher/azure-cluster.yaml
V2ProvAzureCluster []byte

//go:embed data/cluster-templates/docker-kubeadm.yaml
CAPIDockerKubeadm []byte

//go:embed data/cluster-templates/aws-eks-mmp.yaml
CAPIAwsEKSMMP []byte

//go:embed data/cluster-templates/azure-aks-mmp.yaml
CAPIAzureAKSMMP []byte
)

const (
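For context, the new `//go:embed` directives above compile the cluster-template files into the test binary at build time, so the suites can reference them as package-level byte slices (e.g. `e2e.CAPIDockerKubeadm`) without reading the filesystem at run time. A minimal, self-contained sketch of the mechanism — a standalone illustration, not the repository's actual wiring, and the embedded path must exist relative to the source file:

```go
package main

import (
	_ "embed" // blank import enables //go:embed for plain []byte/string variables
	"fmt"
)

// The path is resolved relative to this source file at build time.
//go:embed data/cluster-templates/docker-kubeadm.yaml
var capiDockerKubeadm []byte

func main() {
	// The embedded bytes behave like any other package-level variable.
	fmt.Printf("embedded template is %d bytes\n", len(capiDockerKubeadm))
}
```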
8 changes: 4 additions & 4 deletions test/e2e/data/rancher/azure-cluster.yaml
@@ -23,7 +23,7 @@ spec:
machinePools:
- controlPlaneRole: true
dynamicSchemaSpec: '{"resourceFields":{"acceleratedNetworking":{"type":"boolean","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Specify
if an Accelerated Networking NIC should be created for your VM"},"availabilitySet":{"type":"string","default":{"stringValue":"docker-machine","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure
if an Accelerated Networking NIC should be created for your VM"},"availabilitySet":{"type":"string","default":{"stringValue":"highlander-e2e","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure
Availability Set to place the virtual machine into"},"availabilityZone":{"type":"string","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Specify
the Availability Zones the Azure resources should be created in"},"clientId":{"type":"string","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure
Service Principal Account ID (optional, browser auth is used if not specified)"},"clientSecret":{"type":"password","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure
@@ -45,19 +45,19 @@ spec:
the specified port number accessible from the Internet"},"plan":{"type":"string","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Purchase
plan for Azure Virtual Machine (in \u003cpublisher\u003e:\u003cproduct\u003e:\u003cplan\u003e
format)"},"privateIpAddress":{"type":"string","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Specify
a static private IP address for the machine"},"resourceGroup":{"type":"string","default":{"stringValue":"docker-machine","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure
a static private IP address for the machine"},"resourceGroup":{"type":"string","default":{"stringValue":"highlander-e2e","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure
Resource Group name (will be created if missing)"},"size":{"type":"string","default":{"stringValue":"Standard_D2_v2","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Size
for Azure Virtual Machine"},"sshUser":{"type":"string","default":{"stringValue":"docker-user","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Username
for SSH login"},"staticPublicIp":{"type":"boolean","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Assign
a static public IP address to the machine"},"storageType":{"type":"string","default":{"stringValue":"Standard_LRS","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Type
of Storage Account to host the OS Disk for the machine"},"subnet":{"type":"string","default":{"stringValue":"docker-machine","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure
of Storage Account to host the OS Disk for the machine"},"subnet":{"type":"string","default":{"stringValue":"highlander-e2e","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure
Subnet Name to be used within the Virtual Network"},"subnetPrefix":{"type":"string","default":{"stringValue":"192.168.0.0/16","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Private
CIDR block to be used for the new subnet, should comply RFC 1918"},"subscriptionId":{"type":"string","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure
Subscription ID"},"tags":{"type":"string","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Tags
to be applied to the Azure VM instance"},"tenantId":{"type":"string","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure
Tenant ID"},"updateDomainCount":{"type":"string","default":{"stringValue":"5","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Update
domain count to use for availability set"},"usePrivateIp":{"type":"boolean","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Use
private IP address of the machine to connect"},"vnet":{"type":"string","default":{"stringValue":"docker-machine-vnet","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure
private IP address of the machine to connect"},"vnet":{"type":"string","default":{"stringValue":"highlander-e2e-vnet","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure
Virtual Network name to connect the virtual machine (in [resourcegroup:]name
format)"}}}'
etcdRole: true
8 changes: 4 additions & 4 deletions test/e2e/data/rancher/azure-rke-config.yaml
@@ -6,7 +6,7 @@ metadata:
name: ${POOL_NAME}
namespace: fleet-default
acceleratedNetworking: false
availabilitySet: docker-machine
availabilitySet: highlander-e2e
availabilityZone: ""
diskSize: "30"
dockerPort: "2376"
@@ -30,13 +30,13 @@ openPort:
- 10251/tcp
- 10252/tcp
plan: ""
resourceGroup: docker-machine
resourceGroup: highlander-e2e
size: Standard_D2_v2
sshUser: docker-user
staticPublicIp: false
storageType: Standard_LRS
subnet: docker-machine
subnet: highlander-e2e
subnetPrefix: 192.168.0.0/16
updateDomainCount: "5"
usePrivateIp: false
vnet: docker-machine-vnet
vnet: highlander-e2e-vnet
@@ -17,7 +17,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package import_gitops
package specs

import (
"context"
158 changes: 26 additions & 132 deletions test/e2e/suites/embedded-capi-disabled/embedded_capi_disabled_test.go
@@ -20,147 +20,41 @@ limitations under the License.
package embedded_capi_disabled

import (
"fmt"
"os"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "sigs.k8s.io/controller-runtime/pkg/envtest/komega"

"github.com/drone/envsubst/v2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/envtest/komega"
"k8s.io/utils/ptr"

provisioningv1 "github.com/rancher-sandbox/rancher-turtles/internal/rancher/provisioning/v1"
"github.com/rancher-sandbox/rancher-turtles/test/e2e"
turtlesframework "github.com/rancher-sandbox/rancher-turtles/test/framework"
"github.com/rancher-sandbox/rancher-turtles/test/e2e/specs"
)

var _ = Describe("[v2prov] [Azure] Creating a cluster with v2prov should still work", Label(e2e.FullTestLabel), func() {
var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionality should work with namespace auto-import (embedded capi disable from start)", Label(e2e.FullTestLabel), func() {

BeforeEach(func() {
komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient())
komega.SetContext(ctx)
SetClient(setupClusterResult.BootstrapClusterProxy.GetClient())
SetContext(ctx)
})

It("Should create a RKE2 cluster in Azure", func() {
azSubId := e2eConfig.GetVariable(e2e.AzureSubIDVar)
Expect(azSubId).ToNot(BeEmpty(), "Azure Subscription ID is required")
azClientId := e2eConfig.GetVariable(e2e.AzureClientIDVar)
Expect(azSubId).ToNot(BeEmpty(), "Azure Client ID is required")
azClientSecret := e2eConfig.GetVariable(e2e.AzureClientSecretVar)
Expect(azSubId).ToNot(BeEmpty(), "Azure Client Secret is required")

rke2Version := e2eConfig.GetVariable(e2e.RKE2VersionVar)
Expect(rke2Version).ToNot(BeEmpty(), "RKE2 version is required")

credsSecretName := "cc-test99"
credsName := "az-ecm"
poolName := "az-test-pool"
clusterName := "az-cluster1"

lookupResult := &turtlesframework.RancherLookupUserResult{}
turtlesframework.RancherLookupUser(ctx, turtlesframework.RancherLookupUserInput{
Username: "admin",
ClusterProxy: setupClusterResult.BootstrapClusterProxy,
}, lookupResult)

turtlesframework.CreateSecret(ctx, turtlesframework.CreateSecretInput{
Creator: setupClusterResult.BootstrapClusterProxy.GetClient(),
Name: credsSecretName,
Namespace: "cattle-global-data",
Type: corev1.SecretTypeOpaque,
Data: map[string]string{
"azurecredentialConfig-clientId": azClientId,
"azurecredentialConfig-clientSecret": azClientSecret,
"azurecredentialConfig-environment": "AzurePublicCloud",
"azurecredentialConfig-subscriptionId": azSubId,
"azurecredentialConfig-tenantId": "",
},
Annotations: map[string]string{
"field.cattle.io/name": credsName,
"provisioning.cattle.io/driver": "azure",
"field.cattle.io/creatorId": lookupResult.User,
},
Labels: map[string]string{
"cattle.io/creator": "norman",
},
})

rkeConfig, err := envsubst.Eval(string(e2e.V2ProvAzureRkeConfig), func(s string) string {
switch s {
case "POOL_NAME":
return poolName
case "USER":
return lookupResult.User
default:
return os.Getenv(s)
}
})
Expect(err).ToNot(HaveOccurred())
Expect(setupClusterResult.BootstrapClusterProxy.Apply(ctx, []byte(rkeConfig))).To(Succeed(), "Failed apply Digital Ocean RKE config")

cluster, err := envsubst.Eval(string(e2e.V2ProvAzureCluster), func(s string) string {
switch s {
case "CLUSTER_NAME":
return clusterName
case "USER":
return lookupResult.User
case "CREDENTIAL_SECRET":
return fmt.Sprintf("cattle-global-data:%s", credsSecretName)
case "KUBERNETES_VERSION":
return rke2Version
case "AZ_CONFIG_NAME":
return poolName
default:
return os.Getenv(s)
}
})
Expect(err).ToNot(HaveOccurred())
Expect(setupClusterResult.BootstrapClusterProxy.Apply(ctx, []byte(cluster))).To(Succeed(), "Failed apply Digital Ocean cluster config")

By("Waiting for the rancher cluster record to appear")
rancherCluster := &provisioningv1.Cluster{ObjectMeta: metav1.ObjectMeta{
Namespace: "fleet-default",
Name: clusterName,
}}
Eventually(komega.Get(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())

By("Waiting for the rancher cluster to have a deployed agent")
Eventually(komega.Object(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-v2prov-create")...).Should(HaveField("Status.AgentDeployed", BeTrue()))

By("Waiting for the rancher cluster to be ready")
Eventually(komega.Object(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(HaveField("Status.Ready", BeTrue()))

By("Waiting for the CAPI cluster to be connectable using Rancher kubeconfig")
rancherKubeconfig := &turtlesframework.RancherGetClusterKubeconfigResult{}
turtlesframework.RancherGetClusterKubeconfig(ctx, turtlesframework.RancherGetClusterKubeconfigInput{
Getter: setupClusterResult.BootstrapClusterProxy.GetClient(),
SecretName: fmt.Sprintf("%s-kubeconfig", rancherCluster.Name),
Namespace: rancherCluster.Namespace,
RancherServerURL: hostName,
WriteToTempFile: true,
}, rancherKubeconfig)

rancherConnectRes := &turtlesframework.RunCommandResult{}
turtlesframework.RunCommand(ctx, turtlesframework.RunCommandInput{
Command: "kubectl",
Args: []string{
"--kubeconfig",
rancherKubeconfig.TempFilePath,
"get",
"nodes",
"--insecure-skip-tls-verify",
},
}, rancherConnectRes)
Expect(rancherConnectRes.Error).NotTo(HaveOccurred(), "Failed getting nodes with Rancher Kubeconfig")
Expect(rancherConnectRes.ExitCode).To(Equal(0), "Getting nodes return non-zero exit code")

By("Deleting cluster from Rancher")
err = setupClusterResult.BootstrapClusterProxy.GetClient().Delete(ctx, rancherCluster)
Expect(err).NotTo(HaveOccurred(), "Failed to delete rancher cluster")

By("Waiting for the rancher cluster record to be removed")
Eventually(komega.Get(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-azure-delete")...).Should(MatchError(ContainSubstring("not found")), "Rancher cluster should be deleted")
specs.CreateUsingGitOpsSpec(ctx, func() specs.CreateUsingGitOpsSpecInput {
return specs.CreateUsingGitOpsSpecInput{
E2EConfig: e2eConfig,
BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
ClusterctlConfigPath: flagVals.ConfigPath,
ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath,
ArtifactFolder: flagVals.ArtifactFolder,
ClusterTemplate: e2e.CAPIDockerKubeadm,
ClusterName: "highlander-e2e-cluster1",
ControlPlaneMachineCount: ptr.To[int](1),
WorkerMachineCount: ptr.To[int](1),
GitAddr: giteaResult.GitAddress,
GitAuthSecretName: e2e.AuthSecretName,
SkipCleanup: false,
SkipDeletionTest: false,
LabelNamespace: true,
RancherServerURL: hostName,
CAPIClusterCreateWaitName: "wait-rancher",
DeleteClusterWaitName: "wait-controllers",
}
})
})
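The inline test body removed above is now covered by the shared `specs.CreateUsingGitOpsSpec` helper. The underlying pattern — render an embedded template with `envsubst.Eval` and apply the result through the bootstrap cluster proxy — is the same one the deleted code used. Below is a reduced sketch of that pattern, assuming the proxy is the CAPI test framework's `ClusterProxy`; the helper and package names are illustrative, not the actual `specs` implementation.

```go
// Illustrative sketch only; not the actual specs package code.
package specs_sketch

import (
	"context"
	"os"

	"github.com/drone/envsubst/v2"
	"sigs.k8s.io/cluster-api/test/framework"
)

// renderAndApply mirrors the pattern the removed test body used: substitute
// ${PLACEHOLDER} values in an embedded template, then apply the result via
// the bootstrap cluster proxy.
func renderAndApply(ctx context.Context, proxy framework.ClusterProxy, template []byte, vars map[string]string) error {
	rendered, err := envsubst.Eval(string(template), func(key string) string {
		if v, ok := vars[key]; ok {
			return v
		}
		return os.Getenv(key) // fall back to the environment, as the old inline test did
	})
	if err != nil {
		return err
	}
	return proxy.Apply(ctx, []byte(rendered))
}
```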
22 changes: 22 additions & 0 deletions test/e2e/suites/embedded-capi-disabled/suite_test.go
@@ -58,6 +58,7 @@ var (
ctx = context.Background()

setupClusterResult *testenv.SetupTestClusterResult
giteaResult *testenv.DeployGiteaResult
)

func init() {
@@ -119,6 +120,8 @@ var _ = BeforeSuite(func() {
DefaultIngressClassPatch: e2e.IngressClassPatch,
})

// NOTE: deploy Rancher first with the embedded-cluster-api feature disabled,
// and then deploy Rancher Turtles.
testenv.DeployRancher(ctx, testenv.DeployRancherInput{
BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
HelmBinaryPath: flagVals.HelmBinaryPath,
@@ -158,6 +161,25 @@ var _ = BeforeSuite(func() {
"rancherTurtles.features.embedded-capi.disabled": "false",
},
})

giteaResult = testenv.DeployGitea(ctx, testenv.DeployGiteaInput{
BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
HelmBinaryPath: flagVals.HelmBinaryPath,
ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar),
ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar),
ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar),
ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar),
ValuesFilePath: "../../data/gitea/values.yaml",
Values: map[string]string{
"gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar),
"gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar),
},
RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"),
ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-getservice"),
AuthSecretName: e2e.AuthSecretName,
Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar),
Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar),
})
})

var _ = AfterSuite(func() {
43 changes: 17 additions & 26 deletions test/e2e/suites/import-gitops/import_gitops_test.go
@@ -20,25 +20,16 @@ limitations under the License.
package import_gitops

import (
_ "embed"

. "github.com/onsi/ginkgo/v2"
. "sigs.k8s.io/controller-runtime/pkg/envtest/komega"

"github.com/rancher-sandbox/rancher-turtles/test/e2e"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/envtest/komega"
. "sigs.k8s.io/controller-runtime/pkg/envtest/komega"

_ "embed"
)

var (
//go:embed cluster-templates/docker-kubeadm.yaml
dockerKubeadm []byte

//go:embed cluster-templates/aws-eks-mmp.yaml
awsEKSMMP []byte

//go:embed cluster-templates/azure-aks-mmp.yaml
azureAKSMMP []byte
"github.com/rancher-sandbox/rancher-turtles/test/e2e"
"github.com/rancher-sandbox/rancher-turtles/test/e2e/specs"
)

var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.ShortTestLabel, e2e.FullTestLabel), func() {
@@ -48,15 +48,15 @@ var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionalit
SetContext(ctx)
})

CreateUsingGitOpsSpec(ctx, func() CreateUsingGitOpsSpecInput {
return CreateUsingGitOpsSpecInput{
specs.CreateUsingGitOpsSpec(ctx, func() specs.CreateUsingGitOpsSpecInput {
return specs.CreateUsingGitOpsSpecInput{
E2EConfig: e2eConfig,
BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
ClusterctlConfigPath: flagVals.ConfigPath,
ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath,
ArtifactFolder: flagVals.ArtifactFolder,
ClusterTemplate: dockerKubeadm,
ClusterName: "hl-e2e-cluster1",
ClusterTemplate: e2e.CAPIDockerKubeadm,
ClusterName: "highlander-e2e-cluster1",
ControlPlaneMachineCount: ptr.To[int](1),
WorkerMachineCount: ptr.To[int](1),
GitAddr: giteaResult.GitAddress,
@@ -78,15 +78,15 @@ var _ = Describe("[AWS] [EKS] Create and delete CAPI cluster functionality shoul
komega.SetContext(ctx)
})

CreateUsingGitOpsSpec(ctx, func() CreateUsingGitOpsSpecInput {
return CreateUsingGitOpsSpecInput{
specs.CreateUsingGitOpsSpec(ctx, func() specs.CreateUsingGitOpsSpecInput {
return specs.CreateUsingGitOpsSpecInput{
E2EConfig: e2eConfig,
BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
ClusterctlConfigPath: flagVals.ConfigPath,
ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath,
ArtifactFolder: flagVals.ArtifactFolder,
ClusterTemplate: awsEKSMMP,
ClusterName: "hl-e2e-cluster2",
ClusterTemplate: e2e.CAPIAwsEKSMMP,
ClusterName: "highlander-e2e-cluster2",
ControlPlaneMachineCount: ptr.To[int](1),
WorkerMachineCount: ptr.To[int](1),
GitAddr: giteaResult.GitAddress,
@@ -108,14 +108,14 @@ var _ = Describe("[Azure] [AKS] Create and delete CAPI cluster functionality sho
SetContext(ctx)
})

CreateUsingGitOpsSpec(ctx, func() CreateUsingGitOpsSpecInput {
return CreateUsingGitOpsSpecInput{
specs.CreateUsingGitOpsSpec(ctx, func() specs.CreateUsingGitOpsSpecInput {
return specs.CreateUsingGitOpsSpecInput{
E2EConfig: e2eConfig,
BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
ClusterctlConfigPath: flagVals.ConfigPath,
ArtifactFolder: flagVals.ArtifactFolder,
ClusterTemplate: azureAKSMMP,
ClusterName: "hl-e2e-cluster3",
ClusterTemplate: e2e.CAPIAzureAKSMMP,
ClusterName: "highlander-e2e-cluster3",
ControlPlaneMachineCount: ptr.To[int](1),
WorkerMachineCount: ptr.To[int](1),
GitAddr: giteaResult.GitAddress,
