From 431cd3948c116563f6cfb0c4bf47e0a8fa23b5e3 Mon Sep 17 00:00:00 2001 From: Luca Zecca Date: Tue, 17 May 2022 16:37:19 +0200 Subject: [PATCH 1/6] chore: remove unused Boundary setup from Ansible --- data/provisioners/cluster/vsphere/provision/all-in-one.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/data/provisioners/cluster/vsphere/provision/all-in-one.yml b/data/provisioners/cluster/vsphere/provision/all-in-one.yml index 61c6eebf1..13646ba00 100644 --- a/data/provisioners/cluster/vsphere/provision/all-in-one.yml +++ b/data/provisioners/cluster/vsphere/provision/all-in-one.yml @@ -13,13 +13,6 @@ name: vmtoolsd state: restarted -- name: Install Boundary target - hosts: all - become: true - roles: - - name: boundary/target - when: enable_boundary_targets|default(false)|bool == true - - name: Copy CA certificates to HAProxy hosts: haproxy become: true From 7cc93f91c9c06184cf3e3b13c43f478260ad828a Mon Sep 17 00:00:00 2001 From: Luca Zecca Date: Thu, 26 May 2022 08:53:55 +0200 Subject: [PATCH 2/6] chore: clean up Boundary everywhere --- .../e2e-tests/vsphere/cluster.tpl.yml | 1 - .../integration/vsphere/cluster.yml | 1 - automated-tests/integration/vsphere/tests.sh | 3 +- data/provisioners/cluster/vsphere/main.tf | 1 - .../provisioners/cluster/vsphere/variables.tf | 6 -- internal/cluster/configuration/vsphere.go | 2 - .../provisioners/vsphere/provisioner.go | 63 ++++++++++++------- internal/configuration/templates.go | 21 +++++-- 8 files changed, 59 insertions(+), 39 deletions(-) diff --git a/automated-tests/e2e-tests/vsphere/cluster.tpl.yml b/automated-tests/e2e-tests/vsphere/cluster.tpl.yml index 6b3280be3..c44ec23b2 100644 --- a/automated-tests/e2e-tests/vsphere/cluster.tpl.yml +++ b/automated-tests/e2e-tests/vsphere/cluster.tpl.yml @@ -20,7 +20,6 @@ spec: - 8.8.8.8 domain: localdomain ipOffset: 1818 - boundary: true lbNode: count: 1 template: "${VSPHERE_TEMPLATE_PREFIX}/sighup-ubuntu20LTS-template-v20210115" diff --git a/automated-tests/integration/vsphere/cluster.yml b/automated-tests/integration/vsphere/cluster.yml index 1ddcac52f..8d3715f19 100644 --- a/automated-tests/integration/vsphere/cluster.yml +++ b/automated-tests/integration/vsphere/cluster.yml @@ -22,7 +22,6 @@ spec: - 1.1.1.1 - 8.8.8.8 domain: localdomain - boundary: true lbNode: count: 1 template: TEMPLATES-NODE02/sighup-oraclelinux7.9-template-v20210413 diff --git a/automated-tests/integration/vsphere/tests.sh b/automated-tests/integration/vsphere/tests.sh index c710d267a..8b3817b9f 100644 --- a/automated-tests/integration/vsphere/tests.sh +++ b/automated-tests/integration/vsphere/tests.sh @@ -46,8 +46,7 @@ CPUARCH="amd64_v1" info project_dir="./automated-tests/integration/vsphere/cluster" test(){ - if [ -e ${project_dir}/provision/roles/boundary/target/tasks/main.yml ] && \ - [ -e ${project_dir}/provision/ansible.cfg ] && \ + if [ -e ${project_dir}/provision/ansible.cfg ] && \ [ -e ${project_dir}/bin/terraform ] && \ [ -e ${project_dir}/configuration/.netrc ] && \ [ -e ${project_dir}/logs/terraform.logs ] && \ diff --git a/data/provisioners/cluster/vsphere/main.tf b/data/provisioners/cluster/vsphere/main.tf index 3797a7716..85b24a929 100644 --- a/data/provisioners/cluster/vsphere/main.tf +++ b/data/provisioners/cluster/vsphere/main.tf @@ -70,7 +70,6 @@ module "fury" { net_domain = var.net_domain ip_offset = var.ip_offset - enable_boundary_targets = var.enable_boundary_targets os_user = var.os_user ssh_public_keys = local.ssh_public_keys diff --git a/data/provisioners/cluster/vsphere/variables.tf
b/data/provisioners/cluster/vsphere/variables.tf index e51515292..e5da15795 100644 --- a/data/provisioners/cluster/vsphere/variables.tf +++ b/data/provisioners/cluster/vsphere/variables.tf @@ -128,12 +128,6 @@ variable "ip_offset" { description = "Number to sum at every IP calculation. Enable deploying multiple clusters in the same network" } -variable "enable_boundary_targets" { - description = "Enable boundary on all the nodes" - type = bool - default = false -} - variable "os_user" { type = string default = "sighup" diff --git a/internal/cluster/configuration/vsphere.go b/internal/cluster/configuration/vsphere.go index 9181a72c0..c38e858d5 100644 --- a/internal/cluster/configuration/vsphere.go +++ b/internal/cluster/configuration/vsphere.go @@ -18,8 +18,6 @@ type VSphere struct { NetworkConfig VSphereNetworkConfig `yaml:"networkConfig"` - Boundary bool `yaml:"boundary"` - LoadBalancerNode VSphereKubeLoadBalancer `yaml:"lbNode"` MasterNode VSphereKubeNode `yaml:"masterNode"` InfraNode VSphereKubeNode `yaml:"infraNode"` diff --git a/internal/cluster/provisioners/vsphere/provisioner.go b/internal/cluster/provisioners/vsphere/provisioner.go index 605b6537c..4177ca094 100644 --- a/internal/cluster/provisioners/vsphere/provisioner.go +++ b/internal/cluster/provisioners/vsphere/provisioner.go @@ -14,7 +14,6 @@ import ( "os/exec" "path/filepath" "runtime" - "strconv" "strings" "github.com/gobuffalo/packr/v2" @@ -69,19 +68,16 @@ func (e *VSphere) UpdateMessage() string { log.Error("Can not get `ansible_inventory` value") } inventory, _ := aini.Parse(strings.NewReader(inventoryOutput)) - kubernetes_control_plane_address := strings.Replace(inventory.Groups["all"].Vars["kubernetes_control_plane_address"], "\"", "", -1) - enable_boundary_targets := strings.Replace(inventory.Groups["all"].Vars["enable_boundary_targets"], "\"", "", -1) - enable_boundary_targets_b, _ := strconv.ParseBool(enable_boundary_targets) + kubernetes_control_plane_address := strings.Replace( + inventory.Groups["all"].Vars["kubernetes_control_plane_address"], + "\"", + "", + -1, + ) clusterOperatorName := strings.Replace(inventory.Groups["all"].Vars["ansible_user"], "\"", "", -1) - boundary_message := "" - - if enable_boundary_targets_b { - boundary_message = fmt.Sprintf(` -Boundary is enabled in this setup so you can use SIGHUP Boundary setup to access this cluster with the boundary-ops user -`) - } - return fmt.Sprintf(`[vSphere] Fury + return fmt.Sprintf( + `[vSphere] Fury All the cluster components are up to date. vSphere Kubernetes cluster ready. 
@@ -98,7 +94,8 @@ Then access by running: $ ssh %v@node-name-reported-by-kubectl-get-nodes -`, kubernetes_control_plane_address, clusterOperatorName, clusterOperatorName, boundary_message, clusterOperatorName) +`, kubernetes_control_plane_address, clusterOperatorName, clusterOperatorName, clusterOperatorName, + ) } // DestroyMessage return a custom provisioner message the user will see once the cluster is destroyed @@ -164,10 +161,14 @@ func (e VSphere) createVarFile() (err error) { buffer.WriteString(fmt.Sprintf("network = \"%v\"\n", spec.NetworkConfig.Name)) buffer.WriteString(fmt.Sprintf("net_cidr = \"%v\"\n", spec.ClusterCIDR)) buffer.WriteString(fmt.Sprintf("net_gateway = \"%v\"\n", spec.NetworkConfig.Gateway)) - buffer.WriteString(fmt.Sprintf("net_nameservers = [\"%v\"]\n", strings.Join(spec.NetworkConfig.Nameservers, "\",\""))) + buffer.WriteString( + fmt.Sprintf( + "net_nameservers = [\"%v\"]\n", + strings.Join(spec.NetworkConfig.Nameservers, "\",\""), + ), + ) buffer.WriteString(fmt.Sprintf("net_domain = \"%v\"\n", spec.NetworkConfig.Domain)) buffer.WriteString(fmt.Sprintf("ip_offset = %v\n", spec.NetworkConfig.IPOffset)) - buffer.WriteString(fmt.Sprintf("enable_boundary_targets = %v\n", spec.Boundary)) if len(spec.SSHPublicKey) > 0 { buffer.WriteString(fmt.Sprintf("ssh_public_keys = [\"%v\"]\n", strings.Join(spec.SSHPublicKey, "\",\""))) } else { @@ -193,7 +194,12 @@ func (e VSphere) createVarFile() (err error) { buffer.WriteString("kube_master_labels = {}\n") } if len(spec.MasterNode.Taints) > 0 { - buffer.WriteString(fmt.Sprintf("kube_master_taints = [\"%v\"]\n", strings.Join(spec.MasterNode.Taints, "\",\""))) + buffer.WriteString( + fmt.Sprintf( + "kube_master_taints = [\"%v\"]\n", + strings.Join(spec.MasterNode.Taints, "\",\""), + ), + ) } else { buffer.WriteString("kube_master_taints = []\n") } @@ -260,7 +266,10 @@ func (e VSphere) createVarFile() (err error) { if err != nil { return err } - err = e.terraform.FormatWrite(context.Background(), tfexec.Dir(fmt.Sprintf("%v/vsphere.tfvars", e.terraform.WorkingDir()))) + err = e.terraform.FormatWrite( + context.Background(), + tfexec.Dir(fmt.Sprintf("%v/vsphere.tfvars", e.terraform.WorkingDir())), + ) if err != nil { return err } @@ -355,7 +364,10 @@ func (e VSphere) Plan() (err error) { return err } var changes bool - changes, err = e.terraform.Plan(context.Background(), tfexec.VarFile(fmt.Sprintf("%v/vsphere.tfvars", e.terraform.WorkingDir()))) + changes, err = e.terraform.Plan( + context.Background(), + tfexec.VarFile(fmt.Sprintf("%v/vsphere.tfvars", e.terraform.WorkingDir())), + ) if err != nil { log.Fatalf("[DRYRUN] Something went wrong while updating vsphere. %v", err) return err @@ -377,7 +389,10 @@ func (e VSphere) Update() (string, error) { if err != nil { return "", err } - err = e.terraform.Apply(context.Background(), tfexec.VarFile(fmt.Sprintf("%v/vsphere.tfvars", e.terraform.WorkingDir()))) + err = e.terraform.Apply( + context.Background(), + tfexec.VarFile(fmt.Sprintf("%v/vsphere.tfvars", e.terraform.WorkingDir())), + ) if err != nil { log.Fatalf("Something went wrong while updating vsphere. 
%v", err) return "", err } @@ -414,7 +429,10 @@ func (e VSphere) Update() (string, error) { return "", err } - kubeconfig, err := runAnsiblePlaybook(filepath.Join(e.terraform.WorkingDir(), "provision"), filepath.Join(e.terraform.WorkingDir(), "logs")) + kubeconfig, err := runAnsiblePlaybook( + filepath.Join(e.terraform.WorkingDir(), "provision"), + filepath.Join(e.terraform.WorkingDir(), "logs"), + ) log.Info("VSphere Updated") return kubeconfig, err } @@ -426,7 +444,10 @@ func (e VSphere) Destroy() (err error) { if err != nil { return err } - err = e.terraform.Destroy(context.Background(), tfexec.VarFile(fmt.Sprintf("%v/vsphere.tfvars", e.terraform.WorkingDir()))) + err = e.terraform.Destroy( + context.Background(), + tfexec.VarFile(fmt.Sprintf("%v/vsphere.tfvars", e.terraform.WorkingDir())), + ) if err != nil { log.Fatalf("Something went wrong while destroying VSphere cluster project. %v", err) return err diff --git a/internal/configuration/templates.go b/internal/configuration/templates.go index 9284548fc..e6ec39d03 100644 --- a/internal/configuration/templates.go +++ b/internal/configuration/templates.go @@ -110,8 +110,14 @@ func bootstrapTemplate(config *Configuration) error { } config.Spec = spec default: - log.Errorf("Error creating a template configuration file. Parser not found for %v provisioner", config.Provisioner) - return fmt.Errorf("error creating a template configuration file. Parser not found for %v provisioner", config.Provisioner) + log.Errorf( + "Error creating a template configuration file. Parser not found for %v provisioner", + config.Provisioner, + ) + return fmt.Errorf( + "error creating a template configuration file. Parser not found for %v provisioner", + config.Provisioner, + ) } createBase(config) return nil @@ -270,7 +276,6 @@ func clusterTemplate(config *Configuration) error { Domain: "localdomain", IPOffset: 0, }, - Boundary: true, LoadBalancerNode: clustercfg.VSphereKubeLoadBalancer{ Count: 1, Template: "ubuntu-20.04 # The name of the base image to use for the VMs", @@ -321,8 +326,14 @@ } config.Spec = spec default: - log.Errorf("error creating a template configuration file. Parser not found for %v provisioner", config.Provisioner) - return fmt.Errorf("error creating a template configuration file. Parser not found for %v provisioner", config.Provisioner) + log.Errorf( + "error creating a template configuration file. Parser not found for %v provisioner", + config.Provisioner, + ) + return fmt.Errorf( + "error creating a template configuration file. Parser not found for %v provisioner", + config.Provisioner, + ) } createBase(config) return nil From f6eb76995b1cd1ed8545f89c0b4d7eac6a95de7f Mon Sep 17 00:00:00 2001 From: omissis Date: Thu, 26 May 2022 15:51:29 +0200 Subject: [PATCH 3/6] fix: remove leftover Sprintf formatter --- internal/cluster/provisioners/vsphere/provisioner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/cluster/provisioners/vsphere/provisioner.go b/internal/cluster/provisioners/vsphere/provisioner.go index 4177ca094..631d95bff 100644 --- a/internal/cluster/provisioners/vsphere/provisioner.go +++ b/internal/cluster/provisioners/vsphere/provisioner.go @@ -85,7 +85,7 @@ vSphere Kubernetes cluster ready. vSphere Cluster Endpoint: %v SSH Operator Name: %v -Use the ssh %v username to access the vSphere instances with the configured SSH key. %v +Use the ssh %v username to access the vSphere instances with the configured SSH key.
Discover the instances by running $ kubectl get nodes From 4a64ee1ffa0d30788f7e126e163fec0d08671ddb Mon Sep 17 00:00:00 2001 From: omissis Date: Wed, 3 May 2023 16:05:59 +0200 Subject: [PATCH 4/6] chore: bump fury-provisioners version --- data/provisioners/cluster/vsphere/main.tf | 2 +- internal/cluster/provisioners/vsphere/provisioner.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/data/provisioners/cluster/vsphere/main.tf b/data/provisioners/cluster/vsphere/main.tf index 85b24a929..93cbdda4d 100644 --- a/data/provisioners/cluster/vsphere/main.tf +++ b/data/provisioners/cluster/vsphere/main.tf @@ -44,7 +44,7 @@ locals { } module "fury" { - source = "https://github.com/sighupio/furyctl-provisioners/archive/refs/tags/v0.6.3.zip//furyctl-provisioners-0.6.3/modules/cluster/vsphere" + source = "https://github.com/sighupio/furyctl-provisioners/archive/refs/tags/v0.7.0.zip//furyctl-provisioners-0.7.0/modules/cluster/vsphere" name = var.name kube_version = var.kube_version diff --git a/internal/cluster/provisioners/vsphere/provisioner.go b/internal/cluster/provisioners/vsphere/provisioner.go index 631d95bff..19ef73d26 100644 --- a/internal/cluster/provisioners/vsphere/provisioner.go +++ b/internal/cluster/provisioners/vsphere/provisioner.go @@ -346,7 +346,7 @@ func downloadAnsibleRoles(workingDirectory string) error { } client := &getter.Client{ - Src: "https://github.com/sighupio/furyctl-provisioners/archive/refs/tags/v0.6.3.zip//furyctl-provisioners-0.6.3/roles", + Src: "https://github.com/sighupio/furyctl-provisioners/archive/refs/tags/v0.7.0.zip//furyctl-provisioners-0.7.0/roles", Dst: downloadPath, Pwd: workingDirectory, Mode: getter.ClientModeAny, From ac3cd2d1b03ead7d806b730f4d029802d1fdc98e Mon Sep 17 00:00:00 2001 From: omissis Date: Wed, 3 May 2023 16:12:58 +0200 Subject: [PATCH 5/6] chore: set fury-provisioners to rc for testing --- data/provisioners/cluster/vsphere/main.tf | 2 +- internal/cluster/provisioners/vsphere/provisioner.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/data/provisioners/cluster/vsphere/main.tf b/data/provisioners/cluster/vsphere/main.tf index 93cbdda4d..242a5a581 100644 --- a/data/provisioners/cluster/vsphere/main.tf +++ b/data/provisioners/cluster/vsphere/main.tf @@ -44,7 +44,7 @@ locals { } module "fury" { - source = "https://github.com/sighupio/furyctl-provisioners/archive/refs/tags/v0.7.0.zip//furyctl-provisioners-0.7.0/modules/cluster/vsphere" + source = "https://github.com/sighupio/furyctl-provisioners/archive/refs/tags/v0.7.0-rc.0.zip//furyctl-provisioners-0.7.0-rc.0/modules/cluster/vsphere" name = var.name kube_version = var.kube_version diff --git a/internal/cluster/provisioners/vsphere/provisioner.go b/internal/cluster/provisioners/vsphere/provisioner.go index 19ef73d26..51b043f36 100644 --- a/internal/cluster/provisioners/vsphere/provisioner.go +++ b/internal/cluster/provisioners/vsphere/provisioner.go @@ -346,7 +346,7 @@ func downloadAnsibleRoles(workingDirectory string) error { } client := &getter.Client{ - Src: "https://github.com/sighupio/furyctl-provisioners/archive/refs/tags/v0.7.0.zip//furyctl-provisioners-0.7.0/roles", + Src: "https://github.com/sighupio/furyctl-provisioners/archive/refs/tags/v0.7.0-rc.0.zip//furyctl-provisioners-0.7.0-rc.0/roles", Dst: downloadPath, Pwd: workingDirectory, Mode: getter.ClientModeAny, From baf7aa07e3c397a9d751e1908978463dc0d03d05 Mon Sep 17 00:00:00 2001 From: omissis Date: Wed, 3 May 2023 18:44:51 +0200 Subject: [PATCH 6/6] chore: set fury-provisioners to stable 
after testing --- data/provisioners/cluster/vsphere/main.tf | 2 +- internal/cluster/provisioners/vsphere/provisioner.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/data/provisioners/cluster/vsphere/main.tf b/data/provisioners/cluster/vsphere/main.tf index 242a5a581..93cbdda4d 100644 --- a/data/provisioners/cluster/vsphere/main.tf +++ b/data/provisioners/cluster/vsphere/main.tf @@ -44,7 +44,7 @@ locals { } module "fury" { - source = "https://github.com/sighupio/furyctl-provisioners/archive/refs/tags/v0.7.0-rc.0.zip//furyctl-provisioners-0.7.0-rc.0/modules/cluster/vsphere" + source = "https://github.com/sighupio/furyctl-provisioners/archive/refs/tags/v0.7.0.zip//furyctl-provisioners-0.7.0/modules/cluster/vsphere" name = var.name kube_version = var.kube_version diff --git a/internal/cluster/provisioners/vsphere/provisioner.go b/internal/cluster/provisioners/vsphere/provisioner.go index 51b043f36..19ef73d26 100644 --- a/internal/cluster/provisioners/vsphere/provisioner.go +++ b/internal/cluster/provisioners/vsphere/provisioner.go @@ -346,7 +346,7 @@ func downloadAnsibleRoles(workingDirectory string) error { } client := &getter.Client{ - Src: "https://github.com/sighupio/furyctl-provisioners/archive/refs/tags/v0.7.0-rc.0.zip//furyctl-provisioners-0.7.0-rc.0/roles", + Src: "https://github.com/sighupio/furyctl-provisioners/archive/refs/tags/v0.7.0.zip//furyctl-provisioners-0.7.0/roles", Dst: downloadPath, Pwd: workingDirectory, Mode: getter.ClientModeAny,
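
Note on the bug fixed in PATCH 3/6: PATCH 2/6 removed the boundary_message argument from the UpdateMessage template but left its %v verb behind in the format string. fmt.Sprintf does not fail on a verb/argument mismatch; it renders the orphaned verb inline as "%!v(MISSING)", so the cluster-ready message would have printed that token where the Boundary hint used to be. The following is a minimal, self-contained sketch of the failure mode, not code from the series; the endpoint and user values are illustrative only. go vet's printf analysis also flags this class of mismatch at build time.

package main

import "fmt"

func main() {
	// Four %v verbs but only three arguments: Sprintf renders the
	// unmatched verb as "%!v(MISSING)" instead of returning an error.
	broken := fmt.Sprintf(
		"vSphere Cluster Endpoint: %v\nSSH Operator Name: %v\nSSH user: %v\n%v",
		"10.0.0.2", "sighup", "sighup", // hypothetical values
	)
	fmt.Println(broken) // last line prints: %!v(MISSING)

	// The fix mirrors PATCH 3/6: drop the leftover verb so verbs and
	// arguments stay in sync.
	fixed := fmt.Sprintf(
		"vSphere Cluster Endpoint: %v\nSSH Operator Name: %v\nSSH user: %v",
		"10.0.0.2", "sighup", "sighup",
	)
	fmt.Println(fixed)
}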