Merge pull request #74 from sighupio/hotfix/removing-boundary-ansible
chore: removing useless Boundary setup from Ansible
omissis committed May 3, 2023
2 parents 6b4ab41 + baf7aa0 commit 9abe923
Showing 9 changed files with 62 additions and 49 deletions.
1 change: 0 additions & 1 deletion automated-tests/e2e-tests/vsphere/cluster.tpl.yml
@@ -20,7 +20,6 @@ spec:
       - 8.8.8.8
     domain: localdomain
     ipOffset: 1818
-  boundary: true
   lbNode:
     count: 1
     template: "${VSPHERE_TEMPLATE_PREFIX}/sighup-ubuntu20LTS-template-v20210115"
1 change: 0 additions & 1 deletion automated-tests/integration/vsphere/cluster.yml
@@ -22,7 +22,6 @@ spec:
       - 1.1.1.1
       - 8.8.8.8
     domain: localdomain
-  boundary: true
   lbNode:
     count: 1
     template: TEMPLATES-NODE02/sighup-oraclelinux7.9-template-v20210413
3 changes: 1 addition & 2 deletions automated-tests/integration/vsphere/tests.sh
@@ -46,8 +46,7 @@ CPUARCH="amd64_v1"
 info
 project_dir="./automated-tests/integration/vsphere/cluster"
 test(){
-    if [ -e ${project_dir}/provision/roles/boundary/target/tasks/main.yml ] && \
-       [ -e ${project_dir}/provision/ansible.cfg ] && \
+    if [ -e ${project_dir}/provision/ansible.cfg ] && \
        [ -e ${project_dir}/bin/terraform ] && \
        [ -e ${project_dir}/configuration/.netrc ] && \
        [ -e ${project_dir}/logs/terraform.logs ] && \
3 changes: 1 addition & 2 deletions data/provisioners/cluster/vsphere/main.tf
@@ -44,7 +44,7 @@ locals {
 }

 module "fury" {
-  source = "https://github.com/sighupio/furyctl-provisioners/archive/refs/tags/v0.6.3.zip//furyctl-provisioners-0.6.3/modules/cluster/vsphere"
+  source = "https://github.com/sighupio/furyctl-provisioners/archive/refs/tags/v0.7.0.zip//furyctl-provisioners-0.7.0/modules/cluster/vsphere"

   name         = var.name
   kube_version = var.kube_version
@@ -70,7 +70,6 @@ module "fury" {
   net_domain = var.net_domain
   ip_offset  = var.ip_offset

-  enable_boundary_targets = var.enable_boundary_targets
   os_user         = var.os_user
   ssh_public_keys = local.ssh_public_keys
7 changes: 0 additions & 7 deletions data/provisioners/cluster/vsphere/provision/all-in-one.yml
@@ -13,13 +13,6 @@
       name: vmtoolsd
       state: restarted

-- name: Install Boundary target
-  hosts: all
-  become: true
-  roles:
-    - name: boundary/target
-      when: enable_boundary_targets|default(false)|bool == true
-
 - name: Copy CA certificates to HAProxy
   hosts: haproxy
   become: true
6 changes: 0 additions & 6 deletions data/provisioners/cluster/vsphere/variables.tf
@@ -128,12 +128,6 @@ variable "ip_offset" {
   description = "Number to sum at every IP calculation. Enable deploying multiple clusters in the same network"
 }

-variable "enable_boundary_targets" {
-  description = "Enable boundary on all the nodes"
-  type        = bool
-  default     = false
-}
-
 variable "os_user" {
   type    = string
   default = "sighup"
2 changes: 0 additions & 2 deletions internal/cluster/configuration/vsphere.go
@@ -18,8 +18,6 @@ type VSphere struct {

     NetworkConfig VSphereNetworkConfig `yaml:"networkConfig"`

-    Boundary bool `yaml:"boundary"`
-
     LoadBalancerNode VSphereKubeLoadBalancer `yaml:"lbNode"`
     MasterNode       VSphereKubeNode         `yaml:"masterNode"`
     InfraNode        VSphereKubeNode         `yaml:"infraNode"`
67 changes: 44 additions & 23 deletions internal/cluster/provisioners/vsphere/provisioner.go
@@ -14,7 +14,6 @@ import (
     "os/exec"
     "path/filepath"
     "runtime"
-    "strconv"
     "strings"

     "github.com/gobuffalo/packr/v2"
@@ -69,27 +68,24 @@ func (e *VSphere) UpdateMessage() string {
         log.Error("Can not get `ansible_inventory` value")
     }
     inventory, _ := aini.Parse(strings.NewReader(inventoryOutput))
-    kubernetes_control_plane_address := strings.Replace(inventory.Groups["all"].Vars["kubernetes_control_plane_address"], "\"", "", -1)
-    enable_boundary_targets := strings.Replace(inventory.Groups["all"].Vars["enable_boundary_targets"], "\"", "", -1)
-    enable_boundary_targets_b, _ := strconv.ParseBool(enable_boundary_targets)
+    kubernetes_control_plane_address := strings.Replace(
+        inventory.Groups["all"].Vars["kubernetes_control_plane_address"],
+        "\"",
+        "",
+        -1,
+    )
     clusterOperatorName := strings.Replace(inventory.Groups["all"].Vars["ansible_user"], "\"", "", -1)
-    boundary_message := ""
-
-    if enable_boundary_targets_b {
-        boundary_message = fmt.Sprintf(`
-Boundary is enabled in this setup so you can use SIGHUP Boundary setup to access this cluster with the boundary-ops user
-`)
-    }
-
-    return fmt.Sprintf(`[vSphere] Fury
+    return fmt.Sprintf(
+        `[vSphere] Fury
 All the cluster components are up to date.
 vSphere Kubernetes cluster ready.
 vSphere Cluster Endpoint: %v
 SSH Operator Name: %v
-Use the ssh %v username to access the vSphere instances with the configured SSH key. %v
+Use the ssh %v username to access the vSphere instances with the configured SSH key.
 Discover the instances by running
 $ kubectl get nodes
@@ -98,7 +94,8 @@ Then access by running:
 $ ssh %v@node-name-reported-by-kubectl-get-nodes
-`, kubernetes_control_plane_address, clusterOperatorName, clusterOperatorName, boundary_message, clusterOperatorName)
+`, kubernetes_control_plane_address, clusterOperatorName, clusterOperatorName, clusterOperatorName,
+    )
 }
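UpdateMessage builds its output from the ansible_inventory Terraform output, parsed with aini and then stripped of surrounding quotes. A self-contained sketch of that step, assuming github.com/relex/aini as the import path (the diff shows only the package name) and using strings.ReplaceAll, which is equivalent to the strings.Replace(s, old, new, -1) calls above:

package main

import (
	"fmt"
	"strings"

	"github.com/relex/aini" // assumed import path; only `aini` is visible in the diff
)

func main() {
	// Stand-in for the `ansible_inventory` output the provisioner reads.
	inventoryOutput := `[all:vars]
kubernetes_control_plane_address="10.0.0.2:6443"
ansible_user="sighup"
`
	inventory, err := aini.Parse(strings.NewReader(inventoryOutput))
	if err != nil {
		panic(err)
	}
	// The provisioner strips any surrounding quotes from the raw values.
	addr := strings.ReplaceAll(inventory.Groups["all"].Vars["kubernetes_control_plane_address"], "\"", "")
	user := strings.ReplaceAll(inventory.Groups["all"].Vars["ansible_user"], "\"", "")
	fmt.Printf("endpoint=%v user=%v\n", addr, user)
}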

// DestroyMessage return a custom provisioner message the user will see once the cluster is destroyed
@@ -164,10 +161,14 @@ func (e VSphere) createVarFile() (err error) {
     buffer.WriteString(fmt.Sprintf("network = \"%v\"\n", spec.NetworkConfig.Name))
     buffer.WriteString(fmt.Sprintf("net_cidr = \"%v\"\n", spec.ClusterCIDR))
     buffer.WriteString(fmt.Sprintf("net_gateway = \"%v\"\n", spec.NetworkConfig.Gateway))
-    buffer.WriteString(fmt.Sprintf("net_nameservers = [\"%v\"]\n", strings.Join(spec.NetworkConfig.Nameservers, "\",\"")))
+    buffer.WriteString(
+        fmt.Sprintf(
+            "net_nameservers = [\"%v\"]\n",
+            strings.Join(spec.NetworkConfig.Nameservers, "\",\""),
+        ),
+    )
     buffer.WriteString(fmt.Sprintf("net_domain = \"%v\"\n", spec.NetworkConfig.Domain))
     buffer.WriteString(fmt.Sprintf("ip_offset = %v\n", spec.NetworkConfig.IPOffset))
-    buffer.WriteString(fmt.Sprintf("enable_boundary_targets = %v\n", spec.Boundary))
     if len(spec.SSHPublicKey) > 0 {
         buffer.WriteString(fmt.Sprintf("ssh_public_keys = [\"%v\"]\n", strings.Join(spec.SSHPublicKey, "\",\"")))
     } else {
@@ -193,7 +194,12 @@ func (e VSphere) createVarFile() (err error) {
         buffer.WriteString("kube_master_labels = {}\n")
     }
     if len(spec.MasterNode.Taints) > 0 {
-        buffer.WriteString(fmt.Sprintf("kube_master_taints = [\"%v\"]\n", strings.Join(spec.MasterNode.Taints, "\",\"")))
+        buffer.WriteString(
+            fmt.Sprintf(
+                "kube_master_taints = [\"%v\"]\n",
+                strings.Join(spec.MasterNode.Taints, "\",\""),
+            ),
+        )
     } else {
         buffer.WriteString("kube_master_taints = []\n")
     }
@@ -260,7 +266,10 @@ func (e VSphere) createVarFile() (err error) {
     if err != nil {
         return err
     }
-    err = e.terraform.FormatWrite(context.Background(), tfexec.Dir(fmt.Sprintf("%v/vsphere.tfvars", e.terraform.WorkingDir())))
+    err = e.terraform.FormatWrite(
+        context.Background(),
+        tfexec.Dir(fmt.Sprintf("%v/vsphere.tfvars", e.terraform.WorkingDir())),
+    )
     if err != nil {
         return err
     }
@@ -337,7 +346,7 @@ func downloadAnsibleRoles(workingDirectory string) error {
     }

     client := &getter.Client{
-        Src:  "https://github.com/sighupio/furyctl-provisioners/archive/refs/tags/v0.6.3.zip//furyctl-provisioners-0.6.3/roles",
+        Src:  "https://github.com/sighupio/furyctl-provisioners/archive/refs/tags/v0.7.0.zip//furyctl-provisioners-0.7.0/roles",
         Dst:  downloadPath,
         Pwd:  workingDirectory,
         Mode: getter.ClientModeAny,
@@ -355,7 +364,10 @@ func (e VSphere) Plan() (err error) {
         return err
     }
     var changes bool
-    changes, err = e.terraform.Plan(context.Background(), tfexec.VarFile(fmt.Sprintf("%v/vsphere.tfvars", e.terraform.WorkingDir())))
+    changes, err = e.terraform.Plan(
+        context.Background(),
+        tfexec.VarFile(fmt.Sprintf("%v/vsphere.tfvars", e.terraform.WorkingDir())),
+    )
     if err != nil {
         log.Fatalf("[DRYRUN] Something went wrong while updating vsphere. %v", err)
         return err
@@ -377,7 +389,10 @@ func (e VSphere) Update() (string, error) {
     if err != nil {
         return "", err
     }
-    err = e.terraform.Apply(context.Background(), tfexec.VarFile(fmt.Sprintf("%v/vsphere.tfvars", e.terraform.WorkingDir())))
+    err = e.terraform.Apply(
+        context.Background(),
+        tfexec.VarFile(fmt.Sprintf("%v/vsphere.tfvars", e.terraform.WorkingDir())),
+    )
     if err != nil {
         log.Fatalf("Something went wrong while updating vsphere. %v", err)
         return "", err
@@ -414,7 +429,10 @@ func (e VSphere) Update() (string, error) {
         return "", err
     }

-    kubeconfig, err := runAnsiblePlaybook(filepath.Join(e.terraform.WorkingDir(), "provision"), filepath.Join(e.terraform.WorkingDir(), "logs"))
+    kubeconfig, err := runAnsiblePlaybook(
+        filepath.Join(e.terraform.WorkingDir(), "provision"),
+        filepath.Join(e.terraform.WorkingDir(), "logs"),
+    )
     log.Info("VSphere Updated")
     return kubeconfig, err
 }
@@ -426,7 +444,10 @@ func (e VSphere) Destroy() (err error) {
     if err != nil {
         return err
     }
-    err = e.terraform.Destroy(context.Background(), tfexec.VarFile(fmt.Sprintf("%v/vsphere.tfvars", e.terraform.WorkingDir())))
+    err = e.terraform.Destroy(
+        context.Background(),
+        tfexec.VarFile(fmt.Sprintf("%v/vsphere.tfvars", e.terraform.WorkingDir())),
+    )
     if err != nil {
         log.Fatalf("Something went wrong while destroying VSphere cluster project. %v", err)
         return err
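Plan, Update, and Destroy all drive Terraform through hashicorp/terraform-exec, pointing it at the generated vsphere.tfvars. A minimal sketch of the call pattern with illustrative paths (furyctl computes its own working directory and binary location); note that Plan returns a bool reporting whether the plan contains changes, which is what the dry-run branch logs:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/terraform-exec/tfexec"
)

func main() {
	// Illustrative paths; furyctl resolves its own working dir and binary.
	tf, err := tfexec.NewTerraform("./cluster", "./cluster/bin/terraform")
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	varFile := tfexec.VarFile(fmt.Sprintf("%v/vsphere.tfvars", tf.WorkingDir()))

	// Plan reports whether there is a diff to apply.
	changes, err := tf.Plan(ctx, varFile)
	if err != nil {
		log.Fatal(err)
	}
	if changes {
		if err := tf.Apply(ctx, varFile); err != nil {
			log.Fatal(err)
		}
	}
}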
21 changes: 16 additions & 5 deletions internal/configuration/templates.go
@@ -110,8 +110,14 @@ func bootstrapTemplate(config *Configuration) error {
         }
         config.Spec = spec
     default:
-        log.Errorf("Error creating a template configuration file. Parser not found for %v provisioner", config.Provisioner)
-        return fmt.Errorf("error creating a template configuration file. Parser not found for %v provisioner", config.Provisioner)
+        log.Errorf(
+            "Error creating a template configuration file. Parser not found for %v provisioner",
+            config.Provisioner,
+        )
+        return fmt.Errorf(
+            "error creating a template configuration file. Parser not found for %v provisioner",
+            config.Provisioner,
+        )
     }
     createBase(config)
     return nil
@@ -270,7 +276,6 @@ func clusterTemplate(config *Configuration) error {
                 Domain:   "localdomain",
                 IPOffset: 0,
             },
-            Boundary: true,
             LoadBalancerNode: clustercfg.VSphereKubeLoadBalancer{
                 Count:    1,
                 Template: "ubuntu-20.04 # The name of the base image to use for the VMs",
@@ -321,8 +326,14 @@ func clusterTemplate(config *Configuration) error {
         }
         config.Spec = spec
     default:
-        log.Errorf("error creating a template configuration file. Parser not found for %v provisioner", config.Provisioner)
-        return fmt.Errorf("error creating a template configuration file. Parser not found for %v provisioner", config.Provisioner)
+        log.Errorf(
+            "error creating a template configuration file. Parser not found for %v provisioner",
+            config.Provisioner,
+        )
+        return fmt.Errorf(
+            "error creating a template configuration file. Parser not found for %v provisioner",
+            config.Provisioner,
+        )
     }
     createBase(config)
     return nil
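Both hunks above reflow the same log-then-return pair in bootstrapTemplate and clusterTemplate. A hypothetical consolidation, not part of this PR, that keeps the message in one place, assuming the package's log is sirupsen/logrus:

package main

import (
	"fmt"

	log "github.com/sirupsen/logrus" // assumed logger, aliased as `log` like in the diff
)

// errParserNotFound is a hypothetical helper collapsing the duplicated
// log.Errorf/fmt.Errorf pair from the hunks above.
func errParserNotFound(provisioner string) error {
	err := fmt.Errorf("error creating a template configuration file. Parser not found for %v provisioner", provisioner)
	log.Error(err)
	return err
}

func main() {
	fmt.Println(errParserNotFound("unknown"))
}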
