Skip to content

Commit

Permalink
change handling of private networks
Browse files Browse the repository at this point in the history
  • Loading branch information
queglay committed Apr 15, 2019
1 parent cf564cc commit 9d4f5bd
Show file tree
Hide file tree
Showing 9 changed files with 118 additions and 55 deletions.
2 changes: 2 additions & 0 deletions README.md
Expand Up @@ -139,6 +139,8 @@ we can initialise the secrets keys and encrypt.
source ./update_vars.sh --prod
vagrant reload
vagrant ssh
- you should also ensure you have set correct Amazon Machine Image IDs for your regions and for each instance. eg we can query for SoftNAS like this:
aws ec2 describe-images --region ap-southeast-2 --filters Name=is-public,Values=true Name=name,Values=SoftNAS* Name=description,Values='*Platinum - Consumption - 4.2.3*' --query 'Images[*].{ID:ImageId}'
- Now lets initialise terraform, and run our first terraform apply. Read more about this here for best practice - Your first terraform apply
terraform init
terraform plan -out=plan
Expand Down
63 changes: 56 additions & 7 deletions Vagrantfile
Expand Up @@ -4,7 +4,14 @@
Vagrant.configure("2") do |config|
# Ubuntu 16.04
config.vm.box = "ubuntu/xenial64"
config.vm.box_version = "20190406.0.0"
# networking issues
#config.vm.box = "bento/ubuntu-16.04"
# cant install xserver-xorg-legacy
# config.vm.box = "ubuntu/trusty64"
#config.vm.box = "bento/ubuntu-17.10"
#18 has no rc.local
#config.vm.box = "bento/ubuntu-18.04"
#config.vm.box_version = "20190411.0.0"
#config.ssh.username = "vagrant"
#config.ssh.password = ENV['TF_VAR_vagrant_password']

Expand All @@ -13,12 +20,22 @@ Vagrant.configure("2") do |config|
bridgenic = ENV['TF_VAR_bridgenic']
envtier = ENV['TF_VAR_envtier']
name = ENV['TF_VAR_openfirehawkserver_name']
openfirehawkserver = ENV['TF_VAR_openfirehawkserver']
network = ENV['TF_VAR_network']
#'private'

config.vm.define "ansible_control_"+envtier
config.vagrant.plugins = ['vagrant-disksize', 'vagrant-reload']
config.disksize.size = '50GB'
#config.vm.network "public_network", bridge: "eno1",
config.vm.network "public_network", mac: mac_string, bridge: bridgenic


# #config.vm.network "public_network", bridge: "eno1",
if network == 'public'
config.vm.network "public_network", mac: mac_string, bridge: bridgenic
else
# use a private network mode if you don't have control over the network environment - eg wifi in a cafe / other location.
config.vm.network "private_network", ip: openfirehawkserver
end

# routing issues? https://stackoverflow.com/questions/35208188/how-can-i-define-network-settings-with-vagrant
config.vm.provider "virtualbox" do |vb|
Expand All @@ -36,33 +53,65 @@ Vagrant.configure("2") do |config|
vb.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"]
vb.customize ["modifyvm", :id, "--nicpromisc3", "allow-all"]
end
config.vm.provision "shell", inline: "echo 'source /vagrant/scripts/env.sh' > /etc/profile.d/sa-environment.sh", :run => 'always'
config.vm.provision "shell", inline: "echo DEBIAN_FRONTEND=$DEBIAN_FRONTEND"

## block to install custom private key
config.ssh.insert_key = false
config.ssh.private_key_path = ["keys/my_key_pair_dev.pem", "~/.vagrant.d/insecure_private_key"]
config.vm.provision "file", source: "keys/my_key_pair_dev.pub", destination: "~/.ssh/authorized_keys"
config.vm.provision "shell", inline: <<-EOC
sudo sed -i -e "\\#PasswordAuthentication yes# s#PasswordAuthentication yes#PasswordAuthentication no#g" /etc/ssh/sshd_config
sudo service ssh restart
EOC
## end block for custom private key

config.vm.provision "shell", inline: "export DEBIAN_FRONTEND=noninteractive"
config.vm.provision "shell", inline: "sudo rm /etc/localtime && sudo ln -s /usr/share/zoneinfo/Australia/Brisbane /etc/localtime", run: "always"
config.vm.provision "shell", inline: "sudo apt-get update"
#config.vm.provision "shell", inline: "sudo apt-get upgrade"
config.vm.provision "shell", inline: "sudo apt-get install -y sshpass"

### Install Ansible Block ###
config.vm.provision "shell", inline: "sudo apt-get install -y software-properties-common"
config.vm.provision "shell", inline: "sudo apt-add-repository --yes --update ppa:ansible/ansible"
config.vm.provision "shell", inline: "sudo apt-get install -y ansible='2.7.10-1ppa~xenial'"
config.vm.provision "shell", inline: "sudo apt-get install -y ansible"
# ='2.7.10-1ppa~xenial'"
# we define the location of the ansible hosts file in an environment variable.
config.vm.provision "shell", inline: "grep -qxF 'ANSIBLE_INVENTORY=/vagrant/ansible/hosts' /etc/environment || echo 'ANSIBLE_INVENTORY=/vagrant/ansible/hosts' | sudo tee -a /etc/environment"
#reboot required for desktop to function.
### End Install Ansible Block ###

# config.vm.provision "shell", inline: "sudo apt-get upgrade -y"

# config.vm.provision "shell", inline: "sudo reboot"
# # trigger reload
# config.vm.provision :reload

### Install ubuntu desktop and virtualbox additions. Because a reboot is required, provisioning is handled here. ###
# Install the gui with vagrant or install the gui with ansible installed on the host.
# This creates potential issues because ideally, Ansible should be used within the vm only to limit ansible version issues if the user updates vagrant on their host.
config.vm.provision "shell", inline: "sudo apt-get install -y ubuntu-desktop virtualbox-guest-dkms virtualbox-guest-utils virtualbox-guest-x11 xserver-xorg-legacy"
# # Install the gui with vagrant or install the gui with ansible installed on the host.
# # This creates potential issues because ideally, Ansible should be used within the vm only to limit ansible version issues if the user updates vagrant on their host.
# install ubuntu
config.vm.provision "shell", inline: "sudo apt-get install -y ubuntu-desktop"
# ...or xfce. pick one.
#config.vm.provision "shell", inline: "sudo apt-get install -y curl xfce4"
config.vm.provision "shell", inline: "sudo apt-get install -y virtualbox-guest-dkms virtualbox-guest-utils virtualbox-guest-x11 xserver-xorg-legacy"

# Permit anyone to start the GUI
config.vm.provision "shell", inline: "sudo sed -i 's/allowed_users=.*$/allowed_users=anybody/' /etc/X11/Xwrapper.config"
#disable the update notifier. We do not want to update to ubuntu 18, currently deadline installer gui doesn't work in 18.
config.vm.provision "shell", inline: "sudo sed -i 's/Prompt=.*$/Prompt=never/' /etc/update-manager/release-upgrades"
### End Ubuntu Desktop block ###
# for dpkg or virtualbox issues, see https://superuser.com/questions/298367/how-to-fix-virtualbox-startup-error-vboxadd-service-failed

config.vm.provision "shell", inline: "sudo reboot"
# trigger reload
config.vm.provision :reload



#config.vm.provision "shell", inline: "sudo VBoxClient-all"
#config.vm.provision "shell", inline: "sudo startxfce4&"

#ansible provisioning
#ansible_inventory_dir = "ansible/hosts"
Expand Down
2 changes: 1 addition & 1 deletion ansible/init.yaml
Expand Up @@ -22,7 +22,7 @@
- role: init-packages
- role: terraform
- role: aws-cli
- {role: nfs-mounts-local, tags: 'nfs-mounts-local'}
#- {role: nfs-mounts-local, tags: 'nfs-mounts-local'}

- hosts: ansible_control
remote_user: vagrant
Expand Down
50 changes: 25 additions & 25 deletions main.tf
Expand Up @@ -196,40 +196,40 @@ variable "pcoip_skip_update" {
# houdini_license_server_address = "${var.houdini_license_server_address}"
# }

module "workstation" {
source = "./modules/workstation_pcoip"
name = "workstation"
# module "workstation" {
# source = "./modules/workstation_pcoip"
# name = "workstation"

#options for gateway type are centos7 and pcoip
gateway_type = "${var.gateway_type}"
vpc_id = "${module.vpc.vpc_id}"
vpc_cidr = "${module.vpc.vpc_cidr_block}"
vpn_cidr = "${var.vpn_cidr}"
remote_ip_cidr = "${var.remote_ip_cidr}"
#public_subnet_ids = "${module.vpc.public_subnets}"
# #options for gateway type are centos7 and pcoip
# gateway_type = "${var.gateway_type}"
# vpc_id = "${module.vpc.vpc_id}"
# vpc_cidr = "${module.vpc.vpc_cidr_block}"
# vpn_cidr = "${var.vpn_cidr}"
# remote_ip_cidr = "${var.remote_ip_cidr}"
# #public_subnet_ids = "${module.vpc.public_subnets}"

bastion_ip = "${module.bastion.public_ip}"
# bastion_ip = "${module.bastion.public_ip}"

key_name = "${var.key_name}"
private_key = "${file("${var.local_key_path}")}"
# key_name = "${var.key_name}"
# private_key = "${file("${var.local_key_path}")}"

#skipping os updates will allow faster rollout for testing, but may be non functional
skip_update = "${var.pcoip_skip_update}"
# #skipping os updates will allow faster rollout for testing, but may be non functional
# skip_update = "${var.pcoip_skip_update}"

public_domain_name = "${var.public_domain}"
# public_domain_name = "${var.public_domain}"

#sleep will stop instances to save cost during idle time.
sleep = "${var.sleep}"
pcoip_sleep_after_creation = "${var.pcoip_sleep_after_creation}"
# #sleep will stop instances to save cost during idle time.
# sleep = "${var.sleep}"
# pcoip_sleep_after_creation = "${var.pcoip_sleep_after_creation}"

private_subnet_ids = "${module.vpc.private_subnets}"
private_subnets_cidr_blocks = "${module.vpc.private_subnets_cidr_blocks}"
remote_subnet_cidr = "${var.remote_subnet_cidr}"
# private_subnet_ids = "${module.vpc.private_subnets}"
# private_subnets_cidr_blocks = "${module.vpc.private_subnets_cidr_blocks}"
# remote_subnet_cidr = "${var.remote_subnet_cidr}"

openfirehawkserver = "${var.openfirehawkserver}"
# openfirehawkserver = "${var.openfirehawkserver}"

houdini_license_server_address = "${var.houdini_license_server_address}"
}
# houdini_license_server_address = "${var.houdini_license_server_address}"
# }

variable "node_skip_update" {
default = false
Expand Down
41 changes: 24 additions & 17 deletions modules/softnas/main.tf
Expand Up @@ -440,23 +440,22 @@ resource "null_resource" "create_ami" {


# While instance is stopped, we attach ebs volumes.
resource "aws_volume_attachment" "softnas1_ebs_att" {
depends_on = ["aws_instance.softnas1", "null_resource.create_ami"]
count = "0"
#count = "${length(local.softnas1_volumes)}"
device_name = "${element(local.softnas1_mounts, count.index)}"
volume_id = "${element(local.softnas1_volumes, count.index)}"
instance_id = "${aws_instance.softnas1.id}"
}
# resource "aws_volume_attachment" "softnas1_ebs_att" {
# depends_on = ["aws_instance.softnas1", "null_resource.create_ami"]
# count = "${length(local.softnas1_volumes)}"
# device_name = "${element(local.softnas1_mounts, count.index)}"
# volume_id = "${element(local.softnas1_volumes, count.index)}"
# instance_id = "${aws_instance.softnas1.id}"
# }

# Start instance so that s3 disks can be attached
resource "null_resource" "start-softnas-after-ebs-attach" {
depends_on = ["aws_volume_attachment.softnas1_ebs_att"]
# # Start instance so that s3 disks can be attached
# resource "null_resource" "start-softnas-after-ebs-attach" {
# depends_on = ["aws_volume_attachment.softnas1_ebs_att"]

provisioner "local-exec" {
command = "aws ec2 start-instances --instance-ids ${aws_instance.softnas1.id}"
}
}
# provisioner "local-exec" {
# command = "aws ec2 start-instances --instance-ids ${aws_instance.softnas1.id}"
# }
# }

# If ebs volumes are attached, don't automatically import the pool. manual intervention may be required.
locals {
Expand Down Expand Up @@ -485,8 +484,8 @@ output "softnas1_private_ip" {
}

resource "null_resource" "provision_softnas_volumes" {
depends_on = ["aws_volume_attachment.softnas1_ebs_att"]

depends_on = ["null_resource.provision_softnas", "null_resource.create_ami"]
# "null_resource.start-softnas-after-ebs-attach"
triggers {
instanceid = "${ aws_instance.softnas1.id }"
}
Expand Down Expand Up @@ -522,6 +521,10 @@ output "provision_softnas_volumes" {
# wakeup a node after sleep
resource "null_resource" "start-softnas" {
count = "${var.sleep ? 0 : 1}"

triggers {
instanceid = "${ aws_instance.softnas1.id }"
}

provisioner "local-exec" {
command = "aws ec2 start-instances --instance-ids ${aws_instance.softnas1.id}"
Expand All @@ -530,6 +533,10 @@ resource "null_resource" "start-softnas" {

resource "null_resource" "shutdown-softnas" {
count = "${var.sleep ? 1 : 0}"

triggers {
instanceid = "${ aws_instance.softnas1.id }"
}

provisioner "local-exec" {
#command = "aws ec2 stop-instances --instance-ids ${aws_instance.softnas1.id}"
Expand Down
3 changes: 2 additions & 1 deletion modules/softnas/variables.tf
Expand Up @@ -87,6 +87,7 @@ variable "selected_ami" {

default = {
low_ap-southeast-2 = "ami-a24a98c0",
high_ap-southeast-2 = "ami-5e7ea03c"
#high_ap-southeast-2 = "ami-5e7ea03c"
high_ap-southeast-2 = "ami-058203bb6a3250775"
}
}
1 change: 1 addition & 0 deletions scripts/env.sh
@@ -0,0 +1 @@
export DEBIAN_FRONTEND=noninteractive
2 changes: 1 addition & 1 deletion secrets
Submodule secrets updated from bab136 to 48b2a6
9 changes: 6 additions & 3 deletions secrets.template
Expand Up @@ -5,7 +5,7 @@
# comments should remain in secrets.txt
# these values will be propagated into the secrets.template schema used to initialise any new secrets.txt file
#
TF_VAR_bridgenic=en5: Thunderbolt Ethernet Slot 1
TF_VAR_bridgenic=en0: Wi-Fi (AirPort)
# Your side fx credentials are used by the node-centos-houdini.yaml script to download and install houdini
TF_VAR_sesi_username='changethisemail@example.com'
TF_VAR_sesi_password='changethispassword'
Expand Down Expand Up @@ -66,11 +66,14 @@ TF_VAR_softnas1_path_abs=/prod_sydney_aws
TF_VAR_openfirehawkserver_prod=192.168.29.13
TF_VAR_deadline_samba_server_address_prod=192.168.29.13
TF_VAR_houdini_license_server_address_prod=192.168.29.13
# if network is set to private, the ip is internal to the host that runs vagrant. otherwise the mac will be used for dhcp on the network when set to public.
TF_VAR_network_dev=private
TF_VAR_network_prod=private
#Openfirehawkserver vm
TF_VAR_openfirehawkserver_ram_dev=4096
TF_VAR_openfirehawkserver_ram_prod=8192
TF_VAR_openfirehawkserver_ram_prod=4096
TF_VAR_openfirehawkserver_vcpus_dev=2
TF_VAR_openfirehawkserver_vcpus_prod=4
TF_VAR_openfirehawkserver_vcpus_prod=2
#openfirehawkserver hostname
TF_VAR_openfirehawkserver_name_dev=openfirehawkserverdev.exampledev.com
TF_VAR_openfirehawkserver_name_prod=openfirehawkserver.dexample.com
Expand Down

0 comments on commit 9d4f5bd

Please sign in to comment.