Skip to content

Commit

Permalink
Temp Commit
Browse files Browse the repository at this point in the history
* scaffolding 942

Resolves: harvester/tests#942
  • Loading branch information
Mike Russell committed Oct 10, 2023
1 parent 7eb2e43 commit 0f3f1ff
Show file tree
Hide file tree
Showing 49 changed files with 3,447 additions and 0 deletions.
Empty file.
4 changes: 4 additions & 0 deletions vagrant-pxe-airgap-registry-harvester/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Notes

- ansible-galaxy collection install ansible.posix
- The region on MinIO is being set, but it will display as empty in the UI.
163 changes: 163 additions & 0 deletions vagrant-pxe-airgap-registry-harvester/Vagrantfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,163 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# frozen_string_literal: true

require 'yaml'

VAGRANTFILE_API_VERSION = "2"
# vagrant-libvirt should be specified
ENV['VAGRANT_DEFAULT_PROVIDER'] = "libvirt"

# Resolve settings.yml relative to this Vagrantfile so vagrant commands
# behave the same from any working directory.
@root_dir = File.dirname(File.expand_path(__FILE__))
@settings = YAML.load_file(File.join(@root_dir, "settings.yml"))

# Per-node override lookup: prefer a key set on the individual cluster-node
# entry in harvester_network_config, otherwise fall back to the cluster-wide
# default in harvester_node_config.
def node_setting(node_number, key)
  node = @settings['harvester_network_config']['cluster'][node_number]
  node.fetch(key, @settings['harvester_node_config'][key])
end

# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  # Give VMs a generous window to shut down cleanly before being killed.
  config.vm.graceful_halt_timeout = 120

  # PXE/DHCP (Kea) server; also carries dedicated disks for MinIO, the
  # Caddy file server and TFTP, provisioned via Ansible below.
  config.vm.define :pxe_server_kea do |pxe_server_kea|
    pxe_server_kea.vm.box = 'generic/debian10'
    pxe_server_kea.vm.hostname = 'pxe-server-kea'

    pxe_server_kea.vm.network 'private_network',
                              ip: @settings['kea_server']['main_network']['ip'],
                              libvirt__guest_ipv6: "no",
                              libvirt__dhcp_enabled: false,
                              libvirt__network_name: 'harvester-airgap',
                              autostart: true,
                              libvirt__always_destroy: true

    # pxe_server_kea.vm.network 'private_network',
    #                           ip: '192.168.5.254',
    #                           libvirt__guest_ipv6: "no",
    #                           libvirt__dhcp_enabled: false,
    #                           libvirt__network_name: 'non-airgap-net-temp',
    #                           autostart: true,
    #                           libvirt__always_destroy: true

    pxe_server_kea.vm.provider :libvirt do |libvirt|
      libvirt.cpu_mode = 'host-passthrough'
      libvirt.memory = '8192'
      libvirt.cpus = '4'
      libvirt.nic_model_type = 'e1000'
      # One dedicated disk per service so the provisioner can format and
      # mount each independently (sizes/devices come from settings.yml).
      %w[minio caddy_fileserver tftp].each do |service|
        volume = @settings['kea_server']['storage'][service]
        libvirt.storage :file, size: volume['size'], device: volume['device']
      end
    end

    pxe_server_kea.vm.provision :ansible do |ansible|
      ansible.playbook = 'ansible/setup_pxe_server_kea.yml'
      ansible.verbose = "vvv"
      ansible.extra_vars = {
        settings: @settings
      }
    end
  end

  # Define one PXE-booted VM per Harvester cluster node. They are created
  # powered off (autostart: false) and booted later by the Ansible playbook.
  cluster_node_index = @settings['harvester_cluster_nodes'] - 1
  (0..cluster_node_index).each do |node_number|
    vm_name = "harvester-node-#{node_number}"
    config.vm.define vm_name, autostart: false do |harvester_node|
      harvester_node.vm.hostname = vm_name
      # Fixed MAC so the Kea DHCP server can hand out a deterministic IP.
      harvester_node.vm.network 'private_network',
                                libvirt__network_name: 'harvester-airgap',
                                mac: @settings['harvester_network_config']['cluster'][node_number]['mac']

      harvester_node.vm.provider :libvirt do |libvirt|
        libvirt.cpu_mode = 'host-passthrough'
        libvirt.memory = node_setting(node_number, 'memory')
        libvirt.cpus = node_setting(node_number, 'cpu')
        libvirt.storage :file,
                        size: node_setting(node_number, 'disk_size'),
                        type: 'qcow2',
                        bus: 'virtio',
                        device: 'vda'
        # Prefer the local disk once installed; fall back to PXE boot on the
        # air-gapped network for the initial install.
        libvirt.boot 'hd'
        libvirt.boot 'network' => 'harvester-airgap'
        # NOTE: default to UEFI boot. Comment this out for legacy BIOS.
        libvirt.loader = '/usr/share/qemu/OVMF.fd'
        libvirt.nic_model_type = 'e1000'
      end
    end
  end

  # config.vm.define :tinybox do |tinybox|
  #   tinybox.vm.box = 'generic/debian10'
  #   tinybox.vm.hostname = 'tinybox'
  #   # set tiny box on the private network harvester-airgap with dhcp enabled
  #   tinybox.vm.network 'private_network',
  #                      libvirt__network_name: 'harvester-airgap',
  #                      mac: '02:00:00:71:29:7B'
  # end

  # For the full list of common configuration options (boxes, forwarded
  # ports, synced folders, shell provisioners, ...) see the online
  # documentation at https://docs.vagrantup.com.
end
5 changes: 5 additions & 0 deletions vagrant-pxe-airgap-registry-harvester/ansible.cfg
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Ansible defaults for this lab environment.
[defaults]
# Render task results as YAML instead of the default JSON for readability.
stdout_callback = yaml
# Auto-discover the remote Python interpreter without emitting warnings.
interpreter_python = auto_silent
# Skip SSH host key verification (convenient for throwaway lab VMs that are
# rebuilt constantly; do not reuse outside a lab).
host_key_checking = False
# Drop into the task debugger on failures.
enable_task_debugger = True
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
---
# Boot a single Harvester node VM (vagrant up) and block until its web UI
# answers over HTTPS, signalling the node has finished installing/booting.
# Expects: node_number, harvester_network_config.

- name: create "Booting Node {{ node_number }}" message
  ansible.builtin.shell: >
    figlet "Booting Node {{ node_number }}" 2>/dev/null || echo "Booting Node {{ node_number }}"
  register: figlet_result

- name: print "Booting Node {{ node_number }}"
  ansible.builtin.debug:
    msg: "{{ figlet_result.stdout }}"

- name: set Harvester Node IP fact
  ansible.builtin.set_fact:
    harvester_node_ip: "{{ harvester_network_config['cluster'][node_number | int]['ip'] }}"

- name: boot Harvester Node {{ node_number }}
  ansible.builtin.shell: >
    VAGRANT_LOG=info vagrant up harvester-node-{{ node_number }}
  register: harvester_node_boot_result

# Poll until the UI returns HTTP 200: up to 35 retries x 120s delay
# (~70 minutes) with a 120s per-request timeout.
- name: wait for Harvester Node {{ harvester_node_ip }} to get ready
  ansible.builtin.uri:
    url: "https://{{ harvester_node_ip }}"
    validate_certs: no
    status_code: 200
    timeout: 120
  register: auth_modes_lookup_result
  until: auth_modes_lookup_result.status == 200
  retries: 35
  delay: 120
Original file line number Diff line number Diff line change
@@ -0,0 +1,130 @@
# Install and configure Caddy as a plain-HTTP file server for the air-gapped
# lab: the Harvester ISO, its sha512 checksum, and VM images are downloaded
# onto the dedicated caddy_fileserver disk and served to PXE-booted nodes.
- name: Setup Caddy to serve files
  block:
    - name: mention what we are doing here
      ansible.builtin.shell: >
        echo "Set up Caddy to server files.."
      register: mention_what_we_are_doing_here_result

    - name: Print return information from the previous task
      ansible.builtin.debug:
        var: mention_what_we_are_doing_here_result
        verbosity: 2
      ignore_errors: true

    # Prerequisites for adding the upstream Caddy apt repository.
    - name: install additional packages needed via apt
      ansible.builtin.apt:
        name: "{{ item }}"
        state: present
      loop:
        - debian-keyring
        - debian-archive-keyring
        - apt-transport-https

    - name: add caddy gpg key to host
      ansible.builtin.apt_key:
        url: https://dl.cloudsmith.io/public/caddy/stable/gpg.key
        state: present

    - name: add caddy to etc sources list
      ansible.builtin.apt_repository:
        repo: "deb [arch=amd64] https://dl.cloudsmith.io/public/caddy/stable/deb/debian any-version main"
        state: present

    - name: install caddy
      ansible.builtin.apt:
        name: caddy
        state: present
        update_cache: true

    # Stop the service while the config is rewritten and the data disk
    # populated; it is started again at the end of the block.
    - name: stop caddy systemd service
      ansible.builtin.systemd:
        name: caddy.service
        state: stopped
        daemon_reload: yes
        enabled: yes

    - name: copy over the Caddyfile template to /etc/caddy/Caddyfile and overwrite force if needed
      ansible.builtin.template:
        src: Caddyfile.j2
        dest: /etc/caddy/Caddyfile
        owner: caddy
        group: caddy
        mode: '0644'
        force: yes

    # version.yaml is the Harvester upgrade manifest; its REPLACE_CHECKSUM
    # placeholder is filled in by the replace task further below.
    - name: copy over the version.yaml template to the caddy directory
      ansible.builtin.template:
        src: version.yaml.j2
        dest: "{{ settings.kea_server.storage.caddy_fileserver.mount_point }}/version.yaml"
        owner: caddy
        group: caddy
        mode: '0644'

    # NOTE(review): downloads are best-effort (ignore_errors) — a failed
    # download is only noticed later when nodes request the missing files.
    - name: download artifacts into the caddy_fileserver mount_point do our best but ignore errors for now
      ansible.builtin.get_url:
        url: "{{ item }}"
        dest: "{{ settings.kea_server.storage.caddy_fileserver.mount_point }}/{{ item | basename }}"
        owner: caddy
        group: caddy
      loop:
        - "{{ settings.kea_server.caddy_fileserver_config.harvester_iso_to_download }}"
        - "{{ settings.kea_server.caddy_fileserver_config.harvester_sha512 }}"
      ignore_errors: true

    - name: loop over the vm images and download them into the caddy_fileserver mount_point again try our best but ignore errors for now
      ansible.builtin.get_url:
        url: "{{ item }}"
        dest: "{{ settings.kea_server.storage.caddy_fileserver.mount_point }}/{{ item | basename }}"
        owner: caddy
        group: caddy
      loop: "{{ settings.kea_server.caddy_fileserver_config.vm_images_to_download }}"
      ignore_errors: true

    # Extract just the checksum (first whitespace-delimited token of the
    # first line) from the downloaded *.sha512 file.
    - name: head the sha512 sum file and keep the result
      ansible.builtin.shell: |
        head -n 1 {{ settings.kea_server.storage.caddy_fileserver.mount_point }}/*sha512 | grep -o '^\S*'
      register: head_the_sha512_sum_file_result

    - name: replace REPLACE_CHECKSUM text in version.yaml in caddy_fileserver mount_point with the head_the_sha512_sum_file_result.stdout
      ansible.builtin.replace:
        path: "{{ settings.kea_server.storage.caddy_fileserver.mount_point }}/version.yaml"
        regexp: 'REPLACE_CHECKSUM'
        replace: "{{ head_the_sha512_sum_file_result.stdout }}"
        backup: no

    - name: build a harvester directory within the caddy base directory
      ansible.builtin.file:
        path: "{{ settings.kea_server.storage.caddy_fileserver.mount_point }}/{{ settings.kea_server.caddy_fileserver_config.harvester_folder_name }}"
        state: directory
        owner: caddy
        group: caddy

    - name: build the harvester upgrade node directory name within the caddy harvester base directory
      ansible.builtin.file:
        path: "{{ settings.kea_server.storage.caddy_fileserver.mount_point }}/{{ settings.kea_server.caddy_fileserver_config.harvester_folder_name }}/{{ settings.kea_server.caddy_fileserver_config.harvester_new_node_post_upgrade_folder }}"
        state: directory
        owner: caddy
        group: caddy

    - name: change ownership recursively of caddy_fileserver mount_point
      ansible.builtin.file:
        path: "{{ settings.kea_server.storage.caddy_fileserver.mount_point }}"
        owner: caddy
        group: caddy
        recurse: yes

    - name: start up caddy systemd service again
      ansible.builtin.systemd:
        name: caddy.service
        state: started
        daemon_reload: yes
        enabled: yes

  rescue:
    # NOTE(review): any failure in the block above is swallowed with only a
    # debug message and the play continues. Consider ansible.builtin.fail
    # here if a working Caddy file server is mandatory for later plays.
    - name: Print when errors
      ansible.builtin.debug:
        msg: 'I caught an error in configuring vm further'
  always:
    - name: Always do this
      ansible.builtin.debug:
        msg: "This always executes"
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Serve the downloaded Harvester artifacts over plain HTTP on port 80.
# Jinja2 template: mount_point is filled in from settings.yml by Ansible.
:80 {

	root * {{ settings['kea_server']['storage']['caddy_fileserver']['mount_point'] }}

	# Directory listings enabled so artifacts can be browsed by hand.
	file_server browse
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Harvester Version manifest served by the Caddy file server to drive an
# air-gapped upgrade. Jinja2 template rendered by the Ansible provisioner;
# REPLACE_CHECKSUM is substituted afterwards with the real ISO sha512 by an
# ansible.builtin.replace task.
apiVersion: harvesterhci.io/v1beta1
kind: Version
metadata:
  name: {{ settings['kea_server']['caddy_fileserver_config']['version_metadata_name'] }}
  namespace: harvester-system
spec:
  isoChecksum: REPLACE_CHECKSUM
  # ISO is fetched from the Kea/Caddy server on the air-gapped network.
  isoURL: http://{{ settings['kea_server']['main_network']['ip'] }}/{{ settings['kea_server']['caddy_fileserver_config']['harvester_iso_name'] }}
  releaseDate: '20230921'
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# Public DNS resolvers: Cloudflare (1.1.1.1) and Google (8.8.8.8 / 8.8.4.4).
nameserver 1.1.1.1
nameserver 8.8.8.8
nameserver 8.8.4.4
Loading

0 comments on commit 0f3f1ff

Please sign in to comment.