
Add harvester provider support #1120

Closed
wants to merge 3 commits into from
78 changes: 78 additions & 0 deletions backend_modules/harvester/base/main.tf
@@ -0,0 +1,78 @@

locals {
images_used = var.use_shared_resources ? [] : var.images
image_urls = {
almalinux8o = "${var.use_mirror_images ? "http://${var.mirror}" : "https://repo.almalinux.org"}/almalinux/8/cloud/x86_64/images/AlmaLinux-8-GenericCloud-latest.x86_64.qcow2"
centos6o = "${var.use_mirror_images ? "http://${var.mirror}" : "https://cloud.centos.org"}/centos/6/images/CentOS-6-x86_64-GenericCloud.qcow2"
centos7 = "${var.use_mirror_images ? "http://${var.mirror}" : "https://github.com"}/uyuni-project/sumaform-images/releases/download/4.3.0/centos7.qcow2"
centos7o = "${var.use_mirror_images ? "http://${var.mirror}" : "https://cloud.centos.org"}/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2"
centos8o = "${var.use_mirror_images ? "http://${var.mirror}" : "https://cloud.centos.org"}/centos/8/x86_64/images/CentOS-8-GenericCloud-8.2.2004-20200611.2.x86_64.qcow2"
amazonlinux2o = "${var.use_mirror_images ? "http://${var.mirror}" : "https://cdn.amazonlinux.com"}/os-images/2.0.20210721.2/kvm/amzn2-kvm-2.0.20210721.2-x86_64.xfs.gpt.qcow2"
opensuse152o = "${var.use_mirror_images ? "http://${var.mirror}" : "https://download.opensuse.org"}/distribution/leap/15.2/appliances/openSUSE-Leap-15.2-JeOS.x86_64-OpenStack-Cloud.qcow2"
opensuse153o = "${var.use_mirror_images ? "http://${var.mirror}" : "https://download.opensuse.org"}/distribution/leap/15.3/appliances/openSUSE-Leap-15.3-JeOS.x86_64-OpenStack-Cloud.qcow2"
opensuse153armo = "${var.use_mirror_images ? "http://${var.mirror}" : "https://download.opensuse.org"}/distribution/leap/15.3/appliances/openSUSE-Leap-15.3-ARM-JeOS-efi.aarch64.qcow2"
sles15 = "${var.use_mirror_images ? "http://${var.mirror}" : "http://download.suse.de"}/ibs/Devel:/Galaxy:/Terraform:/Images/images/sles15.x86_64.qcow2"
sles15o = "${var.use_mirror_images ? "http://${var.mirror}" : "http://download.suse.de"}/install/SLE-15-JeOS-GM/SLES15-JeOS.x86_64-15.0-OpenStack-Cloud-GM.qcow2"
sles15sp1 = "${var.use_mirror_images ? "http://${var.mirror}" : "http://download.suse.de"}/ibs/Devel:/Galaxy:/Terraform:/Images/images/sles15sp1.x86_64.qcow2"
sles15sp1o = "${var.use_mirror_images ? "http://${var.mirror}" : "http://download.suse.de"}/install/SLE-15-SP1-JeOS-QU4/SLES15-SP1-JeOS.x86_64-15.1-OpenStack-Cloud-QU4.qcow2"
sles15sp2 = "${var.use_mirror_images ? "http://${var.mirror}" : "http://download.suse.de"}/ibs/Devel:/Galaxy:/Terraform:/Images/images/sles15sp2.x86_64.qcow2"
sles15sp2o = "${var.use_mirror_images ? "http://${var.mirror}" : "http://download.suse.de"}/install/SLE-15-SP2-JeOS-GM/SLES15-SP2-JeOS.x86_64-15.2-OpenStack-Cloud-GM.qcow2"
sles15sp3o = "${var.use_mirror_images ? "http://${var.mirror}" : "http://download.suse.de"}/install/SLE-15-SP3-JeOS-GM/SLES15-SP3-JeOS.x86_64-15.3-OpenStack-Cloud-GM.qcow2"
sles15sp4o = "${var.use_mirror_images ? "http://${var.mirror}" : "http://download.suse.de"}/install/SLE-15-SP4-Minimal-GM/SLES15-SP4-Minimal-VM.x86_64-OpenStack-Cloud-GM.qcow2"
sles11sp4 = "${var.use_mirror_images ? "http://${var.mirror}" : "http://download.suse.de"}/ibs/Devel:/Galaxy:/Terraform:/Images/images/sles11sp4.x86_64.qcow2"
sles12sp3 = "${var.use_mirror_images ? "http://${var.mirror}" : "http://download.suse.de"}/ibs/Devel:/Galaxy:/Terraform:/Images/images/sles12sp3.x86_64.qcow2"
sles12sp4 = "${var.use_mirror_images ? "http://${var.mirror}" : "http://download.suse.de"}/ibs/Devel:/Galaxy:/Terraform:/Images/images/sles12sp4.x86_64.qcow2"
sles12sp4o = "${var.use_mirror_images ? "http://${var.mirror}" : "http://download.suse.de"}/install/SLE-12-SP4-JeOS-GM/SLES12-SP4-JeOS.x86_64-12.4-OpenStack-Cloud-GM.qcow2"
sles12sp5o = "${var.use_mirror_images ? "http://${var.mirror}" : "http://download.suse.de"}/install/SLE-12-SP5-JeOS-GM/SLES12-SP5-JeOS.x86_64-12.5-OpenStack-Cloud-GM.qcow2"
ubuntu1604o = "${var.use_mirror_images ? "http://${var.mirror}" : "https://cloud-images.ubuntu.com"}/xenial/current/xenial-server-cloudimg-amd64-disk1.img"
ubuntu1804 = "${var.use_mirror_images ? "http://${var.mirror}" : "https://github.com"}/uyuni-project/sumaform-images/releases/download/4.4.0/ubuntu1804.qcow2"
ubuntu1804o = "${var.use_mirror_images ? "http://${var.mirror}" : "https://cloud-images.ubuntu.com"}/bionic/current/bionic-server-cloudimg-amd64.img"
ubuntu2004o = "${var.use_mirror_images ? "http://${var.mirror}" : "https://cloud-images.ubuntu.com"}/focal/current/focal-server-cloudimg-amd64.img"
debian9o = "${var.use_mirror_images ? "http://${var.mirror}" : "https://cloud.debian.org"}/images/cloud/OpenStack/current-9/debian-9-openstack-amd64.qcow2"
debian10o = "${var.use_mirror_images ? "http://${var.mirror}" : "https://cloud.debian.org"}/images/cloud/OpenStack/current-10/debian-10-openstack-amd64.qcow2"
debian11o = "${var.use_mirror_images ? "http://${var.mirror}" : "https://cloud.debian.org"}/cdimage/cloud/bullseye/latest/debian-11-genericcloud-amd64.qcow2"
opensuse152-ci-pr = "${var.use_mirror_images ? "http://${var.mirror}" : "https://download.opensuse.org"}/repositories/systemsmanagement:/sumaform:/images:/libvirt/images/opensuse152-ci-pr.x86_64.qcow2"
opensuse153-ci-pr = "http://minima-mirror.mgr.prv.suse.net/repositories/systemsmanagement:/sumaform:/images:/libvirt/images/opensuse153-ci-pr.x86_64.qcow2"
opensuse153-ci-pr-client = "${var.use_mirror_images ? "http://${var.mirror}" : "https://download.opensuse.org"}/repositories/systemsmanagement:/sumaform:/images:/libvirt/images/opensuse153-ci-pr-client.x86_64.qcow2"
slemicro51-ign = "${var.use_mirror_images ? "http://${var.mirror}" : "http://download.opensuse.org"}/repositories/systemsmanagement:/sumaform:/images:/microos/images_51/SUSE-MicroOS.x86_64-sumaform.qcow2"
slemicro51o-ign = "${var.use_mirror_images ? "http://${var.mirror}" : "http://download.suse.de"}/install/SLE-Micro-5.1-GM/SUSE-MicroOS.x86_64-5.1.0-Default-GM.raw.xz"
}
network_name = lookup(var.provider_settings, "network_name", "default")
bridge = lookup(var.provider_settings, "bridge", null)
Member:
This variable doesn't look to be used.

Contributor Author:
Right! I will remove it.

additional_network = lookup(var.provider_settings, "additional_network", null)
namespace = lookup(var.provider_settings, "namespace", "default")
}

resource "harvester_image" "images" {
for_each = toset(local.images_used)

name = "${var.name_prefix}${each.value}"
display_name = "${var.name_prefix}${each.value}"
namespace = local.namespace
source_type = "download"
url = local.image_urls[each.value]
}

resource "harvester_network" "additional_network" {
count = local.additional_network == null ? 0 : 1
name = "${var.name_prefix}private"
namespace = local.namespace

route_mode = "manual"
route_cidr = local.additional_network
route_gateway = cidrhost(local.additional_network, 1)
vlan_id = 4000
}

output "configuration" {
depends_on = [
harvester_image.images,
harvester_network.additional_network,
]
value = {
additional_network = local.additional_network
additional_network_name = join(",", harvester_network.additional_network.*.name)

network_name = local.network_name
}
}
1 change: 1 addition & 0 deletions backend_modules/harvester/base/variables.tf
9 changes: 9 additions & 0 deletions backend_modules/harvester/base/versions.tf
@@ -0,0 +1,9 @@
terraform {
required_version = ">= 1.1.5"
Member:
Do we really need this terraform version? Currently, sumaform is tested to work with terraform version "1.0.10".

Contributor Author:
Not really, I just used what is in Tumbleweed. I will change it to 1.0.10, but can we at least keep ">="? My modus operandi with a newly cloned sumaform is to switch all versions to the most recent ones. I can't speak for the testsuite, but regular non-testsuite use works fine with the latest.

Member:
Good for me, and it should work fine there. Also, we should update the terraform version again :)

required_providers {
harvester = {
source = "harvester/harvester"
version = ">= 0.5.0"
}
}
}
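Following the discussion above, a minimal sketch of versions.tf with the agreed-upon constraint; this assumes the provider requirement stays exactly as proposed in this PR:

  terraform {
    required_version = ">= 1.0.10"

    required_providers {
      harvester = {
        source  = "harvester/harvester"
        version = ">= 0.5.0"
      }
    }
  }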
5 changes: 5 additions & 0 deletions backend_modules/harvester/host/combustion-script
@@ -0,0 +1,5 @@
#!/bin/bash
# combustion: network

zypper --non-interactive install qemu-guest-agent

11 changes: 11 additions & 0 deletions backend_modules/harvester/host/config.ign
@@ -0,0 +1,11 @@
{
"ignition": { "version": "3.1.0" },
"passwd": {
"users": [
{
"name": "root",
"passwordHash": "ZIy6Ivgw8UdAs"
}
]
}
}
211 changes: 211 additions & 0 deletions backend_modules/harvester/host/main.tf
@@ -0,0 +1,211 @@
locals {
resource_name_prefix = "${var.base_configuration["name_prefix"]}${var.name}"
provider_settings = merge({
memory = 1024
vcpu = 1
running = true
mac = null
cpu_model = "custom"
},
contains(var.roles, "server") ? { memory = 4096, vcpu = 2 } : {},
contains(var.roles, "server") && lookup(var.base_configuration, "testsuite", false) ? { memory = 8192, vcpu = 4 } : {},
contains(var.roles, "proxy") && lookup(var.base_configuration, "testsuite", false) ? { memory = 2048, vcpu = 2 } : {},
contains(var.roles, "pxe_boot")? { memory = 2048 } : {},
contains(var.roles, "mirror") ? { memory = 1024 } : {},
contains(var.roles, "build_host") ? { vcpu = 2 } : {},
contains(var.roles, "controller") ? { memory = 2048 } : {},
contains(var.roles, "grafana") ? { memory = 4096 } : {},
contains(var.roles, "virthost") ? { memory = 3072, vcpu = 3 } : {},
contains(var.roles, "jenkins") ? { memory = 16384, vcpu = 4 } : {},
var.provider_settings,
contains(var.roles, "virthost") ? { cpu_model = "host-passthrough" } : {},
)
}
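Because merge() keeps the last value for each duplicated key, the role-based defaults above can be overridden per host via provider_settings, while the trailing virthost cpu_model entry always wins. A hypothetical host definition for illustration only; the module path, image, and sizes below are assumptions, not part of this PR:

  module "big_server" {
    source             = "./backend_modules/harvester/host"
    base_configuration = module.base.configuration
    name               = "server"
    image              = "sles15sp4o"
    roles              = ["server"]
    provider_settings = {
      memory = 16384 # overrides the 4096/8192 server defaults from the merge above
      vcpu   = 8
    }
  }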

data "template_file" "user_data" {
template = file("${path.module}/user_data.yaml")
vars = {
image = var.image
use_mirror_images = var.base_configuration["use_mirror_images"]
mirror = var.base_configuration["mirror"]
install_salt_bundle = var.install_salt_bundle
}
}

data "template_file" "network_config" {
template = file("${path.module}/network_config.yaml")
vars = {
image = var.image
}
}

resource "harvester_volume" "main_disk" {
name = "${local.resource_name_prefix}${var.quantity > 1 ? "-${count.index + 1}" : ""}-main-disk"
image = "${var.base_configuration["use_shared_resources"] ? "" : var.base_configuration["name_prefix"]}${var.image}"
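# 200 GiB main disk, expressed in bytes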
size = 214748364800
count = var.quantity
}

resource "harvester_volume" "data_disk" {
name = "${local.resource_name_prefix}${var.quantity > 1 ? "-${count.index + 1}" : ""}-data-disk"
// additional_disk_size is given in GiB and needs to be converted to bytes
size = (var.additional_disk_size == null ? 0 : var.additional_disk_size) * 1024 * 1024 * 1024
count = var.additional_disk_size == null ? 0 : var.additional_disk_size > 0 ? var.quantity : 0
}

resource "harvester_virtualmachine" "domain" {
name = "${local.resource_name_prefix}${var.quantity > 1 ? "-${count.index + 1}" : ""}"
# The libvirt provider takes memory in megabytes, Harvester in bytes. Assume a value without a unit suffix is in megabytes.
memory = can(regex("[kmgKMG]$", local.provider_settings["memory"])) ? local.provider_settings["memory"] : local.provider_settings["memory"] * 1024 * 1024
cpu = local.provider_settings["vcpu"]
run_strategy = local.provider_settings["running"] ? "Always" : "Halted"
count = var.quantity

dynamic "disk" {
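# Attach this instance's main disk and, when one was created above, its data disk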
for_each = concat(
length(harvester_volume.main_disk) == var.quantity ? [{"volume_name" : harvester_volume.main_disk[count.index].name}] : [],
length(harvester_volume.data_disk) == var.quantity ? [{"volume_name" : harvester_volume.data_disk[count.index].name}] : []
)
content {
name = disk.value.volume_name
existing_volume_name = disk.value.volume_name
}
}

cloudinit {
user_data = data.template_file.user_data.rendered
network_data = data.template_file.network_config.rendered
}

dynamic "network_interface" {
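# slice() picks which of the two candidate interfaces to attach: the base interface is
# skipped when connect_to_base_network is false, and the additional interface is included
# only when an additional network is configured and connect_to_additional_network is set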
for_each = slice(
[
{
"wait_for_lease" = true
"network_name" = var.base_configuration["network_name"]
"mac" = local.provider_settings["mac"]
"name" = "base"
},
{
"wait_for_lease" = false
"network_name" = var.base_configuration["additional_network_name"]
"mac" = null
"name" = "additional"
},
],
var.connect_to_base_network ? 0 : 1,
var.base_configuration["additional_network"] != null && var.connect_to_additional_network ? 2 : 1,
)
content {
wait_for_lease = network_interface.value.wait_for_lease
name = network_interface.value.name
network_name = network_interface.value.network_name
mac_address = network_interface.value.mac
}
}
}

resource "null_resource" "provisioning" {
depends_on = [harvester_virtualmachine.domain]

triggers = {
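# Re-create this resource, and so re-run provisioning, whenever the backing volume,
# the virtual machine, or any grain-relevant input changes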
main_volume_id = length(harvester_volume.main_disk) == var.quantity ? harvester_volume.main_disk[count.index].id : null
domain_id = length(harvester_virtualmachine.domain) == var.quantity ? harvester_virtualmachine.domain[count.index].id : null
grains_subset = yamlencode(
{
domain = var.base_configuration["domain"]
use_avahi = var.base_configuration["use_avahi"]
timezone = var.base_configuration["timezone"]
use_ntp = var.base_configuration["use_ntp"]
testsuite = var.base_configuration["testsuite"]
roles = var.roles
use_os_released_updates = var.use_os_released_updates
use_os_unreleased_updates = var.use_os_unreleased_updates
install_salt_bundle = var.install_salt_bundle
additional_repos = var.additional_repos
additional_repos_only = var.additional_repos_only
additional_certs = var.additional_certs
additional_packages = var.additional_packages
swap_file_size = var.swap_file_size
authorized_keys = var.ssh_key_path
gpg_keys = var.gpg_keys
ipv6 = var.ipv6
})
}

count = var.provision ? var.quantity : 0

connection {
host = [ for ni in harvester_virtualmachine.domain[count.index].network_interface : ni.ip_address if length(ni.ip_address) > 0 ][0]
user = "root"
password = "linux"
}

provisioner "file" {
source = "salt"
destination = "/root"
}

provisioner "remote-exec" {
inline = [
"bash /root/salt/wait_for_salt.sh",
]
}

provisioner "file" {
content = yamlencode(merge(
{
hostname = "${local.resource_name_prefix}${var.quantity > 1 ? "-${count.index + 1}" : ""}"
domain = var.base_configuration["domain"]
use_avahi = var.base_configuration["use_avahi"]
additional_network = var.base_configuration["additional_network"]
timezone = var.base_configuration["timezone"]
use_ntp = var.base_configuration["use_ntp"]
testsuite = var.base_configuration["testsuite"]
roles = var.roles
use_os_released_updates = var.use_os_released_updates
use_os_unreleased_updates = var.use_os_unreleased_updates
install_salt_bundle = var.install_salt_bundle
additional_repos = var.additional_repos
additional_repos_only = var.additional_repos_only
additional_certs = var.additional_certs
additional_packages = var.additional_packages
swap_file_size = var.swap_file_size
authorized_keys = concat(
var.base_configuration["ssh_key_path"] != null ? [trimspace(file(var.base_configuration["ssh_key_path"]))] : [],
var.ssh_key_path != null ? [trimspace(file(var.ssh_key_path))] : [],
)
gpg_keys = var.gpg_keys
connect_to_base_network = var.connect_to_base_network
connect_to_additional_network = var.connect_to_additional_network
reset_ids = true
ipv6 = var.ipv6
data_disk_device = contains(var.roles, "server") || contains(var.roles, "proxy") || contains(var.roles, "mirror") || contains(var.roles, "jenkins") ? "vdb" : null
provider = "harvester"
},
var.grains))
destination = "/etc/salt/grains"
}

provisioner "remote-exec" {
inline = [
"bash /root/salt/first_deployment_highstate.sh",
]
}

provisioner "remote-exec" {
inline = [
"bash /root/salt/post_provisioning_cleanup.sh",
]
}
}

output "configuration" {
depends_on = [harvester_virtualmachine.domain, null_resource.provisioning]
value = {
ids = harvester_virtualmachine.domain[*].id
hostnames = [for value_used in harvester_virtualmachine.domain : "${value_used.name}.${var.base_configuration["domain"]}"]
macaddrs = [for value_used in harvester_virtualmachine.domain :
[ for ni in value_used.network_interface : ni.mac_address ] if length(value_used.network_interface) > 0]
}
}
11 changes: 11 additions & 0 deletions backend_modules/harvester/host/network_config.yaml
@@ -0,0 +1,11 @@
network:
version: 1
config:
- type: physical
%{ if image == "ubuntu2004o" || image == "ubuntu1804o" || image == "ubuntu1604o" }
name: ens3
%{ else }
name: eth0
%{ endif }
subnets:
- type: dhcp