Custom install target (#43)
* allow installer target to be specified

First pass at resolving
#42.
Work still needs to be done to allow the deploy directory to change to
fully support upgrades.

* rearrange variables

* allow cluster directory to be specified

* fix issues

* ensure cluster dir is always present and reachable

* make directory owned by ssh user; use sudo chmod

* address review comments

* fix remote-exec list

* consolidate remote-exec

* update tests

* make get-args do what it says it will do

Create a function to ensure that a directory is reachable
and call it where needed

* fix reachability script

* set directory in image tests
arewm authored and hassenius committed Mar 1, 2019
1 parent d16e58b commit 1f8d8db
Showing 16 changed files with 245 additions and 219 deletions.
4 changes: 4 additions & 0 deletions README.md
@@ -33,6 +33,8 @@ If the default SSH user is not the root user, the default user must have passwor
|local-hooks | |No |Locally run hooks at different stages in the cluster setup process. See below for details|
|on_hook_failure |fail | |Behavior when hooks fail. Anything other than `fail` will `continue`|
|install-verbosity | |No | Verbosity of the icp ansible installer. -v to -vvvv. See ansible documentation for verbosity information |
|install-command |install |No | Installer command to run |
|cluster-directory |/opt/ibm/cluster |No | Location to use for the cluster directory |
|cluster_dir_owner | |No |Username to own cluster directory after an install. Defaults to `ssh_user`|
| **Terraform to cluster ssh configuration**|
|ssh_user |root |No |Username for Terraform to ssh into the ICP cluster. This is typically the default user for the relevant cloud vendor|
@@ -303,6 +305,8 @@ To avoid breaking existing templates which depend on the module it is recommend


### Versions and changes
- Allow cluster directory to be specified
- Allow other targets to be called from `icp-inception`
- Fix issues when the owner of cluster files is something other than `ssh_user`
- Allow the cluster directory to be owned by an arbitrary user after install

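For reference, a minimal sketch of how a consuming template might pass the new options through to the module. The module source path and the values shown are illustrative placeholders, not part of this commit:

```hcl
module "icpprovision" {
  # Placeholder source; point this at the released module in your own template
  source = "github.com/ibm-cloud-architecture/terraform-module-icp-deploy"

  # ...existing required settings (icp-master, icp-worker, ssh_user, icp-inception, ...) omitted...

  # New in this change (all optional):
  install-command   = "install"            # icp-inception target to run, e.g. "upgrade"
  cluster-directory = "/opt/ibm/cluster"   # where the cluster configuration lives on the boot node
  cluster_dir_owner = "icpdeploy"          # placeholder user to own the cluster directory after install
}
```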
75 changes: 35 additions & 40 deletions main.tf
@@ -57,6 +57,23 @@ resource "null_resource" "icp-cluster" {

## icp-boot-preconfig hooks are run before icp-docker, if defined

# To make script parameters more consistent we'll define a common set here
locals {
script_options = "${join(" -", list(""), compact(list(
var.icp-inception == "" ? "" : "i ${var.icp-inception}",
var.cluster-directory == "" ? "" : "d ${var.cluster-directory}",
var.install-verbosity == "" ? "" : "l ${var.install-verbosity}",
var.install-command == "" ? "" : "c ${var.install-command}",
var.image_location_user == "" ? "" : "u ${var.image_location_user}",
var.image_location_pass == "" ? "" : "p ${var.image_location_pass}",
var.image_location == "" ? "" : "l ${var.image_location}",
length(var.image_locations) == 0 ? "" : "l ${join(" -l ", var.image_locations )}",
var.docker_package_location == "" ? "" : "o ${var.docker_package_location}",
var.docker_image_name == "" ? "" : "k ${var.docker_image_name}",
var.docker_version == "" ? "" : "s ${var.docker_version}"
)))}"
}

resource "null_resource" "icp-docker" {
depends_on = ["null_resource.icp-boot-preconfig-continue-on-fail", "null_resource.icp-boot-preconfig-stop-on-fail", "null_resource.icp-cluster"]

@@ -68,13 +85,10 @@ resource "null_resource" "icp-docker" {
agent = "${var.ssh_agent}"
bastion_host = "${var.bastion_host}"
}



provisioner "remote-exec" {
inline = [
"mkdir -p /tmp/icp-bootmaster-scripts",
"sudo mkdir -p /opt/ibm/cluster",
"sudo chown -R ${var.ssh_user} /opt/ibm"
"mkdir -p /tmp/icp-bootmaster-scripts"
]
}

@@ -87,22 +101,11 @@ resource "null_resource" "icp-docker" {
provisioner "remote-exec" {
inline = [
"chmod a+x /tmp/icp-bootmaster-scripts/*.sh",
"/tmp/icp-bootmaster-scripts/install-docker.sh ${var.docker_package_location != "" ? "-p \"${var.docker_package_location}\"" : ""} ${var.docker_image_name != "" ? "-i ${var.docker_image_name}" : ""} ${var.docker_version != "" ? "-v ${var.docker_version}" : ""}"
"/tmp/icp-bootmaster-scripts/install-docker.sh ${local.script_options}"
]
}
}

# To make image-load more readable we'll do some interpolations here
locals {
load_image_options = "${join(" -", list(""), compact(list(
var.icp-inception == "" ? "" : "i ${var.icp-inception}",
var.image_location_user == "" ? "" : "u ${var.image_location_user}",
var.image_location_pass == "" ? "" : "p ${var.image_location_pass}",
var.image_location == "" ? "" : "l ${var.image_location}",
length(var.image_locations) == 0 ? "" : "l ${join(" -l ", var.image_locations )}"
)))}"
}

resource "null_resource" "icp-image" {
depends_on = ["null_resource.icp-docker"]

@@ -118,7 +121,7 @@ resource "null_resource" "icp-image" {
provisioner "remote-exec" {
inline = [
"echo \"Loading image ${var.icp-inception} ${var.image_location}\"",
"/tmp/icp-bootmaster-scripts/load-image.sh ${local.load_image_options}"
"/tmp/icp-bootmaster-scripts/load-image.sh ${local.script_options}"
]
}
}
@@ -167,27 +170,26 @@ resource "null_resource" "icp-config" {

provisioner "remote-exec" {
inline = [
"/tmp/icp-bootmaster-scripts/copy_cluster_skel.sh ${var.icp-inception == "" ? "" : " -v ${var.icp-inception}"}",
"chmod 600 /opt/ibm/cluster/ssh_key",
"python /tmp/icp-bootmaster-scripts/load-config.py ${var.config_strategy} ${random_string.generated_password.result}"
"/tmp/icp-bootmaster-scripts/copy_cluster_skel.sh ${local.script_options}",
"python /tmp/icp-bootmaster-scripts/load-config.py ${var.cluster-directory} ${var.config_strategy} ${random_string.generated_password.result}"
]
}

# Copy the provided or generated private key
provisioner "file" {
content = "${var.generate_key ? tls_private_key.icpkey.private_key_pem : var.icp_priv_key}"
destination = "/opt/ibm/cluster/ssh_key"
destination = "${var.cluster-directory}/ssh_key"
}


# Since the file provisioner deals badly with empty lists, we'll create the optional management nodes differently
# Later we may refactor to use this method for all node types for consistency
provisioner "remote-exec" {
inline = [
"echo -n ${join(",", var.icp-master)} > /opt/ibm/cluster/masterlist.txt",
"echo -n ${join(",", var.icp-proxy)} > /opt/ibm/cluster/proxylist.txt",
"echo -n ${join(",", var.icp-worker)} > /opt/ibm/cluster/workerlist.txt",
"echo -n ${join(",", var.icp-management)} > /opt/ibm/cluster/managementlist.txt"
"chmod 600 ${var.cluster-directory}/cluster/ssh_key",
"echo -n ${join(",", var.icp-master)} > ${var.cluster-directory}/masterlist.txt",
"echo -n ${join(",", var.icp-proxy)} > ${var.cluster-directory}/proxylist.txt",
"echo -n ${join(",", var.icp-worker)} > ${var.cluster-directory}/workerlist.txt",
"echo -n ${join(",", var.icp-management)} > ${var.cluster-directory}/managementlist.txt"
]
}

@@ -222,13 +224,6 @@ resource "null_resource" "icp-generate-hosts-files" {

# Boot node and local hooks are run before install if defined

# To make install options more readable we'll do some interpolations here
locals {
install_options = "${join(" -", list(""), compact(list(
var.icp-inception == "" ? "" : "v ${var.icp-inception}",
var.install-verbosity == "" ? "" : "l ${var.install-verbosity}"
)))}"
}
# Start the installer
resource "null_resource" "icp-install" {
depends_on = ["null_resource.local-preinstall-hook-continue-on-fail", "null_resource.local-preinstall-hook-stop-on-fail", "null_resource.icp-generate-hosts-files"]
@@ -245,7 +240,7 @@ resource "null_resource" "icp-install" {

provisioner "remote-exec" {
inline = [
"/tmp/icp-bootmaster-scripts/start_install.sh ${local.install_options}"
"/tmp/icp-bootmaster-scripts/start_install.sh ${local.script_options}"
]
}
}
@@ -291,9 +286,9 @@ resource "null_resource" "icp-worker-scaler" {
provisioner "remote-exec" {
inline = [
"chmod a+x /tmp/icp-bootmaster-scripts/scaleworkers.sh",
"sudo chown ${var.ssh_user}:${var.ssh_user} -R /opt/ibm/cluster/",
"/tmp/icp-bootmaster-scripts/scaleworkers.sh ${var.icp-inception}"
"sudo chown ${local.cluster_dir_owner}:${local.cluster_dir_owner} -R /opt/ibm/cluster/",
"sudo chown ${var.ssh_user}:${var.ssh_user} -R ${var.cluster-directory}",
"/tmp/icp-bootmaster-scripts/scaleworkers.sh ${var.icp-inception}",
"sudo chown ${local.cluster_dir_owner}:${local.cluster_dir_owner} -R ${var.cluster-directory}"
]
}
}
@@ -312,7 +307,7 @@ resource "null_resource" "icp-cluster-owner" {

provisioner "remote-exec" {
inline = [
"sudo chown ${local.cluster_dir_owner}:${local.cluster_dir_owner} -R /opt/ibm/cluster/",
"sudo chown ${local.cluster_dir_owner}:${local.cluster_dir_owner} -R ${var.cluster-directory}",
]
}
}
}
26 changes: 7 additions & 19 deletions scripts/boot-master/copy_cluster_skel.sh
@@ -1,23 +1,12 @@
#!/bin/bash
LOGFILE=/tmp/copyclusterskel.log
target="/opt/ibm"
exec 3>&1
exec > >(tee -a ${LOGFILE} >/dev/null) 2> >(tee -a ${LOGFILE} >&3)

echo "Script started with inputs $@"

while getopts ":v:t:" arg; do
case "${arg}" in
v)
icp_version=${OPTARG}
;;
t)
target=${OPTARG}
;;
esac
done

source /tmp/icp-bootmaster-scripts/functions.sh
source /tmp/icp-bootmaster-scripts/get-args.sh

# If loaded from tarball, icp version may not be specified in terraform
if [[ -z "${icp_version}" ]]; then
@@ -29,12 +18,11 @@ fi
parse_icpversion ${icp_version}
echo "registry=${registry:-not specified} org=$org repo=$repo tag=$tag"

# Ensure that /opt/ibm is present and copy default data directory
sudo mkdir -p ${target}
sudo chown $(whoami):$(whoami) -R ${target}
docker run -e LICENSE=accept -v ${target}:/data ${registry}${registry:+/}${org}/${repo}:${tag} cp -r cluster /data
# Ensure that all data copied from installer has proper ownership
sudo chown $(whoami):$(whoami) -R ${target}
# Copy the default data to the cluster directory
docker run -e LICENSE=accept -v /tmp/icp:/data ${registry}${registry:+/}${org}/${repo}:${tag} cp -r cluster /data
ensure_directory_reachable ${cluster_dir}
sudo mv /tmp/icp/cluster ${cluster_dir}
sudo chown $(whoami):$(whoami) -R ${cluster_dir}

# Take a backup of original config file, to keep a record of original settings and comments
cp ${target}/cluster/config.yaml ${target}/cluster/config.yaml-original
cp ${cluster_dir}/config.yaml ${cluster_dir}/config.yaml-original
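copy_cluster_skel.sh and the other boot-master scripts now source a shared get-args.sh instead of each running its own `getopts` loop. That file is not included in the hunks shown here; the following is only a hypothetical sketch of such a parser, assuming its flag letters mirror the ones assembled in `local.script_options` in main.tf (note that `-l` is used there for both verbosity and image locations):

```bash
# Hypothetical sketch of /tmp/icp-bootmaster-scripts/get-args.sh (not shown in this diff).
# Flag letters and all variable names other than icp_version/cluster_dir are assumptions.

cluster_dir="/opt/ibm/cluster"   # default matches the cluster-directory default in the README

while getopts ":i:d:c:l:u:p:o:k:s:" arg; do
  case "${arg}" in
    i) icp_version=${OPTARG} ;;              # icp-inception image reference
    d) cluster_dir=${OPTARG} ;;              # cluster directory on the boot node
    c) install_command=${OPTARG} ;;          # icp-inception target, e.g. install or upgrade
    l) image_locations+=("${OPTARG}") ;;     # image location(s) / verbosity share this letter
    u) image_location_user=${OPTARG} ;;      # username for a protected image location
    p) image_location_pass=${OPTARG} ;;      # password for a protected image location
    o) docker_package_location=${OPTARG} ;;  # location of a docker install package
    k) docker_image_name=${OPTARG} ;;        # docker image name to load
    s) docker_version=${OPTARG} ;;           # docker version to install
  esac
done
```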
11 changes: 11 additions & 0 deletions scripts/boot-master/functions.sh
@@ -58,3 +58,14 @@ function get_inception_image() {
image=$(docker image list | grep -m 1 inception | awk '{ print $1 ":" $2 }')
echo $image
}

function ensure_directory_reachable() {
# Ensure that the directory exists and is reachable:
# 1) It is owned by the current user
# 2) All parent directories are executable
test_dir=$1
sudo mkdir -p ${test_dir}
sudo chown $(whoami):$(whoami) ${test_dir}
f=${test_dir}
while [[ $f != / ]]; do sudo chmod a+x "$f"; f=$(dirname "$f"); done;
}
51 changes: 25 additions & 26 deletions scripts/boot-master/generate_hostsfiles.sh
@@ -3,11 +3,10 @@ LOGFILE=/tmp/generate_hostsfiles.log
exec 3>&1
exec > >(tee -a ${LOGFILE} >/dev/null) 2> >(tee -a ${LOGFILE} >&3)

WORKDIR=/opt/ibm/cluster
ICPDIR=$WORKDIR
source /tmp/icp-bootmaster-scripts/get-args.sh

# Make sure ssh key has correct permissions set before using
chmod 600 ${WORKDIR}/ssh_key
chmod 600 ${cluster_dir}/ssh_key

# Global array variable for holding all cluster ip/hostnames
declare -A cluster
@@ -19,25 +18,25 @@ declare -A cluster
read_from_groupfiles() {
## First compile a list of all nodes in the cluster with ip addresses and associated hostnames
declare -a master_ips
IFS=', ' read -r -a master_ips <<< $(cat ${WORKDIR}/masterlist.txt)
IFS=', ' read -r -a master_ips <<< $(cat ${cluster_dir}/masterlist.txt)

declare -a worker_ips
IFS=', ' read -r -a worker_ips <<< $(cat ${WORKDIR}/workerlist.txt)
IFS=', ' read -r -a worker_ips <<< $(cat ${cluster_dir}/workerlist.txt)

declare -a proxy_ips
IFS=', ' read -r -a proxy_ips <<< $(cat ${WORKDIR}/proxylist.txt)
IFS=', ' read -r -a proxy_ips <<< $(cat ${cluster_dir}/proxylist.txt)

## First gather all the hostnames and link them with ip addresses
declare -A workers
for worker in "${worker_ips[@]}"; do
workers[$worker]=$(ssh -o StrictHostKeyChecking=no -i ${WORKDIR}/ssh_key ${worker} hostname)
workers[$worker]=$(ssh -o StrictHostKeyChecking=no -i ${cluster_dir}/ssh_key ${worker} hostname)
cluster[$worker]=${workers[$worker]}
printf "%s %s\n" "$worker" "${cluster[$worker]}" >> /tmp/hosts
done

declare -A proxies
for proxy in "${proxy_ips[@]}"; do
proxies[$proxy]=$(ssh -o StrictHostKeyChecking=no -i ${WORKDIR}/ssh_key ${proxy} hostname)
proxies[$proxy]=$(ssh -o StrictHostKeyChecking=no -i ${cluster_dir}/ssh_key ${proxy} hostname)
cluster[$proxy]=${proxies[$proxy]}
printf "%s %s\n" "$proxy" "${cluster[$proxy]}" >> /tmp/hosts
done
@@ -49,51 +48,51 @@ read_from_groupfiles() {
then
masters[$m]=$(hostname)
else
masters[$m]=$(ssh -o StrictHostKeyChecking=no -i ${WORKDIR}/ssh_key ${m} hostname)
masters[$m]=$(ssh -o StrictHostKeyChecking=no -i ${cluster_dir}/ssh_key ${m} hostname)
fi
cluster[$m]=${masters[$m]}
printf "%s %s\n" "$m" "${cluster[$m]}" >> /tmp/hosts
done

# Add management nodes if separate from master nodes
if [[ -s ${WORKDIR}/managementlist.txt ]]
if [[ -s ${cluster_dir}/managementlist.txt ]]
then
declare -a management_ips
IFS=', ' read -r -a management_ips <<< $(cat ${WORKDIR}/managementlist.txt)
IFS=', ' read -r -a management_ips <<< $(cat ${cluster_dir}/managementlist.txt)

declare -A mngrs
for m in "${management_ips[@]}"; do
mngrs[$m]=$(ssh -o StrictHostKeyChecking=no -i ${WORKDIR}/ssh_key ${m} hostname)
mngrs[$m]=$(ssh -o StrictHostKeyChecking=no -i ${cluster_dir}/ssh_key ${m} hostname)
cluster[$m]=${mngrs[$m]}
printf "%s %s\n" "$m" "${cluster[$m]}" >> /tmp/hosts
done
fi

## Generate the hosts file for the ICP installation
echo '[master]' > ${ICPDIR}/hosts
echo '[master]' > ${cluster_dir}/hosts
for master in "${master_ips[@]}"; do
echo $master >> ${ICPDIR}/hosts
echo $master >> ${cluster_dir}/hosts
done

echo >> ${ICPDIR}/hosts
echo '[worker]' >> ${ICPDIR}/hosts
echo >> ${cluster_dir}/hosts
echo '[worker]' >> ${cluster_dir}/hosts
for worker in "${worker_ips[@]}"; do
echo $worker >> ${ICPDIR}/hosts
echo $worker >> ${cluster_dir}/hosts
done

echo >> ${ICPDIR}/hosts
echo '[proxy]' >> ${ICPDIR}/hosts
echo >> ${cluster_dir}/hosts
echo '[proxy]' >> ${cluster_dir}/hosts
for proxy in "${proxy_ips[@]}"; do
echo $proxy >> ${ICPDIR}/hosts
echo $proxy >> ${cluster_dir}/hosts
done

# Add management host entries if separate from master nodes
if [[ ! -z ${management_ips} ]]
then
echo >> ${ICPDIR}/hosts
echo '[management]' >> ${ICPDIR}/hosts
echo >> ${cluster_dir}/hosts
echo '[management]' >> ${cluster_dir}/hosts
for m in "${management_ips[@]}"; do
echo $m >> ${ICPDIR}/hosts
echo $m >> ${cluster_dir}/hosts
done
fi
}
@@ -109,7 +108,7 @@ read_from_hostgroups() {

# Generate the hostname/ip combination
for node in "${cluster_ips[@]}"; do
cluster[$node]=$(ssh -o StrictHostKeyChecking=no -o ConnectionAttempts=100 -i ${WORKDIR}/ssh_key ${node} hostname)
cluster[$node]=$(ssh -o StrictHostKeyChecking=no -o ConnectionAttempts=100 -i ${cluster_dir}/ssh_key ${node} hostname)
printf "%s %s\n" "$node" "${cluster[$node]}" >> /tmp/hosts
done

@@ -126,15 +125,15 @@ update_etchosts() {
then
cat /tmp/hosts | cat - /etc/hosts | sed -e "/127.0.1.1/d" | sudo tee /etc/hosts
else
cat /tmp/hosts | ssh -i ${WORKDIR}/ssh_key ${node} 'cat - /etc/hosts | sed -e "/127.0.1.1/d" | sudo tee /etc/hosts'
cat /tmp/hosts | ssh -i ${cluster_dir}/ssh_key ${node} 'cat - /etc/hosts | sed -e "/127.0.1.1/d" | sudo tee /etc/hosts'
fi
done
}


if [[ $( stat -c%s /tmp/icp-host-groups.json ) -gt 2 ]]; then
read_from_hostgroups
elif [[ -s ${WORKDIR}/masterlist.txt ]]; then
elif [[ -s ${cluster_dir}/masterlist.txt ]]; then
read_from_groupfiles
else
echo "Couldn't find any hosts" >&2
