Provisioning station role
To make somebox a provisioning station, do
    salt somebox grains.append roles provisioning
    salt somebox state.apply

Then you can connect a fresh machine via Ethernet to
the provisioning station and provision the new device
by running "provision.sh".
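
You can confirm the grain took effect before applying state
(a sketch; "somebox" stands in for your minion ID):

    salt somebox grains.get roles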
Christopher Biggs committed Mar 2, 2017
1 parent 4ff805a commit d59a183
Showing 13 changed files with 337 additions and 13 deletions.
55 changes: 55 additions & 0 deletions provision.sh
@@ -0,0 +1,55 @@
#!/bin/bash
if [ "$1" = "" ]
then
echo "usage: $0 role"
exit 1
fi

[ -n "$LOG" ] || LOG=warning

banner "Looking for salt minion on target"
salt-run -l $LOG manage.up | tee .out
MINIONS_FOUND="`grep -e '^- ' .out | wc -l`"

if [ "$MINIONS_FOUND" -ge 1 ]
then
echo "Already have $MINIONS_FOUND connected salt minion(s)"
else
rm -f $HOME/.ssh/known_hosts
echo "Installing salt-minion on target"
salt-ssh -l $LOG -i target state.apply salt.new_minion pillar="{\"salt_minion\": {\"master_host\": \"192.168.0.1\"}, \"roles\": $ROLE}" | tee .out
if grep 'Failed: *0' .out >/dev/null
then
echo "INFO: target was updated with salt-minion"
else
echo "ERROR: could not install salt-minion"
exit 1
fi

while :
do
echo "Waiting for new minion to connect"
salt-run manage.up | tee .out
MINIONS_FOUND="`grep -e '^- ' .out | wc -l`"
if [ "$MINIONS_FOUND" -ge 1 ]
then
break
fi
sleep 5
done
fi

echo "Applying states to new minion"

salt -l $LOG -G 'new_minion:1' state.apply | tee .out
if grep '# of minions with errors:.* 0' .out >/dev/null && grep '# of minions that did not return:.* 0' .out >/dev/null
then
echo "INFO: target was updated with desired states"
else
echo "ERROR: could not apply desired states"
exit 1
fi

echo "Erasing provisioning diversions on minion"
salt -G 'new_minion:1' state.apply salt.new_minion_reset
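
For reference, a typical run from the provisioning station might look like
this (a sketch; "mqtt-relay" is a hypothetical role name, and LOG overrides
the script's default log level of "warning"):

    # new device cabled to the provisioning interface (eth0)
    LOG=info ./provision.sh mqtt-relay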

9 changes: 9 additions & 0 deletions roster
@@ -0,0 +1,9 @@
#
# Roster file used with provision.sh and salt-ssh to install salt-minion on a bare Raspbian system
#
target:
  host: 192.168.0.1
  user: pi
  passwd: raspberry
  sudo: True
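
With the roster in place you can sanity-check the SSH path before running
provision.sh (a sketch; assumes salt-ssh can find this roster, e.g. copied
to /etc/salt/roster, and -i skips host-key checking as in provision.sh):

    salt-ssh -i target test.ping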

9 changes: 0 additions & 9 deletions srv/pillar/mqtt-relay.sls

This file was deleted.

28 changes: 27 additions & 1 deletion srv/pillar/salt-minion.sls → srv/pillar/salt.sls
@@ -1,3 +1,11 @@
#
# Set the location of your top level salt master here.
#
# Mine is a Raspberry Pi called "Tweety"
#
salt_syndic:
  master_host: tweety.local

#
# Define the location of official saltstack packages.
#
@@ -8,11 +16,29 @@
# every release, so you have to simply hard-code the
# most appropriate package for supported distributions.
#
-salt-minion:
+salt_minion:
  master_host: tweety.local
{% if grains['os'] == 'Ubuntu' and grains['osarch'] == 'amd64' %}
  apt_repo_path: http://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest
  dist_codename: xenial
{% elif grains['os_family'] == 'Debian' and grains['osarch'] == 'armhf' %}
  apt_repo_path: https://repo.saltstack.com/apt/debian/8/armhf/latest
  dist_codename: jessie
{% endif %}

#
# Set up provisioning server config
#
salt_provision:
  public_interface: wlan0
  interface: eth0
  repo: https://github.com/unixbigot/kevin.git
  ssh_secret_key: PASTE_HERE_id_provision
  ssh_public_key: PASTE_HERE_id_provision.pub
  git_secret_key: PASTE_HERE_id_github
  address: 192.168.0.1
  netmask: 255.255.255.0
  dhcp_start: 192.168.0.100
  dhcp_end: 192.168.0.100
  dhcp_lease: 5m
  target: 192.168.0.100
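
A quick way to confirm what a minion will see from this pillar
(a sketch; "somebox" is a placeholder minion ID):

    salt somebox pillar.get salt_provision
    salt somebox pillar.get salt_minion:master_host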
19 changes: 19 additions & 0 deletions srv/salt/base/salt/master-local.conf
@@ -0,0 +1,19 @@
#
# Support multi-tier masters
#
order_masters: True
#
# Add some extra diagnostic output
#
state_events: True
presence_events: True
cli_summary: True
#
# Set the location of state and pillar files
#
file_roots:
  base:
    - /srv/salt/base
pillar_roots:
  base:
    - /srv/pillar
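
Once the master has restarted with this config, the file server contents
can be listed with a runner to confirm the roots resolve (a sketch):

    salt-run fileserver.file_list | head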
21 changes: 21 additions & 0 deletions srv/salt/base/salt/master.sls
@@ -0,0 +1,21 @@
salt-master:
  pkgrepo.managed:
    - humanname: SaltStack Repo
    - name: deb {{pillar.salt_minion.apt_repo_path}} {{pillar.salt_minion.dist_codename}} main
    - dist: {{pillar.salt_minion.dist_codename}}
    - key_url: {{pillar.salt_minion.apt_repo_path}}/SALTSTACK-GPG-KEY.pub
    - file: /etc/apt/sources.list.d/saltstack.list
  pkg.installed:
    - pkgs:
      - salt-master
      - salt-ssh
  file.managed:
    - name: /etc/salt/master.d/local.conf
    - source: salt://salt/master-local.conf
    - replace: False
  service.running:
    - enable: True
    - watch:
      - file: salt-master
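
A dry run previews what this state would change without applying it
(a sketch; "somebox" is a placeholder minion ID):

    salt somebox state.apply salt.master test=True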


1 change: 1 addition & 0 deletions srv/salt/base/salt/minion-local.conf
@@ -1 +1,2 @@
master: {{salt_master_ip}}
hash_type: sha256
6 changes: 3 additions & 3 deletions srv/salt/base/salt/minion.sls
@@ -1,9 +1,9 @@
salt-minion:
  pkgrepo.managed:
    - humanname: SaltStack Repo
-   - name: deb {{ pillar['salt-minion']['apt_repo_path'] }} {{ pillar['salt-minion']['dist_codename'] }} main
-   - dist: {{ pillar['salt-minion']['dist_codename'] }}
-   - key_url: {{ pillar['salt-minion']['apt_repo_path'] }}/SALTSTACK-GPG-KEY.pub
+   - name: deb {{pillar.salt_minion.apt_repo_path}} {{pillar.salt_minion.dist_codename}} main
+   - dist: {{pillar.salt_minion.dist_codename}}
+   - key_url: {{pillar.salt_minion.apt_repo_path}}/SALTSTACK-GPG-KEY.pub
    - file: /etc/apt/sources.list.d/saltstack.list
  pkg:
    - installed
121 changes: 121 additions & 0 deletions srv/salt/base/salt/provision.sls
@@ -0,0 +1,121 @@
dnsmasq:
  pkg.installed:
    - pkgs:
      - dnsmasq
  service.running:
    - enable: True
    - listen:
      - file: /etc/network/hosts-{{pillar.salt_provision.interface}}
      - file: /etc/dnsmasq.d/{{pillar.salt_provision.interface}}

/etc/network/interfaces:
  file.comment:
    - regex: ^iface eth0

/etc/network/interfaces.d/{{pillar.salt_provision.interface}}:
  file.managed:
    - source: salt://salt/provision_interface.conf
    - makedirs: True
    - template: jinja
    - context:
        interface: {{pillar.salt_provision.interface}}
        address: {{pillar.salt_provision.address}}
        netmask: {{pillar.salt_provision.netmask}}
  cmd.run:
    - name: ifdown {{pillar.salt_provision.interface}} && ifup {{pillar.salt_provision.interface}}
    - onchanges:
      - file: /etc/network/interfaces
      - file: /etc/network/interfaces.d/{{pillar.salt_provision.interface}}

ip_forwarding:
  sysctl.present:
    - name: net.ipv4.ip_forward
    - value: 1
  iptables.set_policy:
    - chain: FORWARD
    - policy: ACCEPT

ip_masquerading:
  iptables.append:
    - table: nat
    - chain: POSTROUTING
    - out-interface: {{pillar.salt_provision.public_interface}}
    - jump: MASQUERADE

/etc/network/hosts-{{pillar.salt_provision.interface}}:
  file.managed:
    - contents:
      - {{pillar.salt_provision.address}} salt
      - {{pillar.salt_provision.target}} target

/etc/dnsmasq.d/{{pillar.salt_provision.interface}}:
  file.managed:
    - contents:
      - interface={{pillar.salt_provision.interface}}
      - dhcp-range={{pillar.salt_provision.dhcp_start}},{{pillar.salt_provision.dhcp_end}},{{pillar.salt_provision.dhcp_lease}}
      - no-hosts
      - addn-hosts=/etc/network/hosts-{{pillar.salt_provision.interface}}

ssh_config:
  file.managed:
    - name: /home/pi/.ssh/config
    - user: pi
    - group: pi
    - mode: 600
    - makedirs: True
    - contents:
      - "Host target"
      - "  User pi"

provision_ssh_id:
  file.managed:
    - name: /home/pi/.ssh/id_provision
    - user: pi
    - group: pi
    - mode: 600
    - makedirs: True
    # contents_pillar keeps the multi-line private key intact
    - contents_pillar: salt_provision:ssh_secret_key

provision_ssh_pub:
  file.managed:
    - name: /home/pi/.ssh/id_provision.pub
    - user: pi
    - group: pi
    - mode: 644
    - makedirs: True
    - contents:
      - {{pillar.salt_provision.ssh_public_key}}

provision_git_id:
  file.managed:
    - name: /home/pi/.ssh/id_github
    - user: pi
    - group: pi
    - mode: 600
    - makedirs: True
    - contents_pillar: salt_provision:git_secret_key

salt_provision_repo:
  git.latest:
    - name: {{pillar.salt_provision.repo}}
    - target: /home/pi/salt
    - submodules: True
    # identity must be a path to a key file, not the key material
    - identity: /home/pi/.ssh/id_github
    - user: pi
    - require:
      - file: provision_git_id

salt_provision_symlink:
  file.symlink:
    - name: /srv
    - target: /home/pi/salt/srv

salt_provision_conf:
  file.managed:
    - name: /etc/salt/master.d/salt_provision_server.conf
    - user: root
    - group: root
    - mode: 644
    - contents:
      - "auto_accept: True"

salt_provision_restart:
  service.running:
    - name: salt-master
    - watch:
      - file: salt_provision_conf
      - file: salt_provision_symlink
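
After a highstate, the DHCP and NAT plumbing can be spot-checked on the
station itself (a sketch; the lease file path is the Debian default):

    cat /var/lib/misc/dnsmasq.leases
    sudo iptables -t nat -L POSTROUTING -n
    sudo sysctl net.ipv4.ip_forward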
23 changes: 23 additions & 0 deletions srv/salt/base/salt/syndic.sls
@@ -0,0 +1,23 @@
salt-syndic:
  pkgrepo.managed:
    - humanname: SaltStack Repo
    - name: deb {{pillar.salt_minion.apt_repo_path}} {{pillar.salt_minion.dist_codename}} main
    - dist: {{pillar.salt_minion.dist_codename}}
    - key_url: {{pillar.salt_minion.apt_repo_path}}/SALTSTACK-GPG-KEY.pub
    - file: /etc/apt/sources.list.d/saltstack.list
  pkg:
    - installed
    - require:
      - pkgrepo: salt-syndic
  file.managed:
    - name: /etc/salt/master.d/syndic.conf
    - replace: False
    - contents:
      - "syndic_master: {{pillar.salt_syndic.master_host}}"
  service.running:
    - enable: True
    - watch:
      - file: salt-syndic
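
From the top-level master (tweety.local here) you can confirm the syndic's
minions are reporting upward (a sketch):

    salt-run manage.up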


5 changes: 5 additions & 0 deletions srv/salt/base/top.sls
@@ -16,4 +16,9 @@ base:
    - match: grain
    - dev.golang
    - dev.nodejs
  'roles:provisioning':
    - match: grain
    - salt.master
    - salt.syndic
    - salt.provision
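
state.show_top confirms which states the new role pulls in
(a sketch; "somebox" is a placeholder minion ID):

    salt somebox state.show_top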

31 changes: 31 additions & 0 deletions srv/salt/new_minion.sls
@@ -0,0 +1,31 @@
include:
  - salt.minion

new_minion_id:
  file.managed:
    - name: /etc/salt/minion_id
    - contents:
      - {{pillar.roles[0] + '_' + grains.hwaddr_interfaces.eth0|replace(":","")}}

new_minion_grains:
  file.serialize:
    - name: /etc/salt/grains
    - formatter: yaml
    - user: root
    - group: root
    - mode: 644
    - merge_if_exists: true
    - dataset:
        new_minion: 1
        roles:
{% for role in pillar.roles %}
          - {{role}}
{% endfor %}

new_minion_restart:
  service.running:
    - name: salt-minion
    - watch:
      - file: new_minion_id
      - file: new_minion_grains
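
The generated minion ID joins the first role to eth0's MAC address, so a
hypothetical "mqtt-relay" device with MAC b8:27:eb:12:34:56 would become
mqtt-relay_b827eb123456. The inputs can be inspected locally (a sketch):

    salt-call --local grains.get hwaddr_interfaces:eth0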

22 changes: 22 additions & 0 deletions srv/salt/new_minion_reset.sls
@@ -0,0 +1,22 @@
#
# Once a minion has been provisioned from a provisioning station
# using provision.sh, erase the temporary configuration that
# made the new minion use the provisioning station as its master.
#
new_minion_reset_options:
  file.replace:
    - name: /etc/salt/minion.d/options.conf
    - pattern: "master: {{pillar.salt_provision.address}}"
    - repl: "master: {{pillar.salt_minion.master_host}}"

new_minion_reset_grain:
  grains.absent:
    - name: new_minion

new_minion_reset_key:
  file.absent:
    - name: /etc/salt/pki/minion/minion_master.pub
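
After the reset the minion reconnects to its permanent master, where its key
still has to be accepted by hand, since auto_accept was only enabled on the
provisioning station (a sketch; the minion ID is hypothetical):

    salt-key -L
    salt-key -a mqtt-relay_b827eb123456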



