# This file can be used directly by 'phd', see 'build-all.sh' in this
# directory for how it can be invoked. The only requirement is a list
# of nodes you'd like it to modify.
#
# The scope of each command-block is controlled by the preceding
# 'target' line.
#
# - target=all
# The commands are executed on every node provided
#
# - target=local
# The commands are executed from the node hosting phd. When not
# using phd, they should be run from some other independent host
# (such as the puppet master)
#
# - target=$PHD_ENV_nodes{N}
# The commands are executed on the Nth node provided.
# For example, to run only on the first node, use target=$PHD_ENV_nodes1
#
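#
# As an illustration only (this snippet is not part of the scenario and the
# 'hostname' command is just a placeholder), a command-block looks like:
#
#   target=$PHD_ENV_nodes1
#   ....
#   hostname
#   ....
#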
# Tasks to be performed at this step include:
#################################
# Scenario Requirements Section #
#################################
= REQUIREMENTS =
nodes: 1
= VARIABLES =
PHD_VAR_deployment
PHD_VAR_osp_configdir
PHD_VAR_network_domain
PHD_VAR_network_internal
PHD_VAR_network_hosts_gateway
######################
# Deployment Scripts #
######################
= SCRIPTS =
target=all
....
if [ "${PHD_VAR_deployment}" = "segregated" ]; then
    echo "We don't document managed compute nodes in a segregated environment yet"
    # Certainly none of the location constraints would work and the
    # resource-discovery options are mostly redundant
    exit 1
fi
mkdir -p /etc/pacemaker
cp $PHD_VAR_osp_configdir/pcmk-authkey /etc/pacemaker/authkey
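# Note (an assumption beyond the original scenario): the shared authkey is
# expected to exist already. If it does not, it can be generated once, on a
# single host, with something like:
#   dd if=/dev/urandom of=$PHD_VAR_osp_configdir/pcmk-authkey bs=4096 count=1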
....
target=$PHD_ENV_nodes1
....
# Add NovaEvacuate. It must be active/passive (A/P), which is perfectly
# acceptable and avoids the need for cluster-wide locking.
pcs resource create nova-evacuate ocf:openstack:NovaEvacuate auth_url=http://vip-keystone:35357/v2.0/ username=admin password=keystonetest tenant_name=admin
# Without any of those services, nova-evacuate is useless.
# Later we also add an order constraint to start nova-evacuate after
# nova-compute (once nova-compute is defined).
for i in vip-glance vip-cinder vip-neutron vip-nova vip-db vip-rabbitmq vip-keystone cinder-volume; do
    pcs constraint order start $i then nova-evacuate
done
for i in glance-api-clone neutron-metadata-agent-clone nova-conductor-clone; do
    pcs constraint order start $i then nova-evacuate require-all=false
done
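# require-all=false relaxes the clone ordering above: nova-evacuate only needs
# at least one active instance of each clone, not an instance on every node.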
# Take down the OSP control plane
pcs resource disable openstack-keystone --wait=240
# Take advantage of the fact that control nodes will already be part of the cluster
# At this step, we need to teach the cluster about the compute nodes
#
# This requires running commands on the cluster based on the names of the compute nodes
controllers=$(cibadmin -Q -o nodes | grep uname | sed s/.*uname..// | awk -F\" '{print $1}')
for controller in ${controllers}; do
    pcs property set --node ${controller} osprole=controller
done
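# (Optional check, not part of the original flow): the assigned roles can be
# inspected with:
#   cibadmin -Q -o nodes | grep osprole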
# Force services to run only on nodes with osprole = controller
#
# Importantly, it also tells Pacemaker not to even look for the services on
# other nodes. This helps reduce noise and collisions with services that fill
# the same role on compute nodes.
stonithdevs=$(pcs stonith | awk '{print $1}')
for i in $(cibadmin -Q --xpath //primitive --node-path | tr ' ' '\n' | awk -F "id='" '{print $2}' | awk -F "'" '{print $1}' | uniq); do
    found=0
    if [ -n "$stonithdevs" ]; then
        for x in $stonithdevs; do
            if [ $x = $i ]; then
                found=1
            fi
        done
    fi
    if [ $found = 0 ]; then
        pcs constraint location $i rule resource-discovery=exclusive score=0 osprole eq controller
    fi
done
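# For a single controller-only resource such as vip-keystone, the loop above
# boils down to:
#   pcs constraint location vip-keystone rule resource-discovery=exclusive score=0 osprole eq controller
# Fencing devices are deliberately left out so they are not tied to the
# controller role.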
# Now (because the compute nodes have roles assigned to them and keystone is
# stopped) it is safe to define the services that will run on the compute nodes
# neutron-openvswitch-agent
pcs resource create neutron-openvswitch-agent-compute systemd:neutron-openvswitch-agent --clone interleave=true --disabled --force
pcs constraint location neutron-openvswitch-agent-compute-clone rule resource-discovery=exclusive score=0 osprole eq compute
pcs constraint order start neutron-server-clone then neutron-openvswitch-agent-compute-clone require-all=false
# libvirtd
pcs resource create libvirtd-compute systemd:libvirtd --clone interleave=true --disabled --force
pcs constraint location libvirtd-compute-clone rule resource-discovery=exclusive score=0 osprole eq compute
pcs constraint order start neutron-openvswitch-agent-compute-clone then libvirtd-compute-clone
pcs constraint colocation add libvirtd-compute-clone with neutron-openvswitch-agent-compute-clone
# openstack-ceilometer-compute
pcs resource create ceilometer-compute systemd:openstack-ceilometer-compute --clone interleave=true --disabled --force
pcs constraint location ceilometer-compute-clone rule resource-discovery=exclusive score=0 osprole eq compute
pcs constraint order start ceilometer-notification-clone then ceilometer-compute-clone require-all=false
pcs constraint order start libvirtd-compute-clone then ceilometer-compute-clone
pcs constraint colocation add ceilometer-compute-clone with libvirtd-compute-clone
# nfs mount for nova-compute shared storage
pcs resource create nova-compute-fs Filesystem device="${PHD_VAR_network_internal}.1:$PHD_VAR_osp_configdir/instances" directory="/var/lib/nova/instances" fstype="nfs" options="v3" op start timeout=240 --clone interleave=true --disabled --force
pcs constraint location nova-compute-fs-clone rule resource-discovery=exclusive score=0 osprole eq compute
pcs constraint order start ceilometer-compute-clone then nova-compute-fs-clone
pcs constraint colocation add nova-compute-fs-clone with ceilometer-compute-clone
# nova-compute
pcs resource create nova-compute ocf:openstack:NovaCompute auth_url=http://vip-keystone:35357/v2.0/ username=admin password=keystonetest tenant_name=admin domain=${PHD_VAR_network_domain} op start timeout=300 --clone interleave=true --disabled --force
pcs constraint location nova-compute-clone rule resource-discovery=exclusive score=0 osprole eq compute
pcs constraint order start nova-conductor-clone then nova-compute-clone require-all=false
pcs constraint order start nova-compute-fs-clone then nova-compute-clone require-all=false
pcs constraint colocation add nova-compute-clone with nova-compute-fs-clone
pcs constraint order start nova-compute-clone then nova-evacuate require-all=false
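# Summary of the constraints above (descriptive only, nothing extra to
# configure): on each compute node the start order is
#   neutron-openvswitch-agent-compute -> libvirtd-compute ->
#   ceilometer-compute -> nova-compute-fs -> nova-compute
# with each service colocated with its predecessor.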
case ${PHD_VAR_network_hosts_gateway} in
    east-*)
        pcs stonith create fence-compute fence_apc ipaddr=east-apc login=apc passwd=apc pcmk_host_map="east-01:2;east-02:3;east-03:4;east-04:5;east-05:6;east-06:7;east-07:9;east-08:10;east-09:11;east-10:12;east-11:13;east-12:14;east-13:15;east-14:18;east-15:17;east-16:19;" --force
        ;;
    mrg-*)
        pcs stonith create fence-compute fence_apc_snmp ipaddr=apc-ap7941-l2h3.mgmt.lab.eng.bos.redhat.com power_wait=10 pcmk_host_map="mrg-07:10;mrg-08:12;mrg-09:14"
        ;;
esac
pcs stonith create fence-nova fence_compute auth-url=http://vip-keystone:35357/v2.0/ login=admin passwd=keystonetest tenant-name=admin domain=${PHD_VAR_network_domain} record-only=1 action=off --force
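# Notes on the fencing setup above:
# - The APC addresses and pcmk_host_map values are site-specific examples;
#   adjust them for your own hardware.
# - fence-nova uses fence_compute in record-only mode: it merely records that
#   a compute node needs evacuation, and nova-evacuate performs the actual
#   evacuation afterwards.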
# while this is set in basic.cluster, it looks like OSPd doesn't set it.
pcs resource defaults resource-stickiness=INFINITY
# Allow compute nodes to rejoin the cluster automatically.
# cluster-recheck-interval is how often Pacemaker re-evaluates the cluster
# when no other event triggers it, so a failed compute node connection is
# retried within roughly this interval. 1 minute might be a bit aggressive
# though.
pcs property set cluster-recheck-interval=1min
for node in ${PHD_ENV_nodes}; do
    found=0
    short_node=$(echo ${node} | sed s/\\..*//g)
    for controller in ${controllers}; do
        if [ ${short_node} = ${controller} ]; then
            found=1
        fi
    done
    if [ $found = 0 ]; then
        # We only want to execute the following _for_ the compute nodes, not _on_ the compute nodes
        # Rather annoying
        pcs resource create ${short_node} ocf:pacemaker:remote reconnect_interval=60 op monitor interval=20
        pcs property set --node ${short_node} osprole=compute
        pcs stonith level add 1 ${short_node} fence-compute,fence-nova
    fi
done
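# For a hypothetical compute node "east-05" (name used purely for
# illustration), the loop above expands to:
#   pcs resource create east-05 ocf:pacemaker:remote reconnect_interval=60 op monitor interval=20
#   pcs property set --node east-05 osprole=compute
#   pcs stonith level add 1 east-05 fence-compute,fence-nova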
pcs resource enable openstack-keystone
pcs resource enable neutron-openvswitch-agent-compute
pcs resource enable libvirtd-compute
pcs resource enable openstack-ceilometer-compute
pcs resource enable nova-compute-fs
pcs resource enable nova-compute
# clean up after ourselves
sleep 60
pcs resource cleanup
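# (Optional check, not in the original flow): confirm that the clones and the
# remote compute nodes have started:
#   pcs status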
....