diff --git a/overlay/generic/lib/svc/method/svc-zones b/overlay/generic/lib/svc/method/svc-zones
new file mode 100644
index 000000000..c73bd4441
--- /dev/null
+++ b/overlay/generic/lib/svc/method/svc-zones
@@ -0,0 +1,308 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, Joyent Inc. All rights reserved.
+
+set -o xtrace
+
+. /lib/svc/share/smf_include.sh
+
+ZPOOL=`svcprop -p config/zpool svc:/system/smartdc/init:default 2>/dev/null`
+ZPOOL=${ZPOOL:-zones}
+
+MAN_CONFIG=/usr/lib/brand/joyent/manifests
+MAN_SRCDIR=/lib/svc/manifest
+MAN_DESTDIR=/$ZPOOL/manifests
+
+MAN_DIRS="\
+	application \
+	milestone \
+	network \
+	network/dns \
+	network/ipsec \
+	network/ldap \
+	network/loadbalancer \
+	network/nfs \
+	network/nis \
+	network/routing \
+	network/rpc \
+	network/security \
+	network/shares \
+	network/smb \
+	network/ssl \
+	site \
+	system \
+	system/device \
+	system/filesystem \
+	system/fm \
+	system/install \
+	system/security \
+	system/svc"
+
+#
+# If we're running off of a live image, set up a zone-specific collection of
+# manifest files which are mounted in a zone's /lib/svc/manifest directory.
+#
+# Regenerate the manifest data each time this service starts, so that it's
+# always in sync with the running platform (and any fixes included there).
+#
+setup_manifests()
+{
+	echo "Initializing manifest dir."
+
+	#
+	# Create the directory hierarchy under the destination directory for
+	# SMF manifests.  This directory used to be a separate ZFS dataset,
+	# so if such a dataset exists, destroy it.  There's no reason for the
+	# manifest directory to be in its own dataset.
+	#
+	rm -rf $MAN_DESTDIR/*
+	zfs list -H -o name $ZPOOL/manifests >/dev/null 2>&1
+	[ $? -eq 0 ] && zfs destroy $ZPOOL/manifests
+
+	rm -rf $MAN_DESTDIR
+	mkdir -m755 $MAN_DESTDIR
+
+	for dir in $MAN_DIRS
+	do
+		mkdir -m755 $MAN_DESTDIR/$dir
+	done
+
+	if [[ ! -f $MAN_CONFIG ]]; then
+		echo "Missing zone SMF manifest list."
+		return
+	fi
+
+	#
+	# Process the SMF SVC configuration list to set up the zone-specific
+	# SMF svcs.
+	#
+	nawk -v base=$MAN_SRCDIR -v dest=$MAN_DESTDIR '{
+		# Ignore comments and empty lines.
+		if (substr($1, 1, 1) == "#" || length($1) == 0)
+			next
+
+		# entry format is: name status
+		proc_file($1, $2);
+	}
+
+	#
+	# Copy the manifest from the global zone to the zone-specific
+	# manifest area.  At the same time, update any manifests whose
+	# status needs to be changed, based on what our configuration
+	# file indicates.
+	#
+	function proc_file(fname, status)
+	{
+		f = base "/" fname
+		of = dest "/" fname
+
+		while ((getline <f) > 0) {
+			# Fix up the console svc to work with zones.
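+			# ("wscons" presumably names the global zone's
+			# workstation console, which a non-global zone
+			# lacks; the zone console stands in for it, hence
+			# the rename below.)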
+			if (fname == "system/console-login.xml")
+				sub("wscons", "console")
+
+			if (($1 == "<create_default_instance" ||
+			    $1 == "<instance") && status == "disabled") {
+				if (sub("enabled=.true.", "enabled=\"false\"") > 0)
+					printf("update svc state: %s %s\n",
+					    fname, $0)
+			}
+
+			print $0 >of
+		}
+		close(f)
+		close(of)
+	}' $MAN_CONFIG
+}
+
+#
+# Return a list of running, non-global zones for which a shutdown via
+# "/sbin/init 0" may work (typically only Solaris zones).
+#
+shutdown_zones()
+{
+	zoneadm list -p | nawk -F: '{
+		if ($2 != "global") {
+			print $2
+		}
+	}'
+}
+
+[ ! -x /usr/sbin/zoneadm ] && exit 0	# SUNWzoneu not installed
+
+if [ -z "$SMF_FMRI" ]; then
+	echo "this script can only be invoked by smf(5)"
+	exit $SMF_EXIT_ERR_NOSMF
+fi
+
+# Make sure working directory is / to prevent unmounting problems.
+cd /
+PATH=/usr/sbin:/usr/bin; export PATH
+
+case "$1" in
+'start')
+	#
+	# Generate the manifests even if there are no zones, since zones
+	# could be provisioned later.
+	#
+	zfs list -H -o name $ZPOOL >/dev/null 2>&1
+	[ $? -eq 0 ] && setup_manifests
+
+	egrep -vs '^#|^global:' /etc/zones/index || exit 0	# no local zones
+
+	#
+	# Boot the installed zones for which the "autoboot" zone property is
+	# set and invoke the sysboot hook for all other installed zones.
+	#
+	ZONES=""
+	for zone in `zoneadm list -pi | nawk -F: '{
+		if ($3 == "installed") {
+			print $2
+		}
+	}'`; do
+		zonecfg -z $zone info autoboot | grep "true" >/dev/null 2>&1
+		if [ $? -eq 0 ]; then
+			[ -z "$ZONES" ] && echo "Booting zones:\c"
+			ZONES=yes
+			echo " $zone\c"
+
+			#
+			# Make sure a site dir exists; it wasn't initially
+			# being created.
+			#
+			zonepath=`zonecfg -z $zone info zonepath | cut -d: -f2`
+			[ ! -d $zonepath/site ] && mkdir -m755 $zonepath/site
+
+			#
+			# zoneadmd puts itself into its own contract so
+			# this service will lose sight of it.  We don't
+			# support restart, so it is OK for zoneadmd to
+			# be in an orphaned contract.
+			#
+			zoneadm -z $zone boot &
+		else
+			zoneadm -z $zone sysboot &
+		fi
+	done
+
+	#
+	# Wait for all zoneadm processes to finish before allowing the
+	# start method to exit.
+	#
+	wait
+	[ -n "$ZONES" ] && echo .
+	;;
+
+'stop')
+	egrep -vs '^#|^global:' /etc/zones/index || exit 0	# no local zones
+	[ "`zoneadm list`" = "global" ] && exit 0	# no zones running
+
+	SVC_TIMEOUT=`svcprop -p stop/timeout_seconds $SMF_FMRI`
+
+	#
+	# First, try shutting down any running zones for which an "init 0"
+	# may work.
+	#
+	MAXSHUT=`expr 3 \* $SVC_TIMEOUT \/ 4`	# 3/4 of time to zone shutdown
+	MAXHALT=`expr $SVC_TIMEOUT \/ 4`	# rest of time goes to halt
+
+	zonelist=`shutdown_zones`
+
+	if [ -n "$zonelist" ]; then
+		SHUTDOWN=0
+		echo "Shutting down running zones (for up to $MAXSHUT" \
+		    "seconds):\c"
+
+		for zone in $zonelist; do
+			echo " $zone\c"
+			zlogin -S $zone /sbin/init 0 < /dev/null >&0 2>&0 &
+			SHUTDOWN=1
+		done
+
+		[ $SHUTDOWN -eq 1 ] && echo "."
+
+		# Allow time for zones to shut down cleanly.
+		while [ $MAXSHUT -gt 0 -a "`shutdown_zones`" != "" ]; do
+			MAXSHUT=`expr $MAXSHUT - 1`
+			sleep 1		# wait a bit longer
+		done
+	fi
+
+	#
+	# Second, try halting any non-global zones still running.
+	#
+	WAITPIDS=""
+	for zone in `zoneadm list`; do
+		if [ "$zone" != "global" ]; then
+			[ -z "$WAITPIDS" ] &&
+			    echo "Zones failed to shut down; trying to halt" \
+			    "(for up to $MAXHALT seconds):\c"
+			echo " $zone\c"
+			zoneadm -z $zone halt &
+			WAITPIDS="$WAITPIDS $!"
+		fi
+	done
+	[ ! -z "$WAITPIDS" ] && echo .
+
+	#
+	# Wait for the 'zoneadm halt' commands to complete.  We will let this
+	# run forever, since the SMF restarter will eventually kill us off
+	# anyway if the halts do not complete after a certain period of time.
+	#
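+	# (Note: the shell "wait" builtin blocks until the listed children
+	# exit and has no timeout of its own; the bound here comes from
+	# svc.startd killing this method once stop/timeout_seconds expires.)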
+	wait $WAITPIDS
+
+	#
+	# If the halts complete but a zone is still not shut down, it might
+	# be in a state like 'shutting_down' or 'down'.  So we give it some
+	# time to come all the way down.
+	#
+	while [ $MAXHALT -gt 0 -a "`zoneadm list`" != "global" ]; do
+		MAXHALT=`expr $MAXHALT - 1`
+		sleep 1		# wait a bit longer
+	done
+
+	# If there are any remaining file systems in zones, try to unmount
+	# them.
+	umountall -Z
+
+	#
+	# Report on zones which failed to shut down.
+	#
+	for zone in `zoneadm list`; do
+		if [ "$zone" != "global" ]; then
+			echo "Zone '$zone' failed to halt."
+		fi
+	done
+	[ "`zoneadm list`" != "global" ] && exit 1	# zones still running
+	;;
+
+*)
+	echo "Usage: $0 { start | stop }"
+	exit 1
+	;;
+esac
+exit 0
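
A quick way to exercise both methods by hand, assuming the accompanying SMF
manifest delivers this script as svc:/system/zones:default (the stock illumos
FMRI for the zones service; the FMRI is an assumption, not shown in this diff):

    svcadm restart svc:/system/zones:default
    svcs -l svc:/system/zones:default
    tail /var/svc/log/system-zones:default.log    # xtrace output lands here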