Browse files

Factor out common Ubuntu build scripts

  • Loading branch information...
1 parent eb23251 commit 1bbcf7a71c74767c08a5f4851120215170ad86e0 @VictorLowther VictorLowther committed Dec 21, 2011
View
178 ubuntu-10.10-extra/build_lib.sh
@@ -8,181 +8,5 @@ OS=ubuntu
OS_VERSION=10.10
OS_TOKEN="$OS-$OS_VERSION"
OS_CODENAME=maverick
-# Server to download the mirror from if we need to.
-ISO_MIRROR="http://mirror.anl.gov/pub"
-PKG_TYPE="debs"
-PKG_ALLOWED_ARCHES=("amd64" "all")
-CHROOT_PKGDIR="var/cache/apt/archives"
-CHROOT_GEMDIR="var/lib/gems/1.8/cache"
-declare -A SEEN_DEBS
-# The name of the OS iso we are using as a base.
-[[ $ISO ]] || ISO="ubuntu-$OS_VERSION-server-amd64.iso"
-# The location for OS packages on $ISO
-find_cd_pool() ( echo "$IMAGE_DIR/pool"; )
-
-fetch_os_iso() {
- # Try and download our ISO if we don't already have it
- echo "$(date '+%F %T %z'): Downloading and caching $ISO"
- curl -o "$ISO_LIBRARY/$ISO" \
- "$ISO_MIRROR/ubuntu-iso/CDs/$OS_VERSION/$ISO" || \
- die 1 "Missing our source image"
-}
-
-# Have the chroot update its package databases.
-chroot_update() { in_chroot /usr/bin/apt-get -y --force-yes \
- --allow-unauthenticated update; }
-
-# Install some packages in the chroot environment.
-chroot_install() {
- if [[ $1 ]]; then
- in_chroot /usr/bin/apt-get -y --force-yes \
- --allow-unauthenticated install "$@"
- fi
-}
-
-# Fetch (but do not install) packages into the chroot environment
-chroot_fetch() {
- if [[ $1 ]]; then
- in_chroot /usr/bin/apt-get -y --force-yes \
- --allow-unauthenticated --download-only install "$@"
- fi
-}
-
-# Add repositories to the local chroot environment.
-add_repos() {
- local repo ppas=()
- local f=$(mktemp /tmp/ubuntu_repos.XXXXXX)
- for repo in "$@"; do
- case $repo in
- ppa*) ppas+=("${ppa#* }");;
- deb*) echo "$repo" >> "$f";;
- *) die "Unknown Debian repository type $repo";;
- esac
- done
- in_chroot mkdir -p /etc/apt/sources.list.d
- sudo cp "$f" "$CHROOT/etc/apt/sources.list.d/${f##*.}.list"
- rm "$f"
- [[ $ppas ]] || return 0
- chroot_install python-software-properties
- for repo in "${ppas[@]}"; do
- in_chroot apt-add-repository "ppa:${repo}"
- done
-}
-
-# Test to see we were passed a valid package file name.
-is_pkg() { [[ $1 = *.deb ]]; }
-
-# Look up name and version information for a package using
-# dpkg. Make sure and memoize things.
-dpkg_info() {
- # $1 = package to examine
- local name arch ver f1 f2
- [[ -f $1 && $1 = *.deb ]] || die "$1 is not a debian package!"
- if [[ ! ${SEEN_DEBS["${1##*/}"]} ]]; then
- while read f1 f2; do
- case $f1 in
- Package:) name="$f2";;
- Version:) ver="$f2";;
- Architecture:) arch="$f2";;
- esac
- [[ $name && $ver && $arch ]] && break || :
- done < <(dpkg -I "$1")
- SEEN_DEBS["${1##*/}"]="$name-$arch $ver"
- fi
- echo "${SEEN_DEBS["${1##*/}"]}"
-}
-
-# Get the package file name in $name-$arch format.
-pkg_name() {
- local n="$(dpkg_info "$1")"
- echo "${n%% *}"
-}
-
-# OS specific part of making our chroot environment.
-__make_chroot() {
- # debootstrap a minimal install of our target version of
- # Ubuntu to ensure that we don't interfere with the host's package cache.
- local d repo bc f
- sudo mount -t tmpfs -o size=4G "$OS_TOKEN-chroot" "$CHROOT"
- sudo debootstrap "$OS_CODENAME" "$CHROOT" \
- "file://$IMAGE_DIR" || \
- die 1 "Could not bootstrap our scratch target!"
- # mount some important directories for the chroot
- for d in proc sys dev dev/pts; do
- bind_mount "/$d" "$CHROOT/$d"
- done
- in_chroot mkdir -p "/base_repo"
- sudo mount --bind "$IMAGE_DIR" "$CHROOT/base_repo"
- # make sure the chroot can resolve hostnames
- sudo cp /etc/resolv.conf "$CHROOT/etc/resolv.conf"
- # make sure the chroot honors proxies
- if [[ $http_proxy || $https_proxy ]]; then
- f=$(mktemp /tmp/apt.http.conf.XXXXXX)
- [[ $http_proxy ]] && echo \
- "Acquire::http::Proxy \"$http_proxy\";" >> "$f"
- [[ $https_proxy ]] && echo \
- "Acquire::https::Proxy \"$https_proxy\";" >> "$f"
- echo "Acquire::http::Proxy::127.0.0.1 \"DIRECT\";" >> "$f"
- in_chroot mkdir -p "/etc/apt/apt.conf.d/"
- sudo cp "$f" "$CHROOT/etc/apt/apt.conf.d/00http_proxy"
- fi
-}
-
-# Test to see if package $1 is more recent than package $2
-pkg_cmp() {
- # $1 = Debian package 1
- # $2 = Debian package 2
- local deb1="$(dpkg_info "$1")"
- local deb2="$(dpkg_info "$2")"
- [[ ${deb1%% *} = ${deb2%% *} ]] || \
- die "$1 and $2 do not reference the same package!"
- vercmp "${deb1#* }" "${deb2#* }"
-}
-
-final_build_fixups() {
- # Copy our isolinux and preseed files.
- mv "$BUILD_DIR/extra/isolinux" "$BUILD_DIR/extra/preseed" "$BUILD_DIR"
- # Copy our initrd images
- debug "Fixing up initrds"
- [[ -d $BUILD_DIR/initrd ]] && rm -rf initrd
- mkdir -p "$BUILD_DIR/initrd"
- # Grab _all_ the nic drivers. We probably don't need them,
- # but a little paranoia never hurt anyone.
- ( cd "$BUILD_DIR/initrd";
- debug "Adding all nic drivers"
- for udeb in "$IMAGE_DIR/pool/main/l/linux/"nic-*-generic-*.udeb; do
- ar x "$udeb"
- tar xzf data.tar.gz
- rm -rf debian-binary *.tar.gz
- done
- # Make sure installing off a USB connected DVD will work
- debug "Adding USB connected DVD support"
- mkdir -p var/lib/dpkg/info
- cp "$CROWBAR_DIR/initrd/cdrom-detect.postinst" var/lib/dpkg/info
- debug "Enabling bootif support for debian-installer"
- mkdir -p lib/debian-installer-startup.d/
- cp "$CROWBAR_DIR/$OS_TO_STAGE-extra/patches/bootif" \
- lib/debian-installer-startup.d/S32set-bootif
- chmod 755 "lib/debian-installer-startup.d/S32set-bootif"
- for initrd in "install/initrd.gz" \
- "install/netboot/ubuntu-installer/amd64/initrd.gz"; do
- [[ -f $IMAGE_DIR/$initrd ]] || continue
- mkdir -p "$BUILD_DIR/${initrd%/*}"
- gunzip -c "$IMAGE_DIR/$initrd" >"$BUILD_DIR/initrd.tmp"
- find . -type f | \
- cpio --format newc --owner root:root \
- -oAF "$BUILD_DIR/initrd.tmp"
- cat "$BUILD_DIR/initrd.tmp" | \
- gzip -9 > "$BUILD_DIR/$initrd"
- done
- rm "$BUILD_DIR/initrd.tmp"
- )
- # rm -rf "$BUILD_DIR/initrd"
-}
-
-# Check to make sure all our prerequisites are met.
-for cmd in debootstrap ar; do
- which "$cmd" &>/dev/null || \
- die "Please install $cmd before trying to build Crowbar."
-done
+. "$CROWBAR_DIR/ubuntu-common/build_lib.sh"
View
581 ubuntu-11.04-extra/build_crowbar.sh
@@ -1,581 +0,0 @@
-#!/bin/bash
-# Copyright 2011, Dell
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Author: VictorLowther
-#
-
-# This script expects to be able to run certain commands as root.
-# Either run it as a user who can sudo to root, or give the user
-# you are running it as the following sudo rights:
-# crowbar-tester ALL = NOPASSWD: /bin/mount, /bin/umount, /usr/sbin/debootstrap, /bin/cp, /usr/sbin/chroot
-
-# When running this script for the first time, it will automatically create a
-# cache directory and try to populate it with all the build dependencies.
-# After that, if you need to pull in new dependencies, you will need to
-# call the script with the --update-cache parameter. If you are going to
-# develop on Crowbar, it is a good idea to put the build cache in its own git
-# repository, and create a branching structure for the packages that mirrors
-# the branching structure in the crowbar repository -- if you do that, then
-# this build script can be smarter about what packages it should pull in
-# whenever you invoke it to build an iso.
-
-# We always use the C language and locale
-export LANG="C"
-export LC_ALL="C"
-
-GEM_RE='([^0-9].*)-([0-9].*)'
-
-readonly currdir="$PWD"
-export PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin"
-
-# Source our config file if we have one
-[[ -f $HOME/.build-crowbar.conf ]] && \
- . "$HOME/.build-crowbar.conf"
-
-# Look for a local one.
-[[ -f build-crowbar.conf ]] && \
- . "build-crowbar.conf"
-
-# Set up our proxies if we were asked to.
-if [[ $USE_PROXY = "1" && $PROXY_HOST ]]; then
- proxy_str="http://"
- if [[ $PROXY_PASSWORD && $PROXY_USER ]]; then
- proxy_str+="$PROXY_USER:$PROXY_PASSWORD@"
- elif [[ $PROXY_USER ]]; then
- proxy_str+="$PROXY_USER@"
- fi
- proxy_str+="$PROXY_HOST"
- [[ $PROXY_PORT ]] && proxy_str+=":$PROXY_PORT"
- [[ $no_proxy ]] || no_proxy="localhost,localhost.localdomain,127.0.0.0/8,$PROXY_HOST"
- [[ $http_proxy ]] || http_proxy="$proxy_str/"
- [[ $https_proxy ]] || https_proxy="$http_proxy"
- export no_proxy http_proxy https_proxy
-else
- unset no_proxy http_proxy https_proxy
-fi
-
-# Next, some configuration variables that can be used to tune how the
-# build process works.
-
-# Barclamps to include. By default, start with just crowbar and let
-# the dependency machinery and the command line pull in the rest.
-# Note that BARCLAMPS is an array, not a string!
-[[ $BARCLAMPS ]] || BARCLAMPS=()
-
-[[ $ALLOW_CACHE_UPDATE ]] || ALLOW_CACHE_UPDATE=true
-
-# Location for caches that should not be erased between runs
-[[ $CACHE_DIR ]] || CACHE_DIR="$HOME/.crowbar-build-cache"
-
-# Location to store .iso images that we use in the build process.
-# These are usually OS install DVDs that we will stage Crowbar on to.
-[[ $ISO_LIBRARY ]] || ISO_LIBRARY="$CACHE_DIR/iso"
-
-# This is the location that we will save the generated .iso to.
-[[ $ISO_DEST ]] || ISO_DEST="$PWD"
-
-# Directory that holds our Sledgehammer PXE tree.
-[[ $SLEDGEHAMMER_PXE_DIR ]] || SLEDGEHAMMER_PXE_DIR="$CACHE_DIR/tftpboot"
-
-# Location of the Crowbar checkout we are building from.
-[[ $CROWBAR_DIR ]] || CROWBAR_DIR="${0%/*}"
-[[ $CROWBAR_DIR = /* ]] || CROWBAR_DIR="$currdir/$CROWBAR_DIR"
-[[ -f $CROWBAR_DIR/build_crowbar.sh && -d $CROWBAR_DIR/.git ]] || \
- die "$CROWBAR_DIR is not a git checkout of Crowbar!"
-export CROWBAR_DIR
-
-# Location of the Sledgehammer source tree. Only used if we cannot
-# find Sledgehammer in $SLEDGEHAMMER_PXE_DIR above.
-[[ $SLEDGEHAMMER_DIR ]] || SLEDGEHAMMER_DIR="${CROWBAR_DIR}/../crowbar-sledgehammer"
-
-# Command to run to clean out the tree before starting the build.
-# By default we want to be relatively pristine.
-[[ $VCS_CLEAN_CMD ]] || VCS_CLEAN_CMD='git clean -f -d'
-
-# Arrays holding the additional pkgs and gems populate Crowbar with.
-REPOS=()
-
-declare -A CD_POOL STAGED_POOL INSTALLED_PKGS FORCE_BARCLAMP_UPDATE
-
-# Get the OS we were asked to stage Crowbar on to. Assume it is Ubuntu 10.10
-# unless we specify otherwise.
-OS_TO_STAGE="${1-ubuntu-10.10}"
-shift
-
-
-# Source our common build functions
-. "$CROWBAR_DIR/build_lib.sh" || exit 1
-. "$CROWBAR_DIR/test_lib.sh" || exit 1
-
-# Make sure that we actually know how to build the ISO we were asked to
-# build. If we do not, print a helpful error message.
-if ! [[ $OS_TO_STAGE && -d $CROWBAR_DIR/$OS_TO_STAGE-extra && \
- -f $CROWBAR_DIR/$OS_TO_STAGE-extra/build_lib.sh ]]; then
- cat <<EOF
-You must pass the name of the operating system you want to stage Crowbar
-on to. Valid choices are:
-EOF
- cd "$CROWBAR_DIR"
- for d in *-extra; do
- [[ -d $d && -f $d/build_lib.sh ]] || continue
- echo " ${d%-extra}"
- done
- exit 1
-fi
-
-# Source OS specific build knowledge. This includes:
-# Parameters that build_crowbar.sh needs to know:
-# OS = the distribution we are staging on to, such as redhat or ubuntu.
-# OS_VERSION = the version of the distribution we are staging on to.
-# For redhat, it would be something like 5.6
-# OS_TOKEN = Defaults to "$OS-$OS_VERSION"
-# ISO = the name of the install ISO image we are going to stage Crowbar on to.
-# PKG_TYPE = The package type we should look for in crowbar.yml for OS
-# specific packages.
-# PKG_ALLOWED_ARCHES = The allowed package arches we will look to stage.
-# These are usually whatever the OS equivalent of
-# amd64 and all are.
-# CHROOT_GEMDIR = The location that the OS keeps its gem cache in.
-# CHROOT_PKGDIR = The location that the OS keeps its package cache in.
-# Functions that build_crowbar needs to call:
-# find_cd_pool(): This function should echo the current location of the
-# package pool on $ISO when it is mounted on $IMAGE_DIR.
-# fetch_os_iso(): This function should try to fetch the OS iso from
-# a well-known location on the Internet, and die if it cannot.
-# chroot_update(): This function should ask the chroot to update its
-# package metadata.
-# chroot_install(): This function should try to install the packages passed
-# to it as args.
-# chroot_fetch(): This function should ask to download (but not install)
-# the packages passed to it as arguments and all their dependencies.
-# add_repos(): This function should try to add repositories passed to it
-# as args to the chroot environment.
-# is_pkg(): This function should check to see if the string passed to it
-# is a valid package filename. The file need not actually exist.
-# pkg_name(): This function should extract the package name and arch as the
-# package management system will see them from the file passed as an argument,
-# and echo that information in $name-$arch format.
-# __make_chroot(): This function should handle all of the OS-specific actions
-# needed to set up the package-fetching chroot environment.
-# pkg_cmp(): This function should check to see that both of the files passed to
-# it refer to the same package name. If it does, this function should return
-# 0 if the first package is a higher revision than the second one, and 1 if
-# the first package is the same or lower revision. It should die if the
-# package names are not the same.
-# final_build_fixups(): This function should take whatever steps are needed
-# to make the default OS install process also ensure that the Crowbar bits
-# are properly staged and to completely automate the admin node install
-# process, either as an install from CD or an install via PXE. This
-# usually entails modifying initrd files, adding kickstarts/install seeds,
-# modifying boot config files, and so on.
-. "$CROWBAR_DIR/$OS_TO_STAGE-extra/build_lib.sh"
-
-# Build OS dependent query strings
-# These have to be created after we know what OS we are building on.
-BC_QUERY_STRINGS["pkgs"]="$PKG_TYPE pkgs"
-BC_QUERY_STRINGS["repos"]="$PKG_TYPE repos"
-BC_QUERY_STRINGS["ppas"]="$PKG_TYPE ppas"
-BC_QUERY_STRINGS["build_pkgs"]="$PKG_TYPE build_pkgs"
-BC_QUERY_STRINGS["raw_pkgs"]="$PKG_TYPE raw_pkgs"
-BC_QUERY_STRINGS["os_pkgs"]="$PKG_TYPE $OS_TOKEN pkgs"
-BC_QUERY_STRINGS["os_repos"]="$PKG_TYPE $OS_TOKEN repos"
-BC_QUERY_STRINGS["os_ppas"]="$PKG_TYPE $OS_TOKEN ppas"
-BC_QUERY_STRINGS["os_build_pkgs"]="$PKG_TYPE $OS_TOKEN build_pkgs"
-BC_QUERY_STRINGS["os_raw_pkgs"]="$PKG_TYPE $OS_TOKEN raw_pkgs"
-
-{
- # Check to make sure our required commands are installed.
- for cmd in sudo chroot mkisofs ruby; do
- which "$cmd" &>/dev/null || \
- die 1 "Please install $cmd before trying to build Crowbar."
- done
-
- # Make sure only one instance of the ISO build runs at a time.
- # Otherwise you can easily end up with a corrupted image.
-
- debug "Acquiring the build lock."
- flock 65
- # Figure out what our current branch is, in case we need to merge
- # other branches in to the iso to create our build.
- CURRENT_BRANCH="$(in_repo git symbolic-ref HEAD)" || \
- die "Not on a branch we can build from!"
- CURRENT_BRANCH=${CURRENT_BRANCH##*/}
- [[ $CURRENT_BRANCH ]] || die "Not on a branch we can merge from!"
-
- # Check and see if our local build repository is a git repo. If it is,
- # we may need to do the same sort of merging in it that we might do in the
- # Crowbar repository.
- if [[ -d $CACHE_DIR/.git ]] && \
- (cd "$CACHE_DIR"; branch_exists master) then
- CURRENT_CACHE_BRANCH=master
- fi
-
- # Parse our options.
- while [[ $1 ]]; do
- case $1 in
- # Merge a list of branches into a throwaway branch with the
- # current branch as a baseline before starting the rest of the
- # build process. This makes it easier to spin up iso images
- # with local changes without having to manually merge those
- # changes in with any other branches of interest first.
- # This code takes heavy advantage of the lightweight nature of
- # git branches and takes care to leave uncommitted changes in place.
- -m|--merge)
- shift
- # Loop through the rest of the arguments, as long as they
- # do not start with a -.
- while [[ $1 && ! ( $1 = -* ) ]]; do
- # Check to make sure that this argument refers to a branch
- # in the crowbar git tree. Die if it does not.
- in_repo branch_exists "$1" || die "$1 is not a git branch!"
- # If we have not already created a throwaway branch to
- # merge these branches into, do so now. If we have
- # uncommitted changes that need to be stashed, do so here.
- if [[ ! $THROWAWAY_BRANCH ]]; then
- THROWAWAY_BRANCH="build-throwaway-$$-$RANDOM"
- REPO_PWD="$PWD"
- if [[ ! $(in_repo git status) =~ working\ directory\ clean ]]; then
- THROWAWAY_STASH=$(in_repo git stash create)
- in_repo git checkout -f .
- fi
- in_repo git checkout -b "$THROWAWAY_BRANCH"
- fi
- # Merge the requested branch into the throwaway branch.
- # Die if the merge failed -- there must have been a
- # conflict, and the user needs to fix it up.
- in_repo git merge "$1" || \
- die "Merge of $1 failed, fix things up and continue"
- shift
- done
- ;;
- # Force an update of the cache
- update-cache|--update-cache) shift;
- need_update=true
- while [[ $1 && $1 != -* ]]; do
- is_barclamp "$1" || \
- die "Cannot update non-barclamp $1."
- FORCE_BARCLAMP_UPDATE["$1"]=true
- unset need_update || : &>/dev/null
- shift
- done;;
- # Pull in additional barclamps.
- --barclamps)
- shift
- while [[ $1 && $1 != -* ]]; do
- BARCLAMPS+=("$1")
- shift
- done;;
- --test)
- NEED_TEST=true
- test_params=()
- shift
- while [[ $1 && $1 != -* ]]; do
- test_params+=("$1")
- shift
- done;;
- --ci)
- [[ $CI_BARCLAMP ]] && die "Already asked to perform CI on $CI_BARCLAMP, and we can only do one at a time."
- shift
- is_barclamp "$1" || \
- die "$1 is not a barclamp, cannot perform CI testing on it."
- CI_BARCLAMP="$1"
- shift
- if [[ $1 && $1 != -* ]]; then
- in_ci_barclamp branch_exists "$1" || \
- die "$1 is not a branch in $CI_BARCLAMP, cannot perform integration testing!"
- CI_BRANCH="$1"
- shift
- else
- CI_BRANCH="master"
- fi;;
- --shrink)
- type shrink_iso >&/dev/null || \
- die "The build system does not know how to shrink $OS_TO_STAGE"
- SHRINK_ISO=true
- shift;;
- --generate-minimal-install)
- type generate_minimal_install &>/dev/null || \
- die "The build system does not know how to generate a minimal install list for $OS_TO_STAGE!"
- GENERATE_MINIMAL_INSTALL=true
- shift;;
- --no-cache-update) shift; ALLOW_CACHE_UPDATE=false;;
- --no-iso) shift; NO_GENERATE_ISO=true;;
- *) die "Unknown command line parameter $1";;
- esac
- done
-
- # If we stashed changes to the crowbar repo, apply them now.
- [[ $THROWAWAY_STASH ]] && in_repo git stash apply "$THROWAWAY_STASH"
-
- # Finalize where we expect to find our caches and our chroot.
- # If they were set in one of the conf files, don't touch them.
-
- # The directory we perform a minimal install into if we need
- # to refresh our gem or pkg caches
- [[ $CHROOT ]] || CHROOT="$CACHE_DIR/$OS_TOKEN/chroot"
-
- # Make sure that the $OS_TOKEN directory exist.
- mkdir -p "$CACHE_DIR/$OS_TOKEN"
-
- # The directory we will stage the build into.
- [[ $BUILD_DIR ]] || \
- BUILD_DIR="$CACHE_DIR/$OS_TOKEN/build"
- # The directory that we will mount the OS .ISO on .
- [[ $IMAGE_DIR ]] || \
- IMAGE_DIR="$CACHE_DIR/$OS_TOKEN/image"
-
- # Directory where we will look for our package lists
- [[ $PACKAGE_LISTS ]] || PACKAGE_LISTS="$BUILD_DIR/extra/packages"
-
- # Proxy Variables
- [[ $USE_PROXY ]] || USE_PROXY=0
- [[ $PROXY_HOST ]] || PROXY_HOST=""
- [[ $PROXY_PORT ]] || PROXY_PORT=""
- [[ $PROXY_USER ]] || PROXY_USER=""
- [[ $PROXY_ESC_USER ]] || PROXY_ESC_USER=""
- [[ $PROXY_PASSWORD ]] || PROXY_PASSWORD=""
-
- # Version for ISO
- [[ $VERSION ]] || VERSION="$(cd "$CROWBAR_DIR"; git describe --long --tags)-dev"
-
- # Name of the built iso we will build
- [[ $BUILT_ISO ]] || BUILT_ISO="crowbar-${VERSION}.iso"
-
- if [[ $CI_BARCLAMP ]]; then
- in_ci_barclamp git checkout -b ci-throwaway-branch || \
- die "Could not check out throwaway branch for CI testing on $CI_BARCLAMP"
- in_ci_barclamp git merge "$CI_BRANCH" || \
- die "$CI_BRANCH does not merge cleanly in $CI_BARCLAMP. Please fix this before continuing"
- if [[ $CI_BRANCH != master ]]; then
- in_ci_barclamp git merge master || \
- die "$CI_BRANCH does not merge cleanly into master on $CI_BARCLAMP. Please fix."
- fi
- NEED_TEST=true
- test_params=("$CI_BARCLAMP")
- fi
-
- # If we were not passed a list of barclamps to include,
- # pull in all of the ones declared as submodules.
- [[ $BARCLAMPS ]] || BARCLAMPS=($(cd "$CROWBAR_DIR"
- while read sha submod branch; do
- [[ $submod = barclamps/* ]] || continue
- [[ -f $submod/crowbar.yml ]] || \
- echo "Cannot find crowbar.yml for $submod, exiting."
- echo "${submod##*/}"
- done < <(git submodule status))
- )
-
- if [[ $CI_BARCLAMP ]]; then
- is_in "$CI_BARCLAMP" "${BARCLAMPS[@]}" || BARCLAMPS+=("$CI_BARCLAMP")
- fi
- # Pull in barclamp information
- get_barclamp_info
-
- # Make any directories we don't already have
- for d in "$ISO_LIBRARY" "$ISO_DEST" "$IMAGE_DIR" "$BUILD_DIR" \
- "$SLEDGEHAMMER_PXE_DIR" "$CHROOT"; do
- mkdir -p "$d"
- done
-
- debug "Checking for Sledgehammer."
- # Make sure Sledgehammer has already been built and pre-staged.
- if ! [[ -f $SLEDGEHAMMER_DIR/bin/sledgehammer-tftpboot.tar.gz || \
- -f $SLEDGEHAMMER_PXE_DIR/initrd0.img ]]; then
- echo "Slegehammer TFTP image missing!"
- echo "Please build Sledgehammer from $SLEDGEHAMMER_DIR before building Crowbar."
- exit 1
- fi
-
- # Fetch the OS ISO if we need to.
- [[ -f $ISO_LIBRARY/$ISO ]] || fetch_os_iso
-
- # Start with a clean slate.
- clean_dirs "$IMAGE_DIR" "$BUILD_DIR" "$CHROOT"
-
- debug "Cleaning up any VCS cruft."
- # Clean up any cruft that the editor may have left behind.
- (for d in "$CROWBAR_DIR" "$CROWBAR_DIR/barclamps/"*; do
- cd "$d"; $VCS_CLEAN_CMD
- done)
-
- # Make additional directories we will need.
- for d in discovery extra/pkgs extra/files; do
- mkdir -p "$BUILD_DIR/$d"
- done
-
- # Mount our ISO for the build process.
- debug "Mounting $ISO"
- sudo mount -t iso9660 -o loop "$ISO_LIBRARY/$ISO" "$IMAGE_DIR" || \
- die "Could not mount $ISO"
- debug "Indexing CD package pool."
- index_cd_pool
-
- # Copy over the Crowbar bits and their prerequisites
- cp -r "$CROWBAR_DIR/extra"/* "$BUILD_DIR/extra"
- cp -r "$CROWBAR_DIR/$OS_TOKEN-extra"/* "$BUILD_DIR/extra"
- cp -r "$CROWBAR_DIR/change-image"/* "$BUILD_DIR"
-
- # Add critical build meta information to build-info
- echo "build-timestamp: $(date '+%F %T %z')" > "$BUILD_DIR/build-info"
- echo "build-os: $OS_TOKEN" >>"$BUILD_DIR/build-info"
- echo "build-os-iso: $ISO" >>"$BUILD_DIR/build-info"
- echo "crowbar: $(get_rev "$CROWBAR_DIR")" >>"$BUILD_DIR/build-info"
-
- # Make sure that all our barclamps are properly staged.
- for bc in "${BARCLAMPS[@]}"; do
- is_barclamp "$bc" || die "Cannot find barclamp $bc!"
- debug "Staging $bc barclamp."
- for cache in pkg gem raw_pkg file; do
- checker="barclamp_${cache}_cache_needs_update"
- updater="update_barclamp_${cache}_cache"
- [[ $(type $checker) = "$checker is a function"* ]] || \
- die "Asked to check $cache cache, but no checker function!"
- [[ $(type $updater) = "$updater is a function"* ]] || \
- die "Might need to update $cache cache, but no updater!"
- if $checker "$bc"; then
- [[ $ALLOW_CACHE_UPDATE = true ]] || \
- die "Need up update $cache cache for $bc, but updates are disabled."
- debug "Updating $cache cache for $bc"
- [[ $cache =~ ^(pkg|gem)$ ]] && make_chroot
- $updater "$bc"
- fi
- done
- echo "barclamps/$bc: $(get_rev "$CROWBAR_DIR/barclamps/$bc")" >> "$BUILD_DIR/build-info"
- done
- # Once all our barclamps have had their packages staged, create tarballs of them.
- mkdir -p "$BUILD_DIR/dell/barclamps"
- "$CROWBAR_DIR/package_barclamp.sh" --destdir "$BUILD_DIR/dell/barclamps" \
- --os "$OS_TOKEN" "${BARCLAMPS[@]}"
-
- if [[ $ALLOW_CACHE_UPDATE != true && $CURRENT_CACHE_BRANCH ]]; then
- echo "build-cache: $(get_rev "$CACHE_DIR")" >> "$BUILD_DIR/build-info"
- fi
-
- (cd "$BUILD_DIR"
- find extra dell -type f -print | \
- sort >> "build-info")
- # Make sure we still provide the legacy ami location
- (cd "$BUILD_DIR"; ln -sf extra/files/ami)
- # Store off the version
- echo "$VERSION" >> "$BUILD_DIR/dell/Version"
-
- # Custom start-up in place
- for f in "$CROWBAR_DIR"/*.json ; do
- [[ -f $f ]] || continue
- mkdir -p "$BUILD_DIR/extra/config"
- cp "$f" "$BUILD_DIR/extra/config"
- done
-
- final_build_fixups
-
- # Copy over the bits that Sledgehammer will look for.
- debug "Copying over Sledgehammer bits"
- # If we need to copy over a new Sledgehammer image, do so.
- if [[ $SLEDGEHAMMER_DIR/bin/sledgehammer-tftpboot.tar.gz -nt \
- $SLEDGEHAMMER_PXE_DIR/initrd0.img ]]; then
- ( cd $SLEDGEHAMMER_PXE_DIR
- debug "Extracting new Sledgehammer TFTP boot image"
- rm -rf .
- cd ..
- tar xzf "$SLEDGEHAMMER_DIR/bin/sledgehammer-tftpboot.tar.gz"
- rm -f "$SLEDGEHAMMER_DIR/bin/sledgehammer-tftpboot.tar.gz"
- )
- fi
- cp -a "$SLEDGEHAMMER_PXE_DIR"/* "$BUILD_DIR/discovery"
-
- # Make our image
- debug "Creating new ISO"
- # Find files and directories that mkisofs will complain about.
- # Do just top-level overlapping directories for now.
- for d in $(cat <(cd "$BUILD_DIR"; find -maxdepth 1 -type d) \
- <(cd "$IMAGE_DIR"; find -maxdepth 1 -type d) | \
- sort |uniq -d); do
- [[ $d = . ]] && continue
- d=${d#./}
- # Copy contents of the found directories into $BUILD_DIR, taking care
- # to not clobber existing files.
- mkdir -p "$BUILD_DIR/$d"
- chmod u+wr "$BUILD_DIR/$d"
- # We could also use cp -n, but rhel5 and centos5 do not understand it.
- rsync -rl --ignore-existing --inplace "$IMAGE_DIR/$d/." "$BUILD_DIR/$d/."
- chmod -R u+wr "$BUILD_DIR/$d"
- # Bind mount an empty directory on the $IMAGE_DIR instance.
- sudo mount -t tmpfs -o size=1K tmpfs "$IMAGE_DIR/$d"
- done
- mkdir -p "$BUILD_DIR/isolinux"
- chmod u+wr "$BUILD_DIR/isolinux"
- rsync -rl --ignore-existing --inplace \
- "$IMAGE_DIR/isolinux/." "$BUILD_DIR/isolinux/."
- chmod -R u+wr "$BUILD_DIR/isolinux"
- sudo mount -t tmpfs -o size=1K tmpfs "$IMAGE_DIR/isolinux"
-
- [[ $SHRINK_ISO && ! $GENERATE_MINIMAL_ISO ]] && shrink_iso
- # Make a file list and a link list.
- ( cd $BUILD_DIR
- find . -type f | \
- sort > crowbar_files.list
- find . -type l | \
- xargs ls -ld | \
- awk '{ print $8 " " $10 }' | \
- sort > crowbar_links.list
- )
- ( cd $IMAGE_DIR
- find . -type f | \
- sort >> $BUILD_DIR/crowbar_files.list
- find . -type l | \
- xargs ls -ld | \
- awk '{ print $8 " " $10 }' | \
- sort >> $BUILD_DIR/crowbar_links.list
- )
-
- # Make an ISO
- build_iso || die "There was a problem building our ISO."
- if [[ $GENERATE_MINIMAL_INSTALL = true ]]; then
- if [[ ! -f "$CROWBAR_DIR/$OS_TOKEN-extra/minimal-install" ]]; then
- if [[ ! -f "$HOME/admin-installed.list" ]]; then
- SMOKETEST_ISO="$ISO_DEST/$BUILT_ISO"
- test_iso admin-only
- fi
- [[ -f "$HOME/admin-installed.list" ]] || \
- die "Could not generate minimal install list!"
- mv "$HOME/admin-installed.list" \
- "$CROWBAR_DIR/$OS_TOKEN-extra/minimal-install"
- debug "Minimal install generated and saved to $CROWBAR_DIR/$OS_TOKEN-extra/minimal-install."
- debug "Please commit it and rerun the build with --shrink."
- fi
- fi
- echo "$(date '+%F %T %z'): Image at $ISO_DEST/$BUILT_ISO"
- if [[ $NEED_TEST = true ]]; then
- echo "$(date '+%F %T %z'): Testing new iso"
- SMOKETEST_ISO="$ISO_DEST/$BUILT_ISO"
- if test_iso "${test_params[@]}"; then
- echo "$(date '+%F %T %z'): Test passed"
- if [[ $CI_BARCLAMP ]]; then
- in_ci_barclamp git checkout master && \
- in_ci_barclamp git merge ci-throwaway-branch || \
- die "Could not merge $CI_BRANCH into master for $CI_BARCLAMP"
- in_repo git add "barclamps/$CI_BARCLAMP" && \
- in_repo git commit -m "Jenkins tested branch $CI_BRANCH of $CI_BARCLAMP on $(date '+%F %T %z'), and found it good." || \
- die "Could not update submodule reference for $CI_BARCLAMP"
- fi
- else
- [[ $CI_BARCLAMP ]] && \
- echo "$(date '+%F %T %z'): Continuous integration test on $CI_BARCLAMP failed."
- die "Test failed."
- fi
-
- fi
- echo "$(date '+%F %T %z'): Finished."
-} 65> /tmp/.build_crowbar.lock
View
168 ubuntu-11.04-extra/build_lib.sh
@@ -8,171 +8,5 @@ OS=ubuntu
OS_VERSION=11.04
OS_TOKEN="$OS-$OS_VERSION"
OS_CODENAME=natty
-# Server to download the mirror from if we need to.
-ISO_MIRROR="http://mirror.anl.gov/pub"
-PKG_TYPE="debs"
-PKG_ALLOWED_ARCHES=("amd64" "all")
-CHROOT_PKGDIR="var/cache/apt/archives"
-CHROOT_GEMDIR="var/lib/gems/1.8/cache"
-declare -A SEEN_DEBS
-# The name of the OS iso we are using as a base.
-[[ $ISO ]] || ISO="ubuntu-$OS_VERSION-server-amd64.iso"
-# The location for OS packages on $ISO
-find_cd_pool() ( echo "$IMAGE_DIR/pool"; )
-
-fetch_os_iso() {
- # Try and download our ISO if we don't already have it
- echo "$(date '+%F %T %z'): Downloading and caching $ISO"
- curl -o "$ISO_LIBRARY/$ISO" \
- "$ISO_MIRROR/ubuntu-iso/CDs/$OS_VERSION/$ISO" || \
- die 1 "Missing our source image"
-}
-
-# Have the chroot update its package databases.
-chroot_update() { in_chroot /usr/bin/apt-get -y --force-yes \
- --allow-unauthenticated update; }
-
-# Install some packages in the chroot environment.
-chroot_install() {
- if [[ $1 ]]; then
- in_chroot /usr/bin/apt-get -y --force-yes \
- --allow-unauthenticated install "$@"
- fi
-}
-
-# Fetch (but do not install) packages into the chroot environment
-chroot_fetch() {
- if [[ $1 ]]; then
- in_chroot /usr/bin/apt-get -y --force-yes \
- --allow-unauthenticated --download-only install "$@"
- fi
-}
-
-# Add repositories to the local chroot environment.
-add_repos() {
- local repo ppas=()
- local f=$(mktemp /tmp/ubuntu_repos.XXXXXX)
- for repo in "$@"; do
- case $repo in
- ppa*) ppas+=("${ppa#* }");;
- deb*) echo "$repo" >> "$f";;
- *) die "Unknown Debian repository type $repo";;
- esac
- done
- in_chroot mkdir -p /etc/apt/sources.list.d
- sudo cp "$f" "$CHROOT/etc/apt/sources.list.d/${f##*.}.list"
- rm "$f"
- [[ $ppas ]] || return 0
- chroot_install python-software-properties
- for repo in "${ppas[@]}"; do
- in_chroot apt-add-repository "ppa:${repo}"
- done
-}
-
-# Test to see we were passed a valid package file name.
-is_pkg() { [[ $1 = *.deb ]]; }
-
-# Look up name and version information for a package using
-# dpkg. Make sure and memoize things.
-dpkg_info() {
- # $1 = package to examine
- local name arch ver f1 f2
- [[ -f $1 && $1 = *.deb ]] || die "$1 is not a debian package!"
- if [[ ! ${SEEN_DEBS["${1##*/}"]} ]]; then
- while read f1 f2; do
- case $f1 in
- Package:) name="$f2";;
- Version:) ver="$f2";;
- Architecture:) arch="$f2";;
- esac
- [[ $name && $ver && $arch ]] && break || :
- done < <(dpkg -I "$1")
- SEEN_DEBS["${1##*/}"]="$name-$arch $ver"
- fi
- echo "${SEEN_DEBS["${1##*/}"]}"
-}
-
-# Get the package file name in $name-$arch format.
-pkg_name() {
- local n="$(dpkg_info "$1")"
- echo "${n%% *}"
-}
-
-# OS specific part of making our chroot environment.
-__make_chroot() {
- # debootstrap a minimal install of our target version of
- # Ubuntu to ensure that we don't interfere with the host's package cache.
- local d repo bc f
- sudo mount -t tmpfs -o size=4G "$OS_TOKEN-chroot" "$CHROOT"
- sudo debootstrap "$OS_CODENAME" "$CHROOT" \
- "file://$IMAGE_DIR" || \
- die 1 "Could not bootstrap our scratch target!"
- # mount some important directories for the chroot
- for d in proc sys dev dev/pts; do
- bind_mount "/$d" "$CHROOT/$d"
- done
- in_chroot mkdir -p "/base_repo"
- sudo mount --bind "$IMAGE_DIR" "$CHROOT/base_repo"
- # make sure the chroot can resolve hostnames
- sudo cp /etc/resolv.conf "$CHROOT/etc/resolv.conf"
- # make sure the chroot honors proxies
- if [[ $http_proxy || $https_proxy ]]; then
- f=$(mktemp /tmp/apt.http.conf.XXXXXX)
- [[ $http_proxy ]] && echo \
- "Acquire::http::Proxy \"$http_proxy\";" >> "$f"
- [[ $https_proxy ]] && echo \
- "Acquire::https::Proxy \"$https_proxy\";" >> "$f"
- echo "Acquire::http::Proxy::127.0.0.1 \"DIRECT\";" >> "$f"
- in_chroot mkdir -p "/etc/apt/apt.conf.d/"
- sudo cp "$f" "$CHROOT/etc/apt/apt.conf.d/00http_proxy"
- fi
-}
-
-# Test to see if package $1 is more recent than package $2
-pkg_cmp() {
- # $1 = Debian package 1
- # $2 = Debian package 2
- local deb1="$(dpkg_info "$1")"
- local deb2="$(dpkg_info "$2")"
- [[ ${deb1%% *} = ${deb2%% *} ]] || \
- die "$1 and $2 do not reference the same package!"
- vercmp "${deb1#* }" "${deb2#* }"
-}
-
-final_build_fixups() {
- # Copy our isolinux and preseed files.
- mv "$BUILD_DIR/extra/isolinux" "$BUILD_DIR/extra/preseed" "$BUILD_DIR"
- # Copy our initrd images
- (cd "$IMAGE_DIR"; find -name initrd.gz |cpio -o) | \
- (cd "$BUILD_DIR"; cpio -i --make-directories)
- chmod -R u+w "$BUILD_DIR"
- # Fix up the initrd
- ( cd "$CROWBAR_DIR/initrd"
- debug "Fixing up initrd"
- [[ -d scratch ]] && rm -rf scratch
- mkdir scratch
- # Grab _all_ the nic drivers. We probably don't need them,
- # but a little paranoia never hurt anyone.
- ( cd scratch;
- debug "Adding all nic drivers"
- for udeb in "$IMAGE_DIR/pool/main/l/linux/"nic-*-generic-*.udeb; do
- ar x "$udeb"
- tar xzf data.tar.gz
- rm -rf debian-binary *.tar.gz
- done
- # Make sure installing off a USB connected DVD will work
- debug "Adding USB connected DVD support"
- mkdir -p var/lib/dpkg/info
- cp ../cdrom-detect.postinst var/lib/dpkg/info
- # Append our new gzipped CPIO archive onto the old one.
- find . |cpio --create --format=newc --owner root:root 2>/dev/null | \
- gzip -9 >> "$BUILD_DIR/install/initrd.gz" )
- rm -rf scratch )
-}
-
-# Check to make sure all our prerequisites are met.
-for cmd in debootstrap ar; do
- which "$cmd" &>/dev/null || \
- die "Please install $cmd before trying to build Crowbar."
-done
+. "$CROWBAR_DIR/ubuntu-common/build_lib.sh"
View
168 ubuntu-11.10-extra/build_lib.sh
@@ -8,171 +8,5 @@ OS=ubuntu
OS_VERSION=11.10
OS_TOKEN="$OS-$OS_VERSION"
OS_CODENAME=oneiric
-# Server to download the mirror from if we need to.
-ISO_MIRROR="http://mirror.anl.gov/pub"
-PKG_TYPE="debs"
-PKG_ALLOWED_ARCHES=("amd64" "all")
-CHROOT_PKGDIR="var/cache/apt/archives"
-CHROOT_GEMDIR="var/lib/gems/1.8/cache"
-declare -A SEEN_DEBS
-# The name of the OS iso we are using as a base.
-[[ $ISO ]] || ISO="ubuntu-$OS_VERSION-server-amd64.iso"
-# The location for OS packages on $ISO
-find_cd_pool() ( echo "$IMAGE_DIR/pool"; )
-
-fetch_os_iso() {
- # Try and download our ISO if we don't already have it
- echo "$(date '+%F %T %z'): Downloading and caching $ISO"
- curl -o "$ISO_LIBRARY/$ISO" \
- "$ISO_MIRROR/ubuntu-iso/CDs/$OS_VERSION/$ISO" || \
- die 1 "Missing our source image"
-}
-
-# Have the chroot update its package databases.
-chroot_update() { in_chroot /usr/bin/apt-get -y --force-yes \
- --allow-unauthenticated update; }
-
-# Install some packages in the chroot environment.
-chroot_install() {
- if [[ $1 ]]; then
- in_chroot /usr/bin/apt-get -y --force-yes \
- --allow-unauthenticated install "$@"
- fi
-}
-
-# Fetch (but do not install) packages into the chroot environment
-chroot_fetch() {
- if [[ $1 ]]; then
- in_chroot /usr/bin/apt-get -y --force-yes \
- --allow-unauthenticated --download-only install "$@"
- fi
-}
-
-# Add repositories to the local chroot environment.
-add_repos() {
- local repo ppas=()
- local f=$(mktemp /tmp/ubuntu_repos.XXXXXX)
- for repo in "$@"; do
- case $repo in
- ppa*) ppas+=("${ppa#* }");;
- deb*) echo "$repo" >> "$f";;
- *) die "Unknown Debian repository type $repo";;
- esac
- done
- in_chroot mkdir -p /etc/apt/sources.list.d
- sudo cp "$f" "$CHROOT/etc/apt/sources.list.d/${f##*.}.list"
- rm "$f"
- [[ $ppas ]] || return 0
- chroot_install python-software-properties
- for repo in "${ppas[@]}"; do
- in_chroot apt-add-repository "ppa:${repo}"
- done
-}
-
-# Test to see we were passed a valid package file name.
-is_pkg() { [[ $1 = *.deb ]]; }
-
-# Look up name and version information for a package using
-# dpkg. Make sure and memoize things.
-dpkg_info() {
- # $1 = package to examine
- local name arch ver f1 f2
- [[ -f $1 && $1 = *.deb ]] || die "$1 is not a debian package!"
- if [[ ! ${SEEN_DEBS["${1##*/}"]} ]]; then
- while read f1 f2; do
- case $f1 in
- Package:) name="$f2";;
- Version:) ver="$f2";;
- Architecture:) arch="$f2";;
- esac
- [[ $name && $ver && $arch ]] && break || :
- done < <(dpkg -I "$1")
- SEEN_DEBS["${1##*/}"]="$name-$arch $ver"
- fi
- echo "${SEEN_DEBS["${1##*/}"]}"
-}
-
-# Get the package file name in $name-$arch format.
-pkg_name() {
- local n="$(dpkg_info "$1")"
- echo "${n%% *}"
-}
-
-# OS specific part of making our chroot environment.
-__make_chroot() {
- # debootstrap a minimal install of our target version of
- # Ubuntu to ensure that we don't interfere with the host's package cache.
- local d repo bc f
- sudo mount -t tmpfs -o size=4G "$OS_TOKEN-chroot" "$CHROOT"
- sudo debootstrap "$OS_CODENAME" "$CHROOT" \
- "file://$IMAGE_DIR" || \
- die 1 "Could not bootstrap our scratch target!"
- # mount some important directories for the chroot
- for d in proc sys dev dev/pts; do
- bind_mount "/$d" "$CHROOT/$d"
- done
- in_chroot mkdir -p "/base_repo"
- sudo mount --bind "$IMAGE_DIR" "$CHROOT/base_repo"
- # make sure the chroot can resolve hostnames
- sudo cp /etc/resolv.conf "$CHROOT/etc/resolv.conf"
- # make sure the chroot honors proxies
- if [[ $http_proxy || $https_proxy ]]; then
- f=$(mktemp /tmp/apt.http.conf.XXXXXX)
- [[ $http_proxy ]] && echo \
- "Acquire::http::Proxy \"$http_proxy\";" >> "$f"
- [[ $https_proxy ]] && echo \
- "Acquire::https::Proxy \"$https_proxy\";" >> "$f"
- echo "Acquire::http::Proxy::127.0.0.1 \"DIRECT\";" >> "$f"
- in_chroot mkdir -p "/etc/apt/apt.conf.d/"
- sudo cp "$f" "$CHROOT/etc/apt/apt.conf.d/00http_proxy"
- fi
-}
-
-# Test to see of package $1 is more recent than package $2
-pkg_cmp() {
- # $1 = Debian package 1
- # $2 = Debian package 2
- local deb1="$(dpkg_info "$1")"
- local deb2="$(dpkg_info "$2")"
- [[ ${deb1%% *} = ${deb2%% *} ]] || \
- die "$1 and $2 do not reference the same package!"
- vercmp "${deb1#* }" "${deb2#* }"
-}
-
-final_build_fixups() {
- # Copy our isolinux and preseed files.
- mv "$BUILD_DIR/extra/isolinux" "$BUILD_DIR/extra/preseed" "$BUILD_DIR"
- # Copy our initrd images
- (cd "$IMAGE_DIR"; find -name initrd.gz |cpio -o) | \
- (cd "$BUILD_DIR"; cpio -i --make-directories)
- chmod -R u+w "$BUILD_DIR"
- # Fix up the initrd
- ( cd "$CROWBAR_DIR/initrd"
- debug "Fixing up initrd"
- [[ -d scratch ]] && rm -rf scratch
- mkdir scratch
- # Grab _all_ the nic drivers. We probably don't need them,
- # but a little paranoia never hurt anyone.
- ( cd scratch;
- debug "Adding all nic drivers"
- for udeb in "$IMAGE_DIR/pool/main/l/linux/"nic-*-generic-*.udeb; do
- ar x "$udeb"
- tar xzf data.tar.gz
- rm -rf debian-binary *.tar.gz
- done
- # Make sure installing off a USB connected DVD will work
- debug "Adding USB connected DVD support"
- mkdir -p var/lib/dpkg/info
- cp ../cdrom-detect.postinst var/lib/dpkg/info
- # Append our new gzipped CPIO archive onto the old one.
- find . |cpio --create --format=newc --owner root:root 2>/dev/null | \
- gzip -9 >> "$BUILD_DIR/install/initrd.gz" )
- rm -rf scratch )
-}
-
-# Check to make sure all our prerequisites are met.
-for cmd in debootstrap ar; do
- which "$cmd" &>/dev/null || \
- die "Please install $cmd before trying to build Crowbar."
-done
+. "$CROWBAR_DIR/ubuntu-common/build_lib.sh"
View
56 ubuntu-common/NOTES
@@ -0,0 +1,56 @@
+# Copyright 2011, Dell
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+To get through dell firewalls, you need to set the following environment variables (change <password> to the appropriate thing):
+export http_proxy='http://Americas\Gregory_Althaus:<password>@proxy.us.dell.com:80'
+export HTTP_PROXY='http://Americas\Gregory_Althaus:<password>@proxy.us.dell.com:80'
+export https_proxy='http://Americas\Gregory_Althaus:<password>@proxy.us.dell.com:80'
+export HTTPS_PROXY='http://Americas\Gregory_Althaus:<password>@proxy.us.dell.com:80'
+
+To get canonical based debs:
+1. Edit sources.list in the /etc/apt directory
+ a. Change all the #deb to deb
+ b. Comment out the local repos
+2. Run: apt-get update
+3. Run: rm /var/cache/apt/archives/*.deb
+4. Run: apt-get install <packages you need>
+5. Run: cp /var/cache/apt/archives/*.deb /tftpboot/ubuntu_dvd/extra/debs
+
+To get swift debs:
+1. Add the swift repository
+ a. For latest stable: add-apt-repository ppa:swift-core/ppa
+ b. For latest dev: add-apt-repository ppa:swift-core/trunk
+2. Run: apt-get update
+3. Run: rm /var/cache/apt/archives/*.deb
+4. Run: apt-get install python-swift
+5. Run: apt-get install swift swift-proxy swift-account swift-object swift-container
+6. Run: rm /tftpboot/ubuntu_dvd/extra/debs/openstack/swift/*
+7. Run: cp /var/cache/apt/archives/*.deb /tftpboot/ubuntu_dvd/extra/debs/openstack/swift
+
+To get nova debs:
+1. Add the nova repository
+ a. For latest stable: add-apt-repository ppa:nova-core/ppa
+ b. For latest dev: add-apt-repository ppa:nova-core/trunk
+2. Run: apt-get update
+3. Run: rm /var/cache/apt/archives/*.deb
+4. Run: apt-get install python-nova
+5. Run: apt-get install nova-common nova-api nova-objectstore nova-volume nova-compute nova-scheduler nova-doc nova-network nova-ajax-console-proxy
+6. Run: rm /tftpboot/ubuntu_dvd/extra/debs/openstack/nova/*
+7. Run: cp /var/cache/apt/archives/*.deb /tftpboot/ubuntu_dvd/extra/debs/openstack/nova
+
+To build a repos from this directory:
+1. apt-get install build-essential
+2. run: dpkg-scanpackages ./ /dev/null | gzip > Packages.gz
+
View
1 ubuntu-common/apt.conf
@@ -0,0 +1 @@
+APT::Get::AllowUnauthenticated 1 ;
View
183 ubuntu-common/build_lib.sh
@@ -0,0 +1,183 @@
+#!/bin/bash
+# This file is sourced by build_crowbar.sh when you want to build Crowbar
+# using Ubuntu as the base OS. It includes build routines common to all
+# Ubuntu distributions (so far).
+
+# Server to download the mirror from if we need to.
+ISO_MIRROR="http://mirror.anl.gov/pub"
+PKG_TYPE="debs"
+PKG_ALLOWED_ARCHES=("amd64" "all")
+CHROOT_PKGDIR="var/cache/apt/archives"
+CHROOT_GEMDIR="var/lib/gems/1.8/cache"
+declare -A SEEN_DEBS
+# The name of the OS iso we are using as a base.
+[[ $ISO ]] || ISO="ubuntu-$OS_VERSION-server-amd64.iso"
+
+# The location for OS packages on $ISO
+find_cd_pool() ( echo "$IMAGE_DIR/pool"; )
+
+fetch_os_iso() {
+ # Try and download our ISO if we don't already have it
+ echo "$(date '+%F %T %z'): Downloading and caching $ISO"
+ curl -o "$ISO_LIBRARY/$ISO" \
+ "$ISO_MIRROR/ubuntu-iso/CDs/$OS_VERSION/$ISO" || \
+ die 1 "Missing our source image"
+}
+
+# Have the chroot update its package databases.
+chroot_update() { in_chroot /usr/bin/apt-get -y --force-yes \
+ --allow-unauthenticated update; }
+
+# Install some packages in the chroot environment.
+chroot_install() {
+ if [[ $1 ]]; then
+ in_chroot /usr/bin/apt-get -y --force-yes \
+ --allow-unauthenticated install "$@"
+ fi
+}
+
+# Fetch (but do not install) packages into the chroot environment
+chroot_fetch() {
+ if [[ $1 ]]; then
+ in_chroot /usr/bin/apt-get -y --force-yes \
+ --allow-unauthenticated --download-only install "$@"
+ fi
+}
+
+# Add repositories to the local chroot environment.
+add_repos() {
+ local repo ppas=()
+ local f=$(mktemp /tmp/ubuntu_repos.XXXXXX)
+ for repo in "$@"; do
+ case $repo in
+ ppa*) ppas+=("${ppa#* }");;
+ deb*) echo "$repo" >> "$f";;
+ *) die "Unknown Debian repository type $repo";;
+ esac
+ done
+ in_chroot mkdir -p /etc/apt/sources.list.d
+ sudo cp "$f" "$CHROOT/etc/apt/sources.list.d/${f##*.}.list"
+ rm "$f"
+ [[ $ppas ]] || return 0
+ chroot_install python-software-properties
+ for repo in "${ppas[@]}"; do
+ in_chroot apt-add-repository "ppa:${repo}"
+ done
+}
+
+# Test to see we were passed a valid package file name.
+is_pkg() { [[ $1 = *.deb ]]; }
+
+# Look up name and version information for a package using
+# dpkg. Make sure and memoize things.
+dpkg_info() {
+ # $1 = package to examine
+ local name arch ver f1 f2
+ [[ -f $1 && $1 = *.deb ]] || die "$1 is not a debian package!"
+ if [[ ! ${SEEN_DEBS["${1##*/}"]} ]]; then
+ while read f1 f2; do
+ case $f1 in
+ Package:) name="$f2";;
+ Version:) ver="$f2";;
+ Architecture:) arch="$f2";;
+ esac
+ [[ $name && $ver && $arch ]] && break || :
+ done < <(dpkg -I "$1")
+ SEEN_DEBS["${1##*/}"]="$name-$arch $ver"
+ fi
+ echo "${SEEN_DEBS["${1##*/}"]}"
+}
+
+# Get the package file name in $name-$arch format.
+pkg_name() {
+ local n="$(dpkg_info "$1")"
+ echo "${n%% *}"
+}
+
+# OS specific part of making our chroot environment.
+__make_chroot() {
+ # debootstrap a minimal install of our target version of
+ # Ubuntu to ensure that we don't interfere with the host's package cache.
+ local d repo bc f
+ sudo mount -t tmpfs -o size=4G "$OS_TOKEN-chroot" "$CHROOT"
+ sudo debootstrap "$OS_CODENAME" "$CHROOT" \
+ "file://$IMAGE_DIR" || \
+ die 1 "Could not bootstrap our scratch target!"
+ # mount some important directories for the chroot
+ for d in proc sys dev dev/pts; do
+ bind_mount "/$d" "$CHROOT/$d"
+ done
+ in_chroot mkdir -p "/base_repo"
+ sudo mount --bind "$IMAGE_DIR" "$CHROOT/base_repo"
+ # make sure the chroot can resolve hostnames
+ sudo cp /etc/resolv.conf "$CHROOT/etc/resolv.conf"
+ # make sure the chroot honors proxies
+ if [[ $http_proxy || $https_proxy ]]; then
+ f=$(mktemp /tmp/apt.http.conf.XXXXXX)
+ [[ $http_proxy ]] && echo \
+ "Acquire::http::Proxy \"$http_proxy\";" >> "$f"
+ [[ $https_proxy ]] && echo \
+ "Acquire::https::Proxy \"$https_proxy\";" >> "$f"
+ echo "Acquire::http::Proxy::127.0.0.1 \"DIRECT\";" >> "$f"
+ in_chroot mkdir -p "/etc/apt/apt.conf.d/"
+ sudo cp "$f" "$CHROOT/etc/apt/apt.conf.d/00http_proxy"
+ fi
+}
+
+# Test to see of package $1 is more recent than package $2
+pkg_cmp() {
+ # $1 = Debian package 1
+ # $2 = Debian package 2
+ local deb1="$(dpkg_info "$1")"
+ local deb2="$(dpkg_info "$2")"
+ [[ ${deb1%% *} = ${deb2%% *} ]] || \
+ die "$1 and $2 do not reference the same package!"
+ vercmp "${deb1#* }" "${deb2#* }"
+}
+
+final_build_fixups() {
+ # Copy our isolinux and preseed files.
+ mv "$BUILD_DIR/extra/isolinux" "$BUILD_DIR/extra/preseed" "$BUILD_DIR"
+ # Copy our initrd images
+ debug "Fixing up initrds"
+ [[ -d $BUILD_DIR/initrd ]] && rm -rf initrd
+ mkdir -p "$BUILD_DIR/initrd"
+ # Grab _all_ the nic drivers. We probably don't need them,
+ # but a little paranoia never hurt anyone.
+ ( cd "$BUILD_DIR/initrd";
+ debug "Adding all nic drivers"
+ for udeb in "$IMAGE_DIR/pool/main/l/linux/"nic-*-generic-*.udeb; do
+ ar x "$udeb"
+ tar xzf data.tar.gz
+ rm -rf debian-binary *.tar.gz
+ done
+ # Make sure installing off a USB connected DVD will work
+ debug "Adding USB connected DVD support"
+ mkdir -p var/lib/dpkg/info
+ cp "$CROWBAR_DIR/initrd/cdrom-detect.postinst" var/lib/dpkg/info
+ debug "Enabling bootif support for debian-installer"
+ mkdir -p lib/debian-installer-startup.d/
+ cp "$CROWBAR_DIR/$OS_TO_STAGE-extra/patches/bootif" \
+ lib/debian-installer-startup.d/S32set-bootif
+ chmod 755 "lib/debian-installer-startup.d/S32set-bootif"
+ for initrd in "install/initrd.gz" \
+ "install/netboot/ubuntu-installer/amd64/initrd.gz"; do
+ [[ -f $IMAGE_DIR/$initrd ]] || continue
+ mkdir -p "$BUILD_DIR/${initrd%/*}"
+ gunzip -c "$IMAGE_DIR/$initrd" >"$BUILD_DIR/initrd.tmp"
+ find . -type f | \
+ cpio --format newc --owner root:root \
+ -oAF "$BUILD_DIR/initrd.tmp"
+ cat "$BUILD_DIR/initrd.tmp" | \
+ gzip -9 > "$BUILD_DIR/$initrd"
+ done
+ rm "$BUILD_DIR/initrd.tmp"
+ )
+ # rm -rf "$BUILD_DIR/initrd"
+}
+
+# Check to make sure all our prerequisites are met.
+for cmd in debootstrap ar; do
+ which "$cmd" &>/dev/null || \
+ die "Please install $cmd before trying to build Crowbar."
+done
View
188 ubuntu-common/build_lib.sh~
@@ -0,0 +1,188 @@
+#!/bin/bash
+# This file is sourced by build_crowbar.sh when you want to build Crowbar
+# using Ubuntu 10.10 as the base OS. It includes all Ubuntu 10.10 specific
+# build routines.
+
+# OS information for the OS we are building crowbar on to.
+OS=ubuntu
+OS_VERSION=10.10
+OS_TOKEN="$OS-$OS_VERSION"
+OS_CODENAME=maverick
+# Server to download the mirror from if we need to.
+ISO_MIRROR="http://mirror.anl.gov/pub"
+PKG_TYPE="debs"
+PKG_ALLOWED_ARCHES=("amd64" "all")
+CHROOT_PKGDIR="var/cache/apt/archives"
+CHROOT_GEMDIR="var/lib/gems/1.8/cache"
+declare -A SEEN_DEBS
+# The name of the OS iso we are using as a base.
+[[ $ISO ]] || ISO="ubuntu-$OS_VERSION-server-amd64.iso"
+
+# The location for OS packages on $ISO
+find_cd_pool() ( echo "$IMAGE_DIR/pool"; )
+
+fetch_os_iso() {
+ # Try and download our ISO if we don't already have it
+ echo "$(date '+%F %T %z'): Downloading and caching $ISO"
+ curl -o "$ISO_LIBRARY/$ISO" \
+ "$ISO_MIRROR/ubuntu-iso/CDs/$OS_VERSION/$ISO" || \
+ die 1 "Missing our source image"
+}
+
+# Have the chroot update its package databases.
+chroot_update() { in_chroot /usr/bin/apt-get -y --force-yes \
+ --allow-unauthenticated update; }
+
+# Install some packages in the chroot environment.
+chroot_install() {
+ if [[ $1 ]]; then
+ in_chroot /usr/bin/apt-get -y --force-yes \
+ --allow-unauthenticated install "$@"
+ fi
+}
+
+# Fetch (but do not install) packages into the chroot environment
+chroot_fetch() {
+ if [[ $1 ]]; then
+ in_chroot /usr/bin/apt-get -y --force-yes \
+ --allow-unauthenticated --download-only install "$@"
+ fi
+}
+
+# Add repositories to the local chroot environment.
+add_repos() {
+ local repo ppas=()
+ local f=$(mktemp /tmp/ubuntu_repos.XXXXXX)
+ for repo in "$@"; do
+ case $repo in
+ ppa*) ppas+=("${ppa#* }");;
+ deb*) echo "$repo" >> "$f";;
+ *) die "Unknown Debian repository type $repo";;
+ esac
+ done
+ in_chroot mkdir -p /etc/apt/sources.list.d
+ sudo cp "$f" "$CHROOT/etc/apt/sources.list.d/${f##*.}.list"
+ rm "$f"
+ [[ $ppas ]] || return 0
+ chroot_install python-software-properties
+ for repo in "${ppas[@]}"; do
+ in_chroot apt-add-repository "ppa:${repo}"
+ done
+}
+
+# Test to see we were passed a valid package file name.
+is_pkg() { [[ $1 = *.deb ]]; }
+
+# Look up name and version information for a package using
+# dpkg. Make sure and memoize things.
+dpkg_info() {
+ # $1 = package to examine
+ local name arch ver f1 f2
+ [[ -f $1 && $1 = *.deb ]] || die "$1 is not a debian package!"
+ if [[ ! ${SEEN_DEBS["${1##*/}"]} ]]; then
+ while read f1 f2; do
+ case $f1 in
+ Package:) name="$f2";;
+ Version:) ver="$f2";;
+ Architecture:) arch="$f2";;
+ esac
+ [[ $name && $ver && $arch ]] && break || :
+ done < <(dpkg -I "$1")
+ SEEN_DEBS["${1##*/}"]="$name-$arch $ver"
+ fi
+ echo "${SEEN_DEBS["${1##*/}"]}"
+}
+
+# Get the package file name in $name-$arch format.
+pkg_name() {
+ local n="$(dpkg_info "$1")"
+ echo "${n%% *}"
+}
+
+# OS specific part of making our chroot environment.
+__make_chroot() {
+ # debootstrap a minimal install of our target version of
+ # Ubuntu to ensure that we don't interfere with the host's package cache.
+ local d repo bc f
+ sudo mount -t tmpfs -o size=4G "$OS_TOKEN-chroot" "$CHROOT"
+ sudo debootstrap "$OS_CODENAME" "$CHROOT" \
+ "file://$IMAGE_DIR" || \
+ die 1 "Could not bootstrap our scratch target!"
+ # mount some important directories for the chroot
+ for d in proc sys dev dev/pts; do
+ bind_mount "/$d" "$CHROOT/$d"
+ done
+ in_chroot mkdir -p "/base_repo"
+ sudo mount --bind "$IMAGE_DIR" "$CHROOT/base_repo"
+ # make sure the chroot can resolve hostnames
+ sudo cp /etc/resolv.conf "$CHROOT/etc/resolv.conf"
+ # make sure the chroot honors proxies
+ if [[ $http_proxy || $https_proxy ]]; then
+ f=$(mktemp /tmp/apt.http.conf.XXXXXX)
+ [[ $http_proxy ]] && echo \
+ "Acquire::http::Proxy \"$http_proxy\";" >> "$f"
+ [[ $https_proxy ]] && echo \
+ "Acquire::https::Proxy \"$https_proxy\";" >> "$f"
+ echo "Acquire::http::Proxy::127.0.0.1 \"DIRECT\";" >> "$f"
+ in_chroot mkdir -p "/etc/apt/apt.conf.d/"
+ sudo cp "$f" "$CHROOT/etc/apt/apt.conf.d/00http_proxy"
+ fi
+}
+
+# Test to see of package $1 is more recent than package $2
+pkg_cmp() {
+ # $1 = Debian package 1
+ # $2 = Debian package 2
+ local deb1="$(dpkg_info "$1")"
+ local deb2="$(dpkg_info "$2")"
+ [[ ${deb1%% *} = ${deb2%% *} ]] || \
+ die "$1 and $2 do not reference the same package!"
+ vercmp "${deb1#* }" "${deb2#* }"
+}
+
+final_build_fixups() {
+ # Copy our isolinux and preseed files.
+ mv "$BUILD_DIR/extra/isolinux" "$BUILD_DIR/extra/preseed" "$BUILD_DIR"
+ # Copy our initrd images
+ debug "Fixing up initrds"
+ [[ -d $BUILD_DIR/initrd ]] && rm -rf initrd
+ mkdir -p "$BUILD_DIR/initrd"
+ # Grab _all_ the nic drivers. We probably don't need them,
+ # but a little paranoia never hurt anyone.
+ ( cd "$BUILD_DIR/initrd";
+ debug "Adding all nic drivers"
+ for udeb in "$IMAGE_DIR/pool/main/l/linux/"nic-*-generic-*.udeb; do
+ ar x "$udeb"
+ tar xzf data.tar.gz
+ rm -rf debian-binary *.tar.gz
+ done
+ # Make sure installing off a USB connected DVD will work
+ debug "Adding USB connected DVD support"
+ mkdir -p var/lib/dpkg/info
+ cp "$CROWBAR_DIR/initrd/cdrom-detect.postinst" var/lib/dpkg/info
+ debug "Enabling bootif support for debian-installer"
+ mkdir -p lib/debian-installer-startup.d/
+ cp "$CROWBAR_DIR/$OS_TO_STAGE-extra/patches/bootif" \
+ lib/debian-installer-startup.d/S32set-bootif
+ chmod 755 "lib/debian-installer-startup.d/S32set-bootif"
+ for initrd in "install/initrd.gz" \
+ "install/netboot/ubuntu-installer/amd64/initrd.gz"; do
+ [[ -f $IMAGE_DIR/$initrd ]] || continue
+ mkdir -p "$BUILD_DIR/${initrd%/*}"
+ gunzip -c "$IMAGE_DIR/$initrd" >"$BUILD_DIR/initrd.tmp"
+ find . -type f | \
+ cpio --format newc --owner root:root \
+ -oAF "$BUILD_DIR/initrd.tmp"
+ cat "$BUILD_DIR/initrd.tmp" | \
+ gzip -9 > "$BUILD_DIR/$initrd"
+ done
+ rm "$BUILD_DIR/initrd.tmp"
+ )
+ # rm -rf "$BUILD_DIR/initrd"
+}
+
+# Check to make sure all our prerequisites are met.
+for cmd in debootstrap ar; do
+ which "$cmd" &>/dev/null || \
+ die "Please install $cmd before trying to build Crowbar."
+done
View
54 ubuntu-common/chef_install_lib.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+# Ubuntu specific chef install functionality
+DVD_PATH="/tftpboot/ubuntu_dvd"
+OS_TOKEN="ubuntu-10.10"
+update_hostname() { update_hostname.sh $FQDN; }
+
+install_base_packages() {
+ cp apt.conf /etc/apt
+ log_to apt sed -i "s/__HOSTNAME__/$FQDN/g" ./debsel.conf
+ log_to apt /usr/bin/debconf-set-selections ./debsel.conf
+ # First, make a repo for crowbar-extras
+ apt-get -y install dpkg-dev
+ mkdir -p "/tftpboot/$OS_TOKEN/crowbar-extra"
+ (cd "/tftpboot/$OS_TOKEN/crowbar-extra";
+ # Find all the staged barclamps
+ for bc in "/opt/dell/barclamps/"*; do
+ [[ -d $bc/cache/$OS_TOKEN/pkgs ]] || continue
+ # Link them in.
+ ln -s "$bc/cache/$OS_TOKEN/pkgs" "${bc##*/}"
+ done
+ dpkg-scanpackages . 2>/dev/null |gzip -9 >Packages.gz)
+ echo "deb file:/tftpboot/$OS_TOKEN/crowbar-extra /" >>/etc/apt/sources.list
+ log_to apt apt-get update
+ log_to apt apt-get -y remove apparmor
+ log_to apt apt-get -y install rubygems gcc ruby \
+ libcurl4-gnutls-dev build-essential ruby-dev
+}
+
+bring_up_chef() {
+ log_to apt apt-get -y install chef kwalify
+ service chef-client stop
+ killall chef-client
+ log_to apt apt-get -y install chef-server chef-server-webui
+
+ # HACK AROUND CHEF-2005
+ cp patches/data_item.rb /usr/share/chef-server-api/app/controllers
+ # HACK AROUND CHEF-2005
+ rl=$(find /usr/lib/ruby -name run_list.rb)
+ cp -f "$rl" "$rl.bak"
+ cp -f patches/run_list.rb "$rl"
+ # Make the Rubygems provider in Chef respect gemrc files.
+ cp -f patches/rubygems.rb /usr/lib/ruby/vendor_ruby/chef/provider/package
+ log_to svc service chef-server restart
+}
+
+pre_crowbar_fixups() { : ; }
+
+update_admin_node() {
+ log_to apt apt-get -y upgrade
+}
+
+restart_ssh() {
+ service ssh restart
+}
View
130 ubuntu-common/common_install.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+# Copyright 2011, Dell
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# This script is called by the other install scripts to layout the crowbar
+# software + dell pieces.
+#
+# Requires:
+# /tftpboot/ubuntu_dvd is populated with a tarball of the dvd and this file.
+#
+
+export PS4='${BASH_SOURCE}@${LINENO}(${FUNCNAME[0]}): '
+set -x
+exec > /root/post-install.log 2>&1
+
+BASEDIR="/tftpboot/ubuntu_dvd"
+OS_TOKEN="ubuntu-10.10"
+
+# Make sure /opt is created
+mkdir -p /opt/dell/bin
+
+mkdir -p "/tftpboot/$OS_TOKEN"
+(cd "/tftpboot/$OS_TOKEN"; ln -s ../ubuntu_dvd install)
+echo "deb file:/tftpboot/$OS_TOKEN/install maverick main restricted" \
+ > /etc/apt/sources.list
+
+# Make a destination for dell finishing scripts
+
+finishing_scripts=(update_hostname.sh parse_node_data)
+( cd "$BASEDIR/dell"; cp "${finishing_scripts[@]}" /opt/dell/bin; )
+
+# "Install h2n for named management"
+cd /opt/dell/
+tar -zxf "$BASEDIR/extra/h2n.tar.gz"
+ln -s /opt/dell/h2n-2.56/h2n /opt/dell/bin/h2n
+
+# Set up initial syslog
+cp "$BASEDIR/rsyslog.d/"* /etc/rsyslog.d/
+
+# Barclamp preparation (put them in the right places)
+mkdir /opt/dell/barclamps
+for i in "$BASEDIR/dell/barclamps/"*".tar.gz"; do
+ [[ -f $i ]] || continue
+ (cd /opt/dell/barclamps && tar xzf "$i")
+ echo "copy new format $i"
+done
+
+barclamp_scripts=(barclamp_install.rb barclamp_multi.rb)
+( cd "/opt/dell/barclamps/crowbar/bin"; \
+ cp "${barclamp_scripts[@]}" /opt/dell/bin; )
+
+# Make sure the bin directory is executable
+chmod +x /opt/dell/bin/*
+
+# Make sure we can actaully install Crowbar
+chmod +x "$BASEDIR/extra/"*
+
+# Make sure the ownerships are correct
+chown -R crowbar.admin /opt/dell
+
+# Look for any crowbar specific kernel parameters
+for s in $(cat /proc/cmdline); do
+ VAL=${s#*=} # everything after the first =
+ case ${s%%=*} in # everything before the first =
+ crowbar.hostname) CHOSTNAME=$VAL;;
+ crowbar.url) CURL=$VAL;;
+ crowbar.use_serial_console)
+ sed -i "s/\"use_serial_console\": .*,/\"use_serial_console\": $VAL,/" /opt/dell/chef/data_bags/crowbar/bc-template-provisioner.json;;
+ crowbar.debug.logdest)
+ echo "*.* $VAL" >> /etc/rsyslog.d/00-crowbar-debug.conf
+ mkdir -p "$BASEDIR/rsyslog.d"
+ echo "*.* $VAL" >> "$BASEDIR/rsyslog.d/00-crowbar-debug.conf"
+ ;;
+ crowbar.authkey)
+ mkdir -p "/root/.ssh"
+ printf "$VAL\n" >>/root/.ssh/authorized_keys
+ printf "$VAL\n" >>/opt/dell/barclamps/provisioner/chef/cookbooks/provisioner/templates/default/authorized_keys.erb
+ ;;
+ crowbar.debug)
+ sed -i -e '/config.log_level/ s/^#//' \
+ -e '/config.logger.level/ s/^#//' \
+ /opt/dell/barclamps/crowbar/crowbar_framework/config/environments/production.rb
+ ;;
+
+ esac
+done
+
+if ! grep -q '192\.168\.124\.10' /etc/network/interfaces; then
+ cat >> /etc/network/interfaces <<EOF
+auto eth0
+iface eth0 inet static
+ address 192.168.124.10
+ netmask 255.255.255.0
+EOF
+fi
+
+if [[ $CHOSTNAME ]]; then
+ cat > /install_system.sh <<EOF
+#!/bin/bash
+set -e
+cd /tftpboot/ubuntu_dvd/extra
+./install $CHOSTNAME
+
+rm -f /etc/rc2.d/S99install
+rm -f /etc/rc3.d/S99install
+rm -f /etc/rc5.d/S99install
+
+rm -f /install_system.sh
+
+EOF
+
+ chmod +x /install_system.sh
+ ln -s /install_system.sh /etc/rc3.d/S99install
+ ln -s /install_system.sh /etc/rc5.d/S99install
+ ln -s /install_system.sh /etc/rc2.d/S99install
+
+fi
View
12 ubuntu-common/debsel.conf
@@ -0,0 +1,12 @@
+# New password for the 'admin' user in the Chef Server WebUI:
+chef-server-webui chef-server-webui/admin_password password password
+# New password for the 'chef' AMQP user in the RabbitMQ vhost "/chef":
+chef-solr chef-solr/amqp_password password password
+chef chef/chef_server_url string http://__HOSTNAME__:4000
+
+# Do you accept the DLJ license terms?
+sun-java6-bin shared/accepted-sun-dlj-v1-1 boolean true
+sun-java6-jre shared/accepted-sun-dlj-v1-1 boolean true
+sun-java6-jre sun-java6-jre/stopthread boolean true
+sun-java6-jre sun-java6-jre/jcepolicy note
+
View
20 ubuntu-common/install
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright 2011, Dell
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+cd /tftpboot/ubuntu_dvd/extra
+script -f -c "./install-chef.sh $1" /var/log/install.log
+cd -
+
View
10 ubuntu-common/isolinux/isolinux.cfg
@@ -0,0 +1,10 @@
+include menu.cfg
+default CrowbarAdmin
+prompt 0
+timeout 10
+
+LABEL CrowbarAdmin
+ menu label ^Crowbar Admin Installation
+ kernel /install/vmlinuz
+ append file=/cdrom/preseed/crowbar_admin.seed debian-installer/locale=en_US.utf8 console-setup/layoutcode=us localechooser/translation/warn-light=true localechooser/translation/warn-severe=true cdrom-detect/load_floppy?=false cdrom-detect/load_media=false cdrom-detect/manual_config=true cdrom-detect/cdrom_module=none cdrom-detect/cdrom_device=/dev/scd0 initrd=/install/initrd.gz ramdisk_size=16384 root=/dev/ram rw quiet --
+
View
BIN ubuntu-common/isolinux/pxelinux.0
Binary file not shown.
View
11 ubuntu-common/isolinux/pxelinux.cfg/default
@@ -0,0 +1,11 @@
+include ../menu.cfg
+default CrowbarAdmin
+prompt 0
+timeout 10
+
+LABEL CrowbarAdmin
+ menu label ^Crowbar Admin installation
+ kernel ../ubuntu_dvd/install/netboot/ubuntu-installer/amd64/linux
+ append url=http://192.168.1.2:8091/ubuntu_dvd/preseed/crowbar_admin_net.seed debian-installer/locale=en_US.utf8 console-setup/layoutcode=us localechooser/translation/warn-light=true localechooser/translation/warn-severe=true netcfg/wireless_wep= netcfg/choose_interface=auto netcfg/dhcp_timeout=120 netcfg/get_hostname="redundant" initrd=../ubuntu_dvd/install/netboot/ubuntu-installer/amd64/initrd.gz ramdisk_size=16384 root=/dev/ram rw quiet --
+ IPAPPEND 2
+
View
5 ubuntu-common/packages/base.list
@@ -0,0 +1,5 @@
+## LEGACY:
+
+# All repo, package, and gem information that was here has been migrated to
+# barclamps/*/crowbar.yml. Base OS repositories and packages are in
+# barclamps/crowbar/crowbar.yml!
View
24 ubuntu-common/patches/bootif
@@ -0,0 +1,24 @@
+#!/bin/sh
+export DEBIAN_FRONTEND=none
+. /usr/share/debconf/confmodule
+bootif=`sed 's/.*BOOTIF=\([-a-z0-9]*\).*/\1/; t; d' /proc/cmdline`
+if [ "$bootif" ]; then
+ interface_found=""
+ # bootif is 01-00-19-b9-e1-c6-94, convert to MAC
+ mac=${bootif#01-}
+ mac=`echo $mac | sed 's/-/:/g'`
+ log-output -t ethdetect echo "Found bootif $bootif, looking for MAC $mac"
+ cd /sys/class/net
+ for interface in *; do
+ if [ `cat $interface/address` = "$mac" ]; then
+ log-output -t ethdetect echo "Found matching interface $interface"
+ interface_found=$interface
+ db_set netcfg/choose_interface $interface
+ # must mark question as seen otherwise you are reprompted
+ db_fset netcfg/choose_interface seen true
+ fi
+ done
+ if [ ! "$interface_found" ]; then
+ log-output -t ethdetect echo "No matching interface for MAC $mac"
+ fi
+fi
View
108 ubuntu-common/patches/data_item.rb
@@ -0,0 +1,108 @@
+#
+# Author:: Adam Jacob (<adam@opscode.com>)
+# Author:: Christopher Brown (<cb@opscode.com>)
+# Author:: Nuo Yan (<nuo@opscode.com>)
+# Copyright:: Copyright (c) 2008 Opscode, Inc.
+# License:: Apache License, Version 2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'chef/data_bag'
+require 'chef/data_bag_item'
+
+class DataItem < Application
+
+ provides :json
+
+ before :populate_data_bag
+ before :authenticate_every
+ before :is_admin, :only => [ :create, :update, :destroy ]
+
+ def populate_data_bag
+ begin
+ @data_bag = Chef::DataBag.cdb_load(params[:data_bag_id])
+ rescue Chef::Exceptions::CouchDBNotFound => e
+ raise NotFound, "Cannot load data bag #{params[:data_bag_id]}"
+ end
+ end
+
+ def show
+ begin
+ @data_bag_item = Chef::DataBagItem.cdb_load(params[:data_bag_id], params[:id])
+ rescue Chef::Exceptions::CouchDBNotFound => e
+ raise NotFound, "Cannot load data bag #{params[:data_bag_id]} item #{params[:id]}"
+ end
+ display @data_bag_item
+ end
+
+ def create
+ raw_data = nil
+ if params.has_key?("inflated_object")
+ raw_data = params["inflated_object"].raw_data
+ else
+ raw_data = params
+ raw_data.delete(:action)
+ raw_data.delete(:controller)
+ raw_data.delete(:data_bag_id)
+ end
+ @data_bag_item = nil
+ begin
+ @data_bag_item = Chef::DataBagItem.cdb_load(@data_bag.name, params[:id])
+ rescue Chef::Exceptions::CouchDBNotFound
+ @data_bag_item = Chef::DataBagItem.new
+ @data_bag_item.data_bag(@data_bag.name)
+ else
+ raise Conflict, "Databag Item #{params[:id]} already exists" if @data_bag_item
+ end
+ @data_bag_item.raw_data = raw_data
+ @data_bag_item.cdb_save
+ display @data_bag_item
+ end
+
+ def update
+ raw_data = nil
+ if params.has_key?("inflated_object")
+ raw_data = params["inflated_object"].raw_data
+ else
+ raw_data = params
+ raw_data.delete(:action)
+ raw_data.delete(:controller)
+ raw_data.delete(:data_bag_id)
+ end
+
+ begin
+ @data_bag_item = Chef::DataBagItem.cdb_load(@data_bag.name, params[:id])
+ rescue Chef::Exceptions::CouchDBNotFound => e
+ raise NotFound, "Cannot load Databag Item #{params[:id]}"
+ end
+
+ @data_bag_item.raw_data = raw_data
+ @data_bag_item.cdb_save
+ display @data_bag_item
+
+ end
+
+
+ def destroy
+ begin
+ @data_bag_item = Chef::DataBagItem.cdb_load(params[:data_bag_id], params[:id])
+ rescue Chef::Exceptions::CouchDBNotFound => e
+ raise NotFound, "Cannot load data bag #{params[:data_bag_id]} item #{params[:id]}"
+ end
+ @data_bag_item.cdb_destroy
+ @data_bag_item.couchdb_rev = nil
+ display @data_bag_item
+ end
+
+end
View
459 ubuntu-common/patches/rubygems.rb
@@ -0,0 +1,459 @@
+#
+# Author:: Adam Jacob (<adam@opscode.com>)
+# Author:: Daniel DeLeo (<dan@opscode.com>)
+# Copyright:: Copyright (c) 2008, 2010 Opscode, Inc.
+# License:: Apache License, Version 2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'chef/provider/package'
+require 'chef/mixin/command'
+require 'chef/resource/package'
+require 'chef/mixin/get_source_from_package'
+
+# Class methods on Gem are defined in rubygems
+require 'rubygems'
+# Ruby 1.9's gem_prelude can interact poorly with loading the full rubygems
+# explicitly like this. Make sure rubygems/specification is always last in this
+# list
+require 'rubygems/version'
+require 'rubygems/dependency'
+require 'rubygems/spec_fetcher'
+require 'rubygems/platform'
+require 'rubygems/format'
+require 'rubygems/dependency_installer'
+require 'rubygems/uninstaller'
+require 'rubygems/specification'
+
+class Chef
+ class Provider
+ class Package
+ class Rubygems < Chef::Provider::Package
+ class GemEnvironment
+ # HACK: trigger gem config load early. Otherwise it can get lazy
+ # loaded during operations where we've set Gem.sources to an
+ # alternate value and overwrite it with the defaults.
+ Gem.configuration
+
+ # Defaults for Gem::Uninstaller: skip dependency checks and remove
+ # installed executables along with the gem.
+ DEFAULT_UNINSTALLER_OPTS = {:ignore => true, :executables => true}
+
+ ##
+ # The paths where rubygems should search for installed gems.
+ # Implemented by subclasses.
+ def gem_paths
+ raise NotImplementedError
+ end
+
+ ##
+ # A rubygems source index containing the list of gemspecs for all
+ # available gems in the gem installation.
+ # Implemented by subclasses
+ # === Returns
+ # Gem::SourceIndex
+ def gem_source_index
+ raise NotImplementedError
+ end
+
+ ##
+ # Lists the installed versions of +gem_name+, constrained by the
+ # version spec in +gem_dep+
+ # === Arguments
+ # Gem::Dependency +gem_dep+ is a Gem::Dependency object, its version
+ # specification constrains which gems are returned.
+ # === Returns
+ # [Gem::Specification] an array of Gem::Specification objects
+ def installed_versions(gem_dep)
+ gem_source_index.search(gem_dep)
+ end
+
+ ##
+ # Yields to the provided block with rubygems' source list set to the
+ # list provided. Always resets the list when the block returns or
+ # raises an exception. nil entries are dropped; if no non-nil source
+ # is given the block runs against the unmodified Gem.sources.
+ def with_gem_sources(*sources)
+ sources.compact!
+ original_sources = Gem.sources
+ Gem.sources = sources unless sources.empty?
+ yield
+ ensure
+ # Restore the global source list even if the block raised.
+ Gem.sources = original_sources
+ end
+
+ ##
+ # Determines the candidate version for a gem from a .gem file on disk
+ # and checks if it matches the version constraints in +gem_dependency+
+ # === Returns
+ # Gem::Version a singular gem version object is returned if the gem
+ # is available
+ # nil returns nil if the gem on disk doesn't match the
+ # version constraints for +gem_dependency+
+ # NOTE(review): the log messages interpolate @new_resource, which is
+ # never assigned inside this class -- presumably nil here; confirm.
+ def candidate_version_from_file(gem_dependency, source)
+ spec = Gem::Format.from_file_by_path(source).spec
+ if spec.satisfies_requirement?(gem_dependency)
+ logger.debug {"#{@new_resource} found candidate gem version #{spec.version} from local gem package #{source}"}
+ spec.version
+ else
+ # This is probably going to end badly...
+ logger.warn { "#{@new_resource} gem package #{source} does not satisfy the requirements #{gem_dependency.to_s}" }
+ nil
+ end
+ end
+
+ ##
+ # Finds the newest version that satisfies the constraints of
+ # +gem_dependency+. The version is determined from the cache or a
+ # round-trip to the server as needed. The architecture and gem
+ # sources will be set before making the query.
+ # === Returns
+ # Gem::Version a singular gem version object is returned if the gem
+ # is available
+ # nil returns nil if the gem could not be found
+ def candidate_version_from_remote(gem_dependency, *sources)
+ raise NotImplementedError
+ end
+
+ ##
+ # Find the newest gem version available from Gem.sources that satisfies
+ # the constraints of +gem_dependency+
+ def find_newest_remote_version(gem_dependency, *sources)
+ # DependencyInstaller sorts the results such that the last one is
+ # always the one it considers best.
+ spec_with_source = dependency_installer.find_gems_with_sources(gem_dependency).last
+
+ # spec_with_source is a [spec, source] pair, or nil if nothing matched.
+ spec = spec_with_source && spec_with_source[0]
+ version = spec && spec_with_source[0].version
+ if version
+ logger.debug { "#{@new_resource} found gem #{spec.name} version #{version} for platform #{spec.platform} from #{spec_with_source[1]}" }
+ version
+ else
+ # +sources+ is only used to build the log message; the lookup itself
+ # relies on Gem.sources having been set by the caller.
+ source_list = sources.compact.empty? ? "[#{Gem.sources.join(', ')}]" : "[#{sources.join(', ')}]"
+ logger.warn { "#{@new_resource} failed to find gem #{gem_dependency} from #{source_list}" }
+ nil
+ end
+ end
+
+ ##
+ # Installs a gem via the rubygems ruby API.
+ # === Options
+ # :sources rubygems servers to use
+ # Other options are passed to Gem::DependencyInstaller.new
+ def install(gem_dependency, options={})
+ with_gem_sources(*options.delete(:sources)) do
+ with_correct_verbosity do
+ dependency_installer(options).install(gem_dependency)
+ end
+ end
+ end
+
+ ##
+ # Uninstall the gem +gem_name+ via the rubygems ruby API. If
+ # +gem_version+ is provided, only that version will be uninstalled.
+ # Otherwise, all versions are uninstalled.
+ # === Options
+ # Options are passed to Gem::Uninstaller.new
+ def uninstall(gem_name, gem_version=nil, opts={})
+ gem_version ? opts[:version] = gem_version : opts[:all] = true
+ with_correct_verbosity do
+ uninstaller(gem_name, opts).uninstall
+ end
+ end
+
+ ##
+ # Set rubygems' user interaction to ConsoleUI or SilentUI depending
+ # on our current debug level
+ def with_correct_verbosity
+ Gem::DefaultUserInteraction.ui = Chef::Log.debug? ? Gem::ConsoleUI.new : Gem::SilentUI.new
+ yield
+ end
+
+ # Builds a Gem::DependencyInstaller with the given options.
+ def dependency_installer(opts={})
+ Gem::DependencyInstaller.new(opts)
+ end
+
+ # Builds a Gem::Uninstaller with the class-wide defaults merged in.
+ def uninstaller(gem_name, opts={})
+ Gem::Uninstaller.new(gem_name, DEFAULT_UNINSTALLER_OPTS.merge(opts))
+ end
+
+ private
+
+ # All logging goes through the global Chef logger.
+ def logger
+ Chef::Log.logger
+ end
+
+ end
+
+ # Gem environment of the Ruby process Chef itself is running in:
+ # queries go straight to the in-process rubygems state.
+ class CurrentGemEnvironment < GemEnvironment
+
+ def gem_paths
+ Gem.path
+ end
+
+ def gem_source_index
+ Gem.source_index
+ end
+
+ def candidate_version_from_remote(gem_dependency, *sources)
+ # Pin Gem.sources to +sources+ (if any) for the duration of the query.
+ with_gem_sources(*sources) do
+ find_newest_remote_version(gem_dependency, *sources)
+ end
+ end
+
+ end
+
+ class AlternateGemEnvironment < GemEnvironment
+ # Matches a jruby platform string in `gem env` output (e.g.
+ # "universal-java-1.6").
+ # NOTE(review): "(:?" looks like a typo for the non-capturing group
+ # "(?:" -- harmless here because only the full match is used below
+ # (String#[]), but worth fixing upstream.
+ JRUBY_PLATFORM = /(:?universal|x86_64|x86)\-java\-[0-9\.]+/
+
+ # Class-level cache of discovered gem paths, keyed by gem binary
+ # location, so repeated resources don't shell out again.
+ def self.gempath_cache
+ @gempath_cache ||= {}
+ end
+
+ # Class-level cache of detected platforms, keyed by gem binary location.
+ def self.platform_cache
+ @platform_cache ||= {}
+ end
+
+ include Chef::Mixin::ShellOut
+
+ # Path (or command name) of the target environment's `gem` executable.
+ attr_reader :gem_binary_location
+
+ def initialize(gem_binary_location)
+ @gem_binary_location = gem_binary_location
+ end
+
+ # Gem paths of the alternate installation, discovered by shelling out
+ # to `<gem_binary> env gempath`; results are memoized in the class cache.
+ def gem_paths
+ if self.class.gempath_cache.key?(@gem_binary_location)
+ self.class.gempath_cache[@gem_binary_location]
+ else
+ # shellout! is a fork/exec which won't work on windows
+ shell_style_paths = shell_out!("#{@gem_binary_location} env gempath").stdout
+ # on windows, the path separator is (usually? always?) semicolon
+ paths = shell_style_paths.split(::File::PATH_SEPARATOR).map { |path| path.strip }
+ self.class.gempath_cache[@gem_binary_location] = paths
+ end
+ end
+
+ # Source index built (and memoized per instance) from the gemspecs
+ # under each gem path's specifications/ directory.
+ def gem_source_index
+ @source_index ||= Gem::SourceIndex.from_gems_in(*gem_paths.map { |p| p + '/specifications' })
+ end
+
+ ##
+ # Attempt to detect the correct platform settings for the target gem
+ # environment.
+ #
+ # In practice, this only makes a difference if different versions are
+ # available depending on platform, and only if the target gem
+ # environment has a radically different platform (i.e., jruby), so we
+ # just try to detect jruby and fall back to the current platforms
+ # (Gem.platforms) if we don't detect it.
+ #
+ # === Returns
+ # [String|Gem::Platform] returns an array of Gem::Platform-compatible
+ # objects, i.e., Strings that are valid for Gem::Platform or actual
+ # Gem::Platform objects.
+ def gem_platforms
+ if self.class.platform_cache.key?(@gem_binary_location)
+ self.class.platform_cache[@gem_binary_location]
+ else
+ gem_environment = shell_out!("#{@gem_binary_location} env").stdout
+ # String#[] with a regex returns the matched substring, or nil when
+ # no jruby platform string appears in the env output.
+ if jruby = gem_environment[JRUBY_PLATFORM]
+ self.class.platform_cache[@gem_binary_location] = ['ruby', Gem::Platform.new(jruby)]
+ else
+ self.class.platform_cache[@gem_binary_location] = Gem.platforms
+ end
+ end
+ end
+
+ # Temporarily swap Gem.platforms for +alt_gem_platforms+ while the
+ # block runs; always restores the original list, even on error.
+ def with_gem_platforms(*alt_gem_platforms)
+ alt_gem_platforms.flatten!
+ original_gem_platforms = Gem.platforms
+ Gem.platforms = alt_gem_platforms
+ yield
+ ensure
+ Gem.platforms = original_gem_platforms
+ end
+
+ # Remote candidate lookup with both Gem.sources and Gem.platforms
+ # pinned to match the alternate gem environment for the query.
+ def candidate_version_from_remote(gem_dependency, *sources)
+ with_gem_sources(*sources) do
+ with_gem_platforms(*gem_platforms) do
+ find_newest_remote_version(gem_dependency, *sources)
+ end
+ end
+ end
+
+ end
+