Split some Ubuntu-specific stuff out of build_crowbar.sh

commit 1437d69fffe12b1fa7758547980925a6cfa1cc4b 1 parent 689f0ed
Dell Openstack Crowbar Team authored
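
With this split, build_crowbar.sh takes the OS to stage as its first argument (falling back to ubuntu-10.10) and sources the matching <os>-extra/build_lib.sh. As a rough sketch of the resulting invocation, assuming it is run from a checkout that contains the ubuntu-10.10-extra directory:

    # Stage Crowbar onto the default OS (ubuntu-10.10):
    ./build_crowbar.sh

    # Name the OS explicitly; passing "update-cache" anywhere in the arguments
    # forces the package and gem caches to be refreshed:
    ./build_crowbar.sh ubuntu-10.10 update-cache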
build_crowbar.sh (326 lines changed)
@@ -28,7 +28,7 @@
[[ $DEBUG ]] && {
set -x
- export PS4='(${nodename:-none})${BASH_SOURCE}@${LINENO}(${FUNCNAME[0]}): '
+ export PS4='${BASH_SOURCE}@${LINENO}(${FUNCNAME[0]}): '
}
export PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin"
@@ -47,14 +47,6 @@ IMAGE_DIR="$CACHE_DIR/image"
# Location we will stage the new openstack iso at.
BUILD_DIR="$CACHE_DIR/build"
-# Version of Ubuntu we are building openstack on to.
-UBUNTU_VERSION=10.10
-UBUNTU_CODENAME=maverick
-PPAS=("openstack-release/2011.2")
-
-# Server to download the mirror from if we need to.
-UBUNTU_ISO_MIRROR="http://mirror.anl.gov/pub"
-
# Directory that holds our Sledgehammer PXE tree.
SLEDGEHAMMER_PXE_DIR="$CACHE_DIR/tftpboot"
@@ -69,14 +61,14 @@ CROWBAR_DIR="${0%/*}"
SLEDGEHAMMER_DIR="${CROWBAR_DIR}/../sledgehammer"
VCS_CLEAN_CMD='git clean -f -x -d'
-# Arrays holding the additional debs, gems, and AMI images we will populate
+# Arrays holding the additional pkgs, gems, and AMI images we will populate
# Crowbar with.
-DEBS=()
+PKGS=()
GEMS=()
AMIS=("http://uec-images.ubuntu.com/releases/11.04/release/ubuntu-11.04-server-uec-amd64.tar.gz")
-die() { echo "$(date '+%F %T %z'): $*" >&2; exit 1; }
+die() { shift; echo "$(date '+%F %T %z'): $*" >&2; exit 1; }
debug() { echo "$(date '+%F %T %z'): $*" >&2; }
clean_dirs() {
local d=''
@@ -88,219 +80,23 @@ clean_dirs() {
done
}
-which debootstrap &>/dev/null || die "debootstrap must be installed! Exiting."
-which dpkg-scanpackages &>/dev/null || die "build-essential must be installed! Exiting."
-
-
-update_caches() {
- # Hold a list of directories we will need to umount
- TO_UMOUNT=()
-
- # Make sure our cache directories exist.
- mkdir -p "$DEB_CACHE"
- mkdir -p "$GEM_CACHE"
-
- # A little helper function for doing bind mounts.
- bind_mount() {
- TO_UMOUNT=("${TO_UMOUNT[@]}" "$2")
- [[ -d $2 ]] || mkdir -p "$2"
- grep -q "$2" /proc/self/mounts || sudo mount --bind "$1" "$2"
- }
-
- # A little helper for running commands in the chroot.
- in_chroot() { sudo -H chroot "$UBUNTU_CHROOT" "$@"; }
-
- # second, debootstrap a minimal install of our target version of
- # Ubuntu to ensure that we don't interfere with the host's package cache.
- debug "Making package-fetching chroot"
- mkdir -p "$UBUNTU_CHROOT"
- sudo mount -t tmpfs -o size=1G none "$UBUNTU_CHROOT"
- sudo debootstrap "$UBUNTU_CODENAME" "$UBUNTU_CHROOT" \
- "file://$BUILD_DIR" || \
- die "Could not bootstrap our scratch target!"
- # mount some important directories for the chroot
- for d in proc sys dev dev/pts; do
- bind_mount "/$d" "$UBUNTU_CHROOT/$d"
+OS_TO_STAGE="${1-ubuntu-10.10}"
+
+if ! [[ $OS_TO_STAGE && -d $CROWBAR_DIR/$OS_TO_STAGE-extra && \
+ -f $CROWBAR_DIR/$OS_TO_STAGE-extra/build_lib.sh ]]; then
+ cat <<EOF
+You must pass the name of the operating system you want to stage Crowbar
+on to. Valid choices are:
+EOF
+ cd "$CROWBAR_DIR"
+ for d in *-extra; do
+ [[ -d $d && -f $d/build_lib.sh ]] || continue
+ echo " ${d%-extra}"
done
- # make sure the chroot can resolve hostnames
- sudo cp /etc/resolv.conf "$UBUNTU_CHROOT/etc/resolv.conf"
+ exit 1
+fi
- # Make sure we are using a correctly prepopulated sources.list.
- sudo cp "$BUILD_DIR/extra/sources.list" \
- "$UBUNTU_CHROOT/etc/apt/sources.list"
-
- # if we have deb caches, copy them back in to save time on the downloads.
- sudo cp -a "$DEB_CACHE/." "$UBUNTU_CHROOT/var/cache/apt/archives/."
-
- debug "Fetching needed packages"
- # update, add infrastructure for adding PPAs,
- # add additional PPAs, and update again.
- in_chroot /usr/bin/apt-get -y --force-yes --allow-unauthenticated update
- in_chroot /usr/bin/apt-get -y --force-yes --allow-unauthenticated install \
- python-software-properties
- for ppa in "${PPAS[@]}"; do
- in_chroot apt-add-repository "ppa:$ppa"
- done
- # Get the key for the Opscode repo we are grabbing Chef bits from.
- wget -qO - http://apt.opscode.com/packages@opscode.com.gpg.key | \
- in_chroot /usr/bin/apt-key add -
- in_chroot /usr/bin/apt-get -y --force-yes --allow-unauthenticated update
-
- # Download all the packages apt thinks we will need.
- in_chroot /usr/bin/apt-get -y --force-yes \
- --allow-unauthenticated --download-only install "${DEBS[@]}"
- # actually install ruby1.8-dev and gem and their deps.
- in_chroot /usr/bin/apt-get -y --force-yes \
- --allow-unauthenticated install ruby1.8-dev rubygems1.8 build-essential
- # install the gems we will need and all their dependencies
- # We will get some build failures, but at this point we don't care because
- # we are just caching the gems for the real install.
- debug "Fetching Gems"
- echo "There may be build failures here, we can safely ignore them."
- gem_re='([^0-9].*)-([0-9].*)'
- for gem in "${GEMS[@]}"; do
- if [[ $gem =~ $gem_re ]]; then
- echo "${BASH_REMATCH[*]}"
- gemname="${BASH_REMATCH[1]}"
- gemver="${BASH_REMATCH[2]}"
- else
- gemname="$gem"
- gemver=''
- fi
- gemopts=(install --no-ri --no-rdoc)
- [[ $gemver ]] && gemopts+=(--version "= ${gemver}")
- in_chroot /usr/bin/gem "${gemopts[@]}" "$gemname"
- done
- debug "Saving downloaded packages"
- # Save our updated gems and debs in the cache for later.
- cp -a "$UBUNTU_CHROOT/var/cache/apt/archives/." "$DEB_CACHE/."
- cp -a "$UBUNTU_CHROOT/var/lib/gems/1.8/cache/." "$GEM_CACHE/."
- sync
-
- debug "Cleaning up mounts"
- # umount all the stuff we have mounted for the chroot.
- while grep -q "$UBUNTU_CHROOT" /proc/self/mounts; do
- for m in "${TO_UMOUNT[@]}"; do sudo umount "$m"; sleep 1; done
- sudo umount "$UBUNTU_CHROOT"
- done
-}
-
-copy_debs() {
- # $1 = pool directory to build initial list of excludes against
- # $2 = directory to copy from
- # $3 = directory to copy to.
-
- # First, a couple of hashes to hold deb => revision values
- declare -A deb_pool
- declare -A dest_pool
-
- local debname=''
- local debarr=()
- local debs_to_copy=()
-
-
- # Scan through our pool to find debs we can easily omit.
- while read debname; do
- [[ -f $debname && $debname = *.deb ]] || continue
- debname="${debname##*/}"
- debarr=(${debname//_/ }) # split into (name version arch)
- deb_pool["${debarr[0]}"]="${debarr[1]}"
- done < <(find "$1" -name '*.deb')
-
- (
- cd "$2"
- for deb in *; do
- [[ -f $deb && $deb = *.deb ]] || continue
- debname="${deb##*/}" # don't care about the source path
- debname="${debname%_*.deb}" # don't care about the arch
- debver="${debname#*_}"
- debname="${debname%_*}"
- # First, have we already copied another version of this
- # deb? If so, decide whether to copy it or not.
- if [[ ${dest_pool["$debname"]} ]]; then
- # We have seen it. If the version we already copied
- # is older than this one, queue this for copying instead.
- # This relies on * expansion returning names in order.
- if [[ ${dest_pool["$debname"]} < $debver ]]; then
- debug "Omitting ${debname}_${dest_pool[$debname]} in favor of ${debname}_${debver}"
- debs_to_copy[$((${#debs_to_copy[@]} - 1))]="$deb"
- dest_pool["$debname"]="$debver"
- fi
- # Second, check and see if it is already in the install pool.
- # If it is and it is the same or lesser version, don't copy it
- elif [[ ${deb_pool["$debname"]} ]]; then
- if [[ ${deb_pool["$debname"]} < $debver ]]; then
- debs_to_copy+=("$deb")
- else
- debug "${debname}_${debver} in CD pool, omitting"
- fi
- else
- # It is not already in deb_pool or dest_pool, copy it.
- debs_to_copy+=("$deb")
- debug "Will copy ${debname}_${debver}"
- fi
- done
- # Now, we have a list of debs to copy, so do it.
- mkdir -p "$3"
- cp -t "$3" "${debs_to_copy[@]}"
- )
-}
-
-maybe_update_cache() {
- local pkgfile deb gem pkg_type rest need_update _pwd
- debug "Processing package lists"
- # Zero out our sources.list
- > "$BUILD_DIR/extra/sources.list"
- # Download and stash any extra files we may need
- # First, build our list of repos, ppas, debs, and gems
- for pkgfile in "$BUILD_DIR/extra/packages/"*.list; do
- [[ -f $pkgfile ]] || continue
- while read pkg_type rest; do
- case $pkg_type in
- repository)
- echo "${rest%%#*}" >> "$BUILD_DIR/extra/sources.list";;
- ppas) PPAS+=(${rest%%#*});;
- debs) DEBS+=(${rest%%#*});;
- gems) GEMS+=(${rest%%#*});;
- esac
- done <"$pkgfile"
- done
-
- _pwd=$PWD
- cd "$DEB_CACHE"
- # second, verify that the debs we need are in the cache.
- for deb in "${DEBS[@]}"; do
- [[ $(echo "$deb"*.deb) != "$deb*.deb" ]] || {
- need_update=true
- break
- }
- done
-
- cd "$GEM_CACHE"
- # third, verify that the gems we need are in the cache
- for gem in "${GEMS[@]}"; do
- [[ $(echo "$gem"*.gem) != "$gem*.gem" ]] || {
- need_update=true
- break
- }
- done
- cd "$_pwd"
-
- if [[ $need_update = true || \
- ( ! -d $DEB_CACHE ) || $* =~ update-cache ]]; then
- update_caches
- else
- return 0
- fi
-}
-
-for cmd in sudo chroot debootstrap mkisofs dpkg-scanpackages; do
- if which "$cmd" &>/dev/null; then continue; fi
- case $cmd in
- dpkg-scanpackages) die "Please install build-essential before trying to build Crowbar";;
- *) die "Please install $cmd before trying to build Crowbar.";;
- esac
-done
+. "$CROWBAR_DIR/$OS_TO_STAGE-extra/build_lib.sh"
{
# Make sure only one instance of the ISO build runs at a time.
@@ -318,25 +114,22 @@ done
# Finalize where we expect to find our caches and our chroot.
# If they were set in one of the conf files, don't touch them.
- # The directory we perform a minimal install of Ubuntu into if we need
- # to refresh our gem or deb caches
- [[ $UBUNTU_CHROOT ]] || UBUNTU_CHROOT="$CACHE_DIR/$UBUNTU_CODENAME.chroot"
+ # The directory we perform a minimal install into if we need
+ # to refresh our gem or pkg caches
+ [[ $CHROOT ]] || CHROOT="$CACHE_DIR/$OS_TOKEN.chroot"
- # Directories where we cache our debs, gems, and ami files
- [[ $DEB_CACHE ]] || DEB_CACHE="$CACHE_DIR/$UBUNTU_CODENAME/debs"
+ # Directories where we cache our pkgs, gems, and ami files
+ [[ $PKG_CACHE ]] || PKG_CACHE="$CACHE_DIR/$OS_TOKEN/pkgs"
[[ $GEM_CACHE ]] || GEM_CACHE="$CACHE_DIR/gems"
[[ $AMI_CACHE ]] || AMI_CACHE="$CACHE_DIR/amis"
- # The name of the Ubuntu iso we are using as a base.
- [[ $UBUNTU_ISO ]] || UBUNTU_ISO="ubuntu-$UBUNTU_VERSION-server-amd64.iso"
-
- # directory we will mount the Ubuntu .iso on to extract packages.
- [[ $UBUNTU_DIR ]] || UBUNTU_DIR="$IMAGE_DIR/${UBUNTU_ISO%.iso}"
+ # directory we will mount the .iso on to extract packages.
+ [[ $IMG_MNTPT ]] || IMG_MNTPT="$IMAGE_DIR/${ISO%.iso}"
-
# Make any directories we don't already have
- for d in "$ISO_LIBRARY" "$ISO_DEST" "$IMAGE_DIR" "$BUILD_DIR" "$AMI_CACHE" \
- "$SLEDGEHAMMER_PXE_DIR" "$UBUNTU_CHROOT"; do
+ for d in "$PKG_CACHE" "$GEM_CACHE" "$ISO_LIBRARY" "$ISO_DEST" \
+ "$IMAGE_DIR" "$BUILD_DIR" "$AMI_CACHE" \
+ "$SLEDGEHAMMER_PXE_DIR" "$CHROOT"; do
mkdir -p "$d"
done
@@ -356,24 +149,19 @@ done
die "Could not download $ami"
done
- # Try and download our ISO if we don't already have it
- [[ -f $ISO_LIBRARY/$UBUNTU_ISO ]] || {
- echo "$(date '+%F %T %z'): Downloading and caching $UBUNTU_ISO"
- curl -o "$ISO_LIBRARY/$UBUNTU_ISO" \
- "$UBUNTU_ISO_MIRROR/ubuntu-iso/CDs/$UBUNTU_VERSION/$UBUNTU_ISO" || \
- die "Missing our Ubuntu source image"
- }
+ # Fetch the OS ISO if we need to.
+ [[ -f $ISO_LIBRARY/$ISO ]] || fetch_os_iso
# Start with a clean slate.
- clean_dirs "$UBUNTU_DIR" "$BUILD_DIR"
+ clean_dirs "$IMG_MNTPT" "$BUILD_DIR"
(cd "$CROWBAR_DIR"; $VCS_CLEAN_CMD)
- # Copy everything off the Ubuntu ISO to our build directory
- debug "Copying Ubuntu off $UBUNTU_ISO"
- sudo mount -t iso9660 -o loop "$ISO_LIBRARY/$UBUNTU_ISO" "$UBUNTU_DIR" || \
- die "Could not mount $UBUNTU_ISO"
- cp -rT "$UBUNTU_DIR" "$BUILD_DIR"
- sudo umount -d "$UBUNTU_DIR"
+ # Copy everything off the ISO to our build directory
+ debug "Copying off $ISO"
+ sudo mount -t iso9660 -o loop "$ISO_LIBRARY/$ISO" "$IMG_MNTPT" || \
+ die "Could not mount $ISO"
+ cp -rT "$IMG_MNTPT" "$BUILD_DIR"
+ sudo umount -d "$IMG_MNTPT"
# Make everything writable again.
chmod -R u+w "$BUILD_DIR"
@@ -385,48 +173,24 @@ done
# Copy over the Crowbar bits and their prerequisites
debug "Staging extra Crowbar bits"
- cp -r "$CROWBAR_DIR/ubuntu-$UBUNTU_VERSION-extra"/* "$BUILD_DIR/extra"
+ cp -r "$CROWBAR_DIR/$OS_TOKEN-extra"/* "$BUILD_DIR/extra"
cp -r "$CROWBAR_DIR/change-image"/* "$BUILD_DIR"
# If we were asked to update our cache, do it.
maybe_update_cache "$@"
- # Copy our extra debs, gems, and amis over
- debug "Copying debs, gems, and amis"
- copy_debs "$BUILD_DIR/pool" "$DEB_CACHE" "$BUILD_DIR/extra/debs"
+ # Copy our extra pkgs, gems, and amis over
+ debug "Copying pkgs, gems, and amis"
+ copy_pkgs "$BUILD_DIR/pool" "$PKG_CACHE" "$BUILD_DIR/extra/pkgs"
cp -r "$GEM_CACHE" "$BUILD_DIR/extra"
cp -r "$AMI_CACHE/." "$BUILD_DIR/ami/."
- # Make our new packages repository.
- ( cd "$BUILD_DIR/extra"
- debug "Recreating Packages.gz"
- dpkg-scanpackages debs /dev/null |gzip -9 >Packages.gz)
+ reindex_packages
# Store off the version
echo "$VERSION" >> "$BUILD_DIR/dell/Version"
- # Fix up the initrd
- ( cd "$CROWBAR_DIR/initrd"
- debug "Fixing up initrd"
- [[ -d scratch ]] && rm -rf scratch
- mkdir scratch
- # Grab _all_ the nic drivers. We probably don't need them,
- # but a little paranoia never hurt anyone.
- ( cd scratch;
- debug "Adding all nic drivers"
- for udeb in "$BUILD_DIR/pool/main/l/linux/"nic-*-generic-*.udeb; do
- ar x "$udeb"
- tar xzf data.tar.gz
- rm -rf debian-binary *.tar.gz
- done
- # Make sure installing off a USB connected DVD will work
- debug "Adding USB connected DVD support"
- mkdir -p var/lib/dpkg/info
- cp ../cdrom-detect.postinst var/lib/dpkg/info
- # Append our new gzipped CPIO archive onto the old one.
- find . |cpio --create --format=newc --owner root:root 2>/dev/null | \
- gzip -9 >> "$BUILD_DIR/install/initrd.gz" )
- rm -rf scratch )
-
+ final_build_fixups
+
# Copy over the Sledgehammer bits
debug "Copying over Sledgehammer bits"
for d in "$CROWBAR_DIR/"updates*; do
ubuntu-10.10-extra/build_lib.sh (269 lines added)
@@ -0,0 +1,269 @@
+#!/bin/bash
+# This file is sourced by build_crowbar.sh when you want to build Crowbar
+# using Ubuntu 10.10 as the base OS. It includes all Ubuntu 10.10 specific
+# build routines.
+
+# OS information for the OS we are building openstack on to.
+OS=ubuntu
+OS_VERSION=10.10
+OS_TOKEN="$OS-$OS_VERSION"
+OS_CODENAME=maverick
+# Server to download the mirror from if we need to.
+ISO_MIRROR="http://mirror.anl.gov/pub"
+
+# The name of the OS iso we are using as a base.
+[[ $ISO ]] || ISO="ubuntu-$OS_VERSION-server-amd64.iso"
+
+fetch_os_iso() {
+ # Try and download our ISO if we don't already have it
+ echo "$(date '+%F %T %z'): Downloading and caching $ISO"
+ curl -o "$ISO_LIBRARY/$ISO" \
+ "$ISO_MIRROR/ubuntu-iso/CDs/$OS_VERSION/$ISO" || \
+ die 1 "Missing our source image"
+}
+
+update_caches() {
+ # Hold a list of directories we will need to umount
+ TO_UMOUNT=()
+
+ # A little helper function for doing bind mounts.
+ bind_mount() {
+ TO_UMOUNT=("${TO_UMOUNT[@]}" "$2")
+ [[ -d $2 ]] || mkdir -p "$2"
+ grep -q "$2" /proc/self/mounts || sudo mount --bind "$1" "$2"
+ }
+
+ # A little helper for running commands in the chroot.
+ in_chroot() { sudo -H chroot "$CHROOT" "$@"; }
+
+ # second, debootstrap a minimal install of our target version of
+ # Ubuntu to ensure that we don't interfere with the host's package cache.
+ debug "Making package-fetching chroot"
+ mkdir -p "$CHROOT"
+ sudo mount -t tmpfs -o size=1G none "$CHROOT"
+ sudo debootstrap "$OS_CODENAME" "$CHROOT" \
+ "file://$BUILD_DIR" || \
+ die 1 "Could not bootstrap our scratch target!"
+ # mount some important directories for the chroot
+ for d in proc sys dev dev/pts; do
+ bind_mount "/$d" "$CHROOT/$d"
+ done
+ # make sure the chroot can resolve hostnames
+ sudo cp /etc/resolv.conf "$CHROOT/etc/resolv.conf"
+
+ # Make sure we are using a correctly prepopulated sources.list.
+ sudo cp "$BUILD_DIR/extra/sources.list" \
+ "$CHROOT/etc/apt/sources.list"
+
+ # if we have deb caches, copy them back in to save time on the downloads.
+ sudo cp -a "$PKG_CACHE/." "$CHROOT/var/cache/apt/archives/."
+
+ debug "Fetching needed packages"
+ # update, add infrastructure for adding PPAs,
+ # add additional PPAs, and update again.
+ in_chroot /usr/bin/apt-get -y --force-yes --allow-unauthenticated update
+ in_chroot /usr/bin/apt-get -y --force-yes --allow-unauthenticated install \
+ python-software-properties
+ for ppa in "${PPAS[@]}"; do
+ in_chroot apt-add-repository "ppa:$ppa"
+ done
+ # Get the key for the Opscode repo we are grabbing Chef bits from.
+ wget -qO - http://apt.opscode.com/packages@opscode.com.gpg.key | \
+ in_chroot /usr/bin/apt-key add -
+ in_chroot /usr/bin/apt-get -y --force-yes --allow-unauthenticated update
+
+ # Download all the packages apt thinks we will need.
+ in_chroot /usr/bin/apt-get -y --force-yes \
+ --allow-unauthenticated --download-only install "${PKGS[@]}"
+ # actually install ruby1.8-dev and gem and their deps.
+ in_chroot /usr/bin/apt-get -y --force-yes \
+ --allow-unauthenticated install ruby1.8-dev rubygems1.8 build-essential
+ # install the gems we will need and all their dependencies
+ # We will get some build failures, but at this point we don't care because
+ # we are just caching the gems for the real install.
+ debug "Fetching Gems"
+ echo "There may be build failures here, we can safely ignore them."
+ gem_re='([^0-9].*)-([0-9].*)'
+ for gem in "${GEMS[@]}"; do
+ if [[ $gem =~ $gem_re ]]; then
+ echo "${BASH_REMATCH[*]}"
+ gemname="${BASH_REMATCH[1]}"
+ gemver="${BASH_REMATCH[2]}"
+ else
+ gemname="$gem"
+ gemver=''
+ fi
+ gemopts=(install --no-ri --no-rdoc)
+ [[ $gemver ]] && gemopts+=(--version "= ${gemver}")
+ in_chroot /usr/bin/gem "${gemopts[@]}" "$gemname"
+ done
+ debug "Saving downloaded packages"
+ # Save our updated gems and pkgs in the cache for later.
+ cp -a "$CHROOT/var/cache/apt/archives/." "$PKG_CACHE/."
+ cp -a "$CHROOT/var/lib/gems/1.8/cache/." "$GEM_CACHE/."
+ sync
+
+ debug "Cleaning up mounts"
+ # umount all the stuff we have mounted for the chroot.
+ while grep -q "$CHROOT" /proc/self/mounts; do
+ for m in "${TO_UMOUNT[@]}"; do sudo umount "$m"; sleep 1; done
+ sudo umount "$CHROOT"
+ done
+}
+
+copy_pkgs() {
+ # $1 = pool directory to build initial list of excludes against
+ # $2 = directory to copy from
+ # $3 = directory to copy to.
+
+ # First, a couple of hashes to hold deb => revision values
+ declare -A deb_pool
+ declare -A dest_pool
+
+ local debname=''
+ local debarr=()
+ local pkgs_to_copy=()
+
+
+ # Scan through our pool to find pkgs we can easily omit.
+ while read debname; do
+ [[ -f $debname && $debname = *.deb ]] || continue
+ debname="${debname##*/}"
+ debarr=(${debname//_/ }) # split into (name version arch)
+ deb_pool["${debarr[0]}"]="${debarr[1]}"
+ done < <(find "$1" -name '*.deb')
+
+ (
+ cd "$2"
+ for deb in *; do
+ [[ -f $deb && $deb = *.deb ]] || continue
+ debname="${deb##*/}" # don't care about the source path
+ debname="${debname%_*.deb}" # don't care about the arch
+ debver="${debname#*_}"
+ debname="${debname%_*}"
+ # First, have we already copied another version of this
+ # deb? If so, decide whether to copy it or not.
+ if [[ ${dest_pool["$debname"]} ]]; then
+ # We have seen it. If the version we already copied
+ # is older than this one, queue this for copying instead.
+ # This relies on * expansion returning names in order.
+ if [[ ${dest_pool["$debname"]} < $debver ]]; then
+ debug "Omitting ${debname}_${dest_pool[$debname]} in favor of ${debname}_${debver}"
+ pkgs_to_copy[$((${#pkgs_to_copy[@]} - 1))]="$deb"
+ dest_pool["$debname"]="$debver"
+ fi
+ # Second, check and see if it is already in the install pool.
+ # If it is and it is the same or lesser version, don't copy it
+ elif [[ ${deb_pool["$debname"]} ]]; then
+ if [[ ${deb_pool["$debname"]} < $debver ]]; then
+ pkgs_to_copy+=("$deb")
+ else
+ debug "${debname}_${debver} in CD pool, omitting"
+ fi
+ else
+ # It is not already in deb_pool or dest_pool, copy it.
+ pkgs_to_copy+=("$deb")
+ debug "Will copy ${debname}_${debver}"
+ fi
+ done
+ # Now, we have a list of pkgs to copy, so do it.
+ mkdir -p "$3"
+ cp -t "$3" "${pkgs_to_copy[@]}"
+ )
+}
+
+maybe_update_cache() {
+ local pkgfile deb gem pkg_type rest need_update _pwd
+ debug "Processing package lists"
+ # Zero out our sources.list
+ > "$BUILD_DIR/extra/sources.list"
+ # Download and stash any extra files we may need
+ # First, build our list of repos, ppas, pkgs, and gems
+ for pkgfile in "$BUILD_DIR/extra/packages/"*.list; do
+ [[ -f $pkgfile ]] || continue
+ while read pkg_type rest; do
+ case $pkg_type in
+ repository)
+ echo "${rest%%#*}" >> "$BUILD_DIR/extra/sources.list";;
+ ppas) PPAS+=(${rest%%#*});;
+ pkgs) PKGS+=(${rest%%#*});;
+ gems) GEMS+=(${rest%%#*});;
+ esac
+ done <"$pkgfile"
+ done
+
+ _pwd=$PWD
+
+ # move old debs if they exist
+ if [[ -d $CACHE_DIR/$OS_CODENAME/debs ]]; then
+ mv "$CACHE_DIR/$OS_CODENAME/debs"/* "$PKG_CACHE"
+ rm -rf "$CACHE_DIR/$OS_CODENAME/debs"
+ fi
+
+ cd "$PKG_CACHE"
+ # second, verify that the pkgs we need are in the cache.
+ for deb in "${PKGS[@]}"; do
+ [[ $(echo "$deb"*.deb) != "$deb*.deb" ]] || {
+ need_update=true
+ break
+ }
+ done
+
+ cd "$GEM_CACHE"
+ # third, verify that the gems we need are in the cache
+ for gem in "${GEMS[@]}"; do
+ [[ $(echo "$gem"*.gem) != "$gem*.gem" ]] || {
+ need_update=true
+ break
+ }
+ done
+ cd "$_pwd"
+
+ if [[ $need_update = true || \
+ ( ! -d $PKG_CACHE ) || $* =~ update-cache ]]; then
+ update_caches
+ else
+ return 0
+ fi
+}
+
+reindex_packages() (
+ # Make our new packages repository.
+ cd "$BUILD_DIR/extra"
+ debug "Recreating Packages.gz"
+ dpkg-scanpackages pkgs /dev/null 2>/dev/null |gzip -9 >Packages.gz
+)
+
+final_build_fixups() {
+ # Copy our isolinux and preseed files.
+ cp -r "$BUILD_DIR/extra/isolinux"/* "$BUILD_DIR/isolinux"
+ cp -r "$BUILD_DIR/extra/preseed"/* "$BUILD_DIR/preseed"
+ # Fix up the initrd
+ ( cd "$CROWBAR_DIR/initrd"
+ debug "Fixing up initrd"
+ [[ -d scratch ]] && rm -rf scratch
+ mkdir scratch
+ # Grab _all_ the nic drivers. We probably don't need them,
+ # but a little paranoia never hurt anyone.
+ ( cd scratch;
+ debug "Adding all nic drivers"
+ for udeb in "$BUILD_DIR/pool/main/l/linux/"nic-*-generic-*.udeb; do
+ ar x "$udeb"
+ tar xzf data.tar.gz
+ rm -rf debian-binary *.tar.gz
+ done
+ # Make sure installing off a USB connected DVD will work
+ debug "Adding USB connected DVD support"
+ mkdir -p var/lib/dpkg/info
+ cp ../cdrom-detect.postinst var/lib/dpkg/info
+ # Append our new gzipped CPIO archive onto the old one.
+ find . |cpio --create --format=newc --owner root:root 2>/dev/null | \
+ gzip -9 >> "$BUILD_DIR/install/initrd.gz" )
+ rm -rf scratch )
+
+}
+
+for cmd in sudo chroot debootstrap mkisofs dpkg-scanpackages; do
+ which "$cmd" &>/dev/null || \
+ die 1 "Please install $cmd before trying to build Crowbar."
+done
change-image/isolinux/isolinux.cfg → ubuntu-10.10-extra/isolinux/isolinux.cfg
File renamed without changes
change-image/isolinux/pxelinux.0 → ubuntu-10.10-extra/isolinux/pxelinux.0
File renamed without changes
change-image/isolinux/pxelinux.cfg/default → ...10.10-extra/isolinux/pxelinux.cfg/default
File renamed without changes
change-image/preseed/openstack_admin.seed → ...-10.10-extra/preseed/openstack_admin.seed
File renamed without changes
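
After this commit the dispatch at the top of build_crowbar.sh only looks for a <name>-extra directory containing a build_lib.sh, so supporting another release means supplying the same hooks that ubuntu-10.10-extra/build_lib.sh provides. The sketch below is purely hypothetical (the 11.04 directory, codename, and mirror URL are illustrative and not part of this commit); it only shows the shape such a library would take:

    #!/bin/bash
    # Hypothetical ubuntu-11.04-extra/build_lib.sh -- a sketch, not part of this commit.
    # build_crowbar.sh sources this file and expects it to define the variables and
    # functions referenced from the main build block.

    OS=ubuntu
    OS_VERSION=11.04
    OS_TOKEN="$OS-$OS_VERSION"
    OS_CODENAME=natty
    ISO_MIRROR="http://mirror.example.com/pub"   # assumption: any Ubuntu ISO mirror

    # The name of the OS iso we are using as a base.
    [[ $ISO ]] || ISO="ubuntu-$OS_VERSION-server-amd64.iso"

    fetch_os_iso() {
        # Called by build_crowbar.sh when $ISO_LIBRARY/$ISO is missing.
        curl -o "$ISO_LIBRARY/$ISO" \
            "$ISO_MIRROR/ubuntu-iso/CDs/$OS_VERSION/$ISO" || \
            die 1 "Missing our source image"
    }

    # build_crowbar.sh also calls these, so an OS library has to define them:
    #   maybe_update_cache -- decide whether the package/gem caches need refreshing
    #                         (the Ubuntu library then calls its own update_caches)
    #   copy_pkgs          -- copy cached packages into the build tree
    #   reindex_packages   -- rebuild the on-ISO package index
    #   final_build_fixups -- isolinux/preseed/initrd adjustments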