# (GitHub page scrape artifacts, commented out so they are not executed)
# Permalink
# Switch branches/tags
# Nothing to show
# Find file
# Fetching contributors…
# Cannot retrieve contributors at this time
# executable file 1278 lines (1162 sloc) 59.1 KB
#!/bin/bash
# figtree; an Arch Linux AIF module to create conFIG TREEs from local or remote profiles
# Ethan Schoonover <es@ethanschoonover.com>
# Please see the http://ethanschoonover.com/figtree for details and usage
# This procedure is similar to (and builds on) the standard AIF core automatic
# procedure. Indeed, it can be used as an almost seamless drop in replacement for
# it, though if your needs are met by the standard AIF core automatic procedure,
# it is recommended to use that as it has undergone considerably more testing.
#
# figtree automatic differs from the core automatic procedure in several respects:
#
# 1. It can be run from a remote path.
# 2. It can load both remote and local profiles.
# 3. These profiles can be standard AIF profiles or "figtree profiles" which
# support more complex system configuration at install time
# 4. Figtree also supports installation of packages from the AUR automatically
#
# While this may sound nice, please be aware that figtree is very experimental.
# It is entirely possible to think you are picking a fig and come away with
# a lemon, so please report bugs on the github tracker at:
#
# http://github.com/altercation/figtree
#
# figtree profiles use a handful of simple AIF related functions to add packages,
# system configuration variables, and complete system files during AIF
# installation. These profiles can be broken up into modular, scope independent
# files such as profiles for specific hardware, profiles for specific desktop
# environments, or user profiles. Each of these "scoped" profiles can be imported
# into a final "master" profile (though there is no fundamental difference
# between any of them).
#
# # packages in figtree profiles will be sourced from the official arch repositories and, if not found,
# # will be sourced from AUR automatically (there should be no overlap between the official repos and
# # the AUR, by policy, so all package names should be unique).
#
# command line examples (don't forget the optional -d for debugging):
#
# REMOTE PROCEDURE & REMOTE PROFILE
# aif -p http://github.com/user/figtree/raw/master/procedures/automatic \
# -c profiles/my-laptop
#
# REMOTE PROCEDURE & REMOTE PROFILE (DIFFERENT SOURCE URLS)
# aif -p http://github.com/user/figtree/raw/master/procedures/automatic \
# -c http://github.com/OTHERUSER/figtree/profiles/my-laptop
#
# LOCAL PROCEDURE & REMOTE PROFILE
# aif -p figtree/automatic \
# -c https://github.com/altercation/figtree/profiles/my-laptop
#
# LOCAL PROCEDURE & LOCAL PROFILE
# aif -p figtree/automatic -c profiles/my-desktop
#
# LOCAL PROCEDURE & LOCAL PROFILE (can be any figtree profile)
# aif -p figtree/automatic -c systems/lenovo/x220/profile
# ----------------------------------------------------------------------
# procedure specific definitions
# ----------------------------------------------------------------------
depend_procedure core automatic
var_MODULE_NAME=figtree
# NOTE: the original line ended with a stray backslash, silently joining this
# assignment and the next into one compound line; removed for clarity.
var_MODULE_ACTION="IRREVOCABLE FILESYSTEM CHANGES:\nThis procedure performs a complete system erasure and automated installation."
var_PROCEDURE_SUBPATH=/procedures/automatic
var_TEMP_DIR="${RUNTIME_DIR}/${var_MODULE_NAME}-temp"
# start every run with a clean scratch directory
[[ -e "$var_TEMP_DIR" ]] && rm -rf "$var_TEMP_DIR" || true; mkdir -p "$var_TEMP_DIR"
var_KNOWN_SUBDIRS=(procedures libs profiles systems environments applications users examples peripherals filesystems) # figtree specific
# when aif sourced this procedure over http, the real URL is in $2
var_AIF_PROCEDURE_PATH="$procedure"; [[ "$module" == "http" ]] && var_AIF_PROCEDURE_PATH="$2" || true
# profile search paths: the procedure's own tree, then the local aif user dir
var_MODULE_PATHS=("${var_AIF_PROCEDURE_PATH%$var_PROCEDURE_SUBPATH*}" "/usr/lib/aif/user/figtree")
# Runtime packages are used during installation but are not installed to the target system unless included in TARGET_PACKAGES as well
var_RUNTIME_REPOSITORIES= # var_RUNTIME_REPOSITORIES=(archlinuxfr "http://repo.archlinux.fr/${var_ARCH}")
var_RUNTIME_PACKAGES="automake make patch" # "git gcc autoconf automake bison fakeroot flex gcc libtool m4 make patch pkg-config"
# Preload packages are used prior to profile load (thus are required to bootstrap the rest of the profile based runtime/target package install)
# They use the var_RUNTIME_REPOSITORIES value above
var_RUNTIME_PACKAGES_PRELOAD="git" # yaourt mercurial svn
var_TARGET_GROUPS="base" # this is problematic... I should identify a better way to do this
var_POST_OVERLAY= # unset by default; used in partial-update-overlay
div="-------------------------------------------------------------------------------"
# ----------------------------------------------------------------------
# PHASE WORKER CUSTOMIZATIONS
# ----------------------------------------------------------------------
# we reorder phase_preparation so that if we're running as a local
# procedure from install media, we set up the network first and bring in
# the required runtime packages (primarily for our vcs remote sourcing)
# phase_basics=(set_clock prepare_disks) # defaults, here for reference during module development
# phase_system=(package_list install_packages configure_system install_bootloader) # defaults, here for reference
# phase_finish=(msg_report) # defaults, for reference
#phase_basics=() # off for testing DEBUG
#phase_system=() # off for testing DEBUG
# phase worker lists: the network comes up and VCS tooling is preloaded
# before configure so that remote profiles can be fetched
phase_preparation=(
    preconfigure
    intro
    runtime_network
    preload
    configure
    select_source
    runtime_repositories
    runtime_packages
)
# append this module's custom workers (defined below) to the system phase
phase_system+=(write_configs write_overlay write_coda)
# ----------------------------------------------------------------------
# DEFAULT VALUES
# ----------------------------------------------------------------------
# overridden by values in profiles. profile variables are identical but
# without the default_ prefix. If the profile fails to set a value, the
# default as listed here is used.
#
# these differ from variables with a var_ prefix in an important way:
# default_ variables can be completely overridden, as I use them here
# while var_ variables (such as var_RUNTIME_REPOSITORIES) that are set
# above in this procedure are *retained* (and possibly added to user
# values) so, for example, var_RUNTIME_REPOSITORIES retains the value
# assigned above and takes any extra values the user has assigned to
# RUNTIME_REPOSITORIES in their profiles
# override anything pulled in from depend_procedures (so we have a
# tabula rasa and can be sure we have *only* values from this
# procedure or from the custom profiles)
# clear values possibly inherited from depend_procedure, so only this
# procedure and the loaded profiles determine the final configuration
HOSTNAME=
LOCALE=
DAEMON_LOCALE=
HARDWARECLOCK=
TIMEZONE=
KEYMAP=
CONSOLEFONT=
CONSOLEMAP=
USECOLOR=
NETWORK_PERSIST=
NETWORKS=
# fallback /etc/rc.conf settings, used when a profile leaves one empty
default_rc_HOSTNAME='archlinux'
default_rc_LOCALE='en_US.UTF-8'
default_rc_DAEMON_LOCALE='no'
default_rc_HARDWARECLOCK='UTC'
default_rc_TIMEZONE='Canada/Pacific'
default_rc_KEYMAP='us'
default_rc_CONSOLEFONT=
default_rc_CONSOLEMAP=
default_rc_USECOLOR='yes'
default_rc_NETWORK_PERSIST='no'
default_rc_NETWORKS=(main home work)
# fallback disk layout: /boot on sda1 (ext2), swap on sda2, / on sda3 (ext4)
default_fs_GRUB_DEVICE=/dev/sda
default_fs_PARTITIONS='/dev/sda 100:ext2:+ 512:swap *:ext4'
default_fs_BLOCKDATA='/dev/sda1 raw no_label ext2;yes;/boot;target;no_opts;no_label;no_params
/dev/sda2 raw no_label swap;yes;no_mountpoint;target;no_opts;no_label;no_params
/dev/sda3 raw no_label ext4;yes;/;target;no_opts;no_label;no_params'
# fallback install options
default_in_RANKMIRRORS=10 # 0 disables rankmirrors; 10 mirrors is a good balance
# ----------------------------------------------------------------------
# parameters and aif.sh related functions
# ----------------------------------------------------------------------
# default intro summary descriptions
# ----------------------------------------------------------------------
#declare -A intro_AUTOMATIC_PROFILE intro_REPO_TYPE intro_AUR_SUPPORT intro_SINGLEPROFILE intro_WIPE_OVERLAY intro_FORCE_OVERWRITE intro_NO_BACKUPS intro_DIFFS
# Help/status text for each command line option. Naming convention:
#   warn_X   - error text when option X is missing its argument
#   intro_X  - status line shown when option X is OFF (default)
#   option_X - status line that replaces intro_X when X is enabled
#   usage_X  - long-form -h help text for option X
# process_args swaps intro_X -> option_X when a flag is seen; worker_intro
# prints every non-empty intro_* variable.
warn_ALL='Run this procedure again with the -h flag to see all options.'
warn_AUTOMATIC_PROFILE='-c requires one or more profiles listed after it.'
#intro_AUTOMATIC_PROFILE='(-c) CRITICAL: must specify a profile using the -c PROFILE parameter.'
#option_AUTOMATIC_PROFILE=' -c Profile present. The config profile has been set successfully.'
#usage_AUTOMATIC_PROFILE='
#-c path Specify a profile path.
# The path may specified in the following formats:
#
# relative: profiles/my-laptop
# absolute: ~/aif-files/figtree/profiles/my-laptop
# remote: http://github.com/name/repo/profiles/my-pc
#
# Note! If you have *already* sourced this *procedure* remotely
# from a repository, it is fine to just use the relative path
# pattern above (see first usage example above).'
warn_REPO_TYPE='-r requires a repository type to be specified (git|hg|svn|wget)'
intro_REPO_TYPE='(-r) Force repo OFF. Remote repository type will be detected AUTOMATICALLY.'
option_REPO_TYPE=' -r Force repo ON. Remote repository type has been set manually.'
usage_REPO_TYPE='
-r repotype Force repository type
This procedure can normally automatically detect the type of
respository used in a remote profile or procedure URL. If this
automatic detection fails for some reason, specifying a one of
the following values will force the type to one of:
git hg|mercurial svn|subversion wget
NOTE/TODO: wget not yet implemented'
intro_AUR_SUPPORT='(-a) AUR support OFF. Packages not in official repos will be skipped.'
option_AUR_SUPPORT=' -a AUR support ON. Packages not in official repos will be sourced from AUR.'
usage_AUR_SUPPORT='
-a Enable AUR support (experimental)
Packages includes in a profile that cannot be located in an
official repository will be sourced from AUR if present.'
intro_WIPE_OVERLAY='(-w) Wipe overlay directory OFF. Old overlay directory will not be deleted.'
option_WIPE_OVERLAY=' -w Wipe overlay directory ON. Old overlay directory will be DELETED first.'
usage_WIPE_OVERLAY='
-w Wipe overwrite directory (DANGER!)
Completely removes existing overlay directory prior to copying
new overlay files over. Will operate recursively if the -r
parameter is also present. Be careful! Use version control.'
intro_FORCE_OVERWRITE='(-f) Force overwrite OFF. Prompts given prior to overwrite events.'
option_FORCE_OVERWRITE=' -f Force overwrite ON. *NO* PROMPTS given prior to overwrite events.'
usage_FORCE_OVERWRITE='
-f Force overwrite mode (DANGER!)
Does not prompt for action when scanning files; simply
overwrites all overlay files in the target profile
subdirectory. Overwritten files will be backed up unless the
-n command line parameter is also specified.'
intro_NO_BACKUPS='(-n) No-backups mode OFF. Backups WILL BE created during any overwrite.'
option_NO_BACKUPS=' -n No-backups mode ON. *NO* BACKUP FILES created during overwrites.'
# NOTE(review): in the string below, the quotes around '-n' close and reopen
# the surrounding single-quoted literal, so the displayed text contains -n
# WITHOUT quotes — presumably not what was intended; confirm and escape if so.
usage_NO_BACKUPS='
-n No backup file. When writing an overlay file, any existing
overlay file in that directory with the same name will normally
be backed up first. Use of the '-n' parameter will stop backups
from being written; use with caution or with version control!'
intro_SINGLEPROFILE='(-s) Single-profile mode OFF. ALL LINKED PROFILES processed recursively.'
option_SINGLEPROFILE=' -s Single-profile mode ON. NO LINKED PROFILES will be processed.'
usage_SINGLEPROFILE='
-s Single-profile mode.
Normally, all profiles referenced by master profiles will be
processed by the procedure recursively. Single-profile mode
references only the profile(s) *explicitly* listed on the
command line.'
intro_DIFFS='(-D) Diff mode OFF. No file diff information will be shown during overwrites.'
option_DIFFS=' -D Diff mode ON. File diffs will be displayed prior to file overwrites.'
usage_DIFFS='
-D Report file/value diffs (used in partial procedures)'
# aif general defaults
# ----------------------------------------------------------------------
var_AUTOMATIC_PROFILE= # used by: automatic, partial-update-overlay
# command line flag state; each is toggled by process_args below
# ----------------------------------------------------------------------
var_REPO_TYPE= # used by: automatic
var_AUR_SUPPORT= # used by: automatic
var_SINGLEPROFILE= # used by: automatic, partial-update-overlay
var_WIPE_OVERLAY= # used by: partial-update-overlay
var_FORCE_OVERWRITE= # used by: partial-update-overlay
var_NO_BACKUPS= # used by: automatic, partial-update-overlay
var_DIFFS= # used by: partial-update-overlay
default_answer=yes
unused_opts="WIPE_OVERLAY FORCE_OVERWRITE DIFFS"
# getopts-style option string accepted by process_args; options named in
# unused_opts above are hidden from the -h output for this procedure
# ----------------------------------------------------------------------
var_OPTS_STRING="c:r:aswfnD"
process_args ()
{
    # Handle one procedure-specific command line option (invoked by aif's
    # option parser for each flag declared in var_OPTS_STRING). Unknown
    # options fall through to the usage text and exit 5.
    dfunc
    # normalize edge case of 'aif -p figtree/procedurename -c' with no
    # profile (we get passed -: as an argument)
    [[ "$1" == "-:" ]] && shift
    local arg="-${1#-}"
    case $arg in
        -c) [[ -n "$2" ]] && var_AUTOMATIC_PROFILE=$2 || die_error "$warn_AUTOMATIC_PROFILE" ;;
        # FIX: the assignments must run in the current shell — the original
        # wrapped them in ( ), creating a subshell that discarded both values
        -r) if [[ -n "$2" ]]; then var_REPO_TYPE=$2; intro_REPO_TYPE=$option_REPO_TYPE; else die_error "$warn_REPO_TYPE"; fi ;;
        -a) var_AUR_SUPPORT=1; intro_AUR_SUPPORT="$option_AUR_SUPPORT" ;;
        -s) var_SINGLEPROFILE=1; intro_SINGLEPROFILE="$option_SINGLEPROFILE" ;;
        -w) var_WIPE_OVERLAY=1; intro_WIPE_OVERLAY="$option_WIPE_OVERLAY"; default_answer=no ;;
        -f) var_FORCE_OVERWRITE=1; intro_FORCE_OVERWRITE="$option_FORCE_OVERWRITE"; default_answer=no ;;
        -n) var_NO_BACKUPS=1; intro_NO_BACKUPS="$option_NO_BACKUPS"; default_answer=no ;;
        -D) var_DIFFS=1; intro_DIFFS="$option_DIFFS" ;;
        *)
            unset_opts # remove opt information for items we don't use in the calling procedure
            var_ARGS_USAGE="\n${div}\nAIF PROCEDURE: $var_MODULE_NAME module $(basename $var_AIF_PROCEDURE_PATH) procedure.\n${div}\n"
            var_ARGS_USAGE+="Usage:\n${div}\n\n${var_MODULE_USAGE}\n\n${div}\nOptions:\n${div}
$(for usage_item in ${!usage_@}; do echo "${!usage_item}\n"; done)"
            usage && exit 5
            ;;
    esac
}
#read -r -d '' var_ARGS_USAGE <<-'EOF'
# Usage examples appended to the -h output (see the usage fallthrough in
# process_args). NOTE: read -d '' consumes up to EOF and returns non-zero
# because the NUL delimiter is never found; that status is intentionally
# ignored. The <<- form strips leading tabs only; the unquoted EOF delimiter
# means each trailing \\ collapses to a single backslash in the stored text.
read -r -d '' var_MODULE_USAGE <<-EOF
(note that in the first example, the procedure must be sourced using the
raw/master remote path as AIF doesn't initially have git support. Once
the figtree procedure is running, the profile paths can (and should)
omit the raw/master path as in the second and third examples.
REMOTE PROCEDURE & REMOTE PROFILE
aif -p http://github.com/user/figtree/raw/master/procedures/automatic \\
-c profiles/my-laptop
REMOTE PROCEDURE & REMOTE PROFILE (DIFFERENT SOURCE URLS)
aif -p http://github.com/user/figtree/raw/master/procedures/automatic \\
-c http://github.com/OTHERUSER/figtree/profiles/my-laptop
LOCAL PROCEDURE & REMOTE PROFILE
aif -p figtree/automatic \\
-c https://github.com/altercation/figtree/profiles/my-laptop
LOCAL PROCEDURE & LOCAL PROFILE
aif -p figtree/automatic -c profiles/my-desktop
LOCAL PROCEDURE & LOCAL PROFILE (can be any figtree profile)
aif -p figtree/automatic -c systems/lenovo/x220/profile
EOF
# ----------------------------------------------------------------------
# procedure workers
# ----------------------------------------------------------------------
worker_preconfigure ()
{
    # Initialize the UI (defaulting to cli when aif was given no -i choice)
    # and drop help text for options this procedure does not use.
    # FIX: removed leftover debug output (echo "TEST>>>").
    var_UI_TYPE=${arg_ui_type:-cli}
    ui_init
    unset_opts
}
unset_opts ()
{
    # Remove intro/option/usage help text for every option named in
    # $unused_opts so it never appears in status or -h output.
    # FIX: `unset` accepts variable names directly; the eval was needless.
    local unused_opt
    for unused_opt in $unused_opts
    do
        unset "intro_${unused_opt}" "option_${unused_opt}" "usage_${unused_opt}"
    done
}
worker_intro ()
{
    # Summarize the procedure, the chosen profile, and all active options,
    # then ask for confirmation (skipped when NOFAILSAFE=1).
    inform "\nPROCEDURE: $var_MODULE_NAME module $(basename $var_AIF_PROCEDURE_PATH) procedure."
    inform "PROFILE: $var_AUTOMATIC_PROFILE"
    inform "\nOPTIONS:"
    for opt_line in ${!intro_@}
    do
        if [[ -n "${!opt_line}" ]]
        then
            echo "${!opt_line}"
        fi
    done
    inform "\n${var_MODULE_ACTION}\n"
    # running from install media (/arch exists) defaults the prompt to yes
    [[ -e "/arch" ]] && default_answer=yes
    if [[ "$NOFAILSAFE" != "1" ]]
    then
        ask_yesno "Do you want to continue?" $default_answer || exit
    fi
}
worker_preload ()
{
    # Bootstrap phase: install just enough VCS tooling (git/hg/svn) on the
    # install environment so remote procedures/profiles can be fetched,
    # BEFORE the profile itself is loaded. Registers any extra runtime
    # pacman repos, refreshes pacman itself, then installs each preload
    # package that is not already present.
    inform "Installing preload packages required to access remote profiles..."
    # check our command line parameters to see if we need to preload git, hg, etc.
    # thus avoiding installing what we don't need
    # (detect_repo_type echoes nothing for non-repo paths, so local paths
    # contribute no packages here)
    var_RUNTIME_PACKAGES_PRELOAD+=" $(detect_repo_type $var_AIF_PROCEDURE_PATH)"
    var_RUNTIME_PACKAGES_PRELOAD+=" $(detect_repo_type $var_AUTOMATIC_PROFILE)"
    # dedup and install packages
    var_RUNTIME_PACKAGES_PRELOAD="$(dedup $var_RUNTIME_PACKAGES_PRELOAD)"
    # var_RUNTIME_REPOSITORIES is a flat (name location name location ...)
    # list; walk it pairwise and register each repo not already configured
    for i in `seq 0 $((${#var_RUNTIME_REPOSITORIES[@]}/2-1))`
    do
        repo=${var_RUNTIME_REPOSITORIES[$(($i*2))]}
        location=${var_RUNTIME_REPOSITORIES[$(($i*2+1))]}
        if ! list_pacman_repos runtime | grep -q $repo
        then
            add_pacman_repo runtime $repo "$location" || return 1
        fi
    done
    rank_mirrors "runtime" #...slow, let's save it for install only
    inform "Checking for pacman updates..."
    $PACMAN -Sy pacman --noconfirm &>$LOG || return 1 # update pacman first to avoid having to run twice
    # DEBUG: could skip this here since we are now checking makedepends in installaur function
    # AUR builds need the base-devel group available at build time
    if [[ -n $var_AUR_SUPPORT ]]; then
        var_RUNTIME_PACKAGES_PRELOAD+=" $(echo $($PACMAN -Sg base-devel | awk '{print $2}'))" || die_error "Failed to poll pacman db for base-devel group members."
    fi
    # install each preload package unless pacman reports it already present
    for pkg in $var_RUNTIME_PACKAGES_PRELOAD
    do
        if ! $PACMAN -Qs "^$pkg$" &>$LOG
        then
            inform "Installing $pkg for use during installation..."; $PACMAN -Sy --noconfirm --needed $pkg &>$LOG || return 1
        else
            inform "Skipping $pkg; already installed."
        fi
    done
    return 0
}
worker_configure ()
{
    # Load the user profile(s), fill in defaults for anything the profiles
    # left unset, and collect filesystem/runtime settings into the var_*
    # namespace consumed by the rest of the procedure.
    inform "Starting system configuration and profile load sequence..."
    # NOTE: there is some overlap here with settings done in the following
    # aif core procedures and libs:
    #
    # worker_configure_system,
    # lib-ui-interactive.sh preconfigure_target()
    # lib-misc.sh (functions referenced by preconfigure_target)
    #
    # we are retaining reference to those functions here (by not messing
    # with that worker in the phase_system (we only add to phase_system)
    # and by not overriding those library functions. This is intentional
    # so as to minimize our "footprint" on aif core.
    #
    # we use some more abstract functions to write out system values
    # to /etc/rc.conf
    # LOAD PROFILE
    var_LOADED_PROFILES= # reset
    [[ -z "$var_AUTOMATIC_PROFILE" ]] && \
        die_error "You must specify a config file (-c profile/path) to use this procedure. Run this procedure again with -h for more details."
    # FIX: expand the full search path list in the error (a bare $array
    # prints only the first element); quote the profile argument
    load_profile "$var_AUTOMATIC_PROFILE" || \
        die_error "Failed to load profile $var_AUTOMATIC_PROFILE from paths ${var_MODULE_PATHS[*]}"
    # DEFAULT VALUES
    # override any default_ values using values from profiles
    process_defaults_from_prefix "default_rc_"
    process_defaults_from_prefix "default_fs_"
    process_defaults_from_prefix "default_in_"
    # RUNTIME SOURCES
    # NOTE: we've already installed some prerequisite packages (git, etc.) in
    # order to source remote profiles. We now load any other repos/packages
    # that have been specified in the user profile.
    var_RUNTIME_REPOSITORIES+=($RUNTIME_REPOSITORIES) # MUST BE IN AN ARRAY (relies on word splitting)
    var_RUNTIME_PACKAGES=$(dedup "$var_RUNTIME_PACKAGES $RUNTIME_PACKAGES")
    var_RANKMIRRORS=$RANKMIRRORS
    # FILESYSTEM
    # TODO: we're not yet setting any defaults for filesystem values
    # FIX: each message now names the missing variable (the BLOCKDATA one
    # previously repeated "partition scheme")
    [[ -z "$PARTITIONS" ]] && die_error "You did not specify a partition scheme (PARTITIONS)"
    [[ -z "$BLOCKDATA" ]] && die_error "You did not specify block device data (BLOCKDATA)"
    [[ -z "$GRUB_DEVICE" ]] && die_error "You did not specify a grub device (GRUB_DEVICE)"
    var_GRUB_DEVICE=$GRUB_DEVICE
    var_PARTITIONS=$PARTITIONS
    var_BLOCKDATA=$BLOCKDATA
    # TARGET PACKAGES
    #var_TARGET_GROUPS+=$TARGET_GROUPS # TODO: smarter add to avoid duplicates (or just check at install to deduplicate?)
}
worker_select_source ()
{
    # Honor MIRROR & TARGET_REPOSITORIES when the profile supplied them;
    # otherwise fall back to kernel.org, preferring the local install-media
    # core repo when one exists.
    # (TODO: SOURCE has been retired, apparently. ask dieter about this.)
    if [[ -z $MIRROR ]]
    then
        # single-quoted on purpose: $repo/$arch are placeholders expanded later
        MIRROR='ftp://mirrors.kernel.org/archlinux/$repo/os/$arch'
    fi
    if [[ -z $TARGET_REPOSITORIES ]]
    then
        if [ -d /repo/core ]; then
            # install media carries a local core repo; use it for core only
            TARGET_REPOSITORIES=(core 'file:///repo/$repo/$arch' extra $var_MIRRORLIST community $var_MIRRORLIST)
        else
            TARGET_REPOSITORIES=(core $var_MIRRORLIST extra $var_MIRRORLIST community $var_MIRRORLIST)
        fi
    fi
    #rank_mirrors "runtime" #...slow, let's save it for install only
}
worker_package_list ()
{
    # Fold the profile-specified package/group lists into the var_TARGET_*
    # equivalents consumed by the core install_packages worker.
    # TODO: refactor this to not use var_TARGET? should the _packages command use $TARGET_PACKAGES instead?
    var_TARGET_PACKAGES+=$TARGET_PACKAGES
    var_TARGET_GROUPS+=$TARGET_GROUPS
    var_TARGET_PACKAGES_EXCLUDE+=$TARGET_PACKAGES_EXCLUDE
    # var_TARGET_GROUPS already defaults to "base" at the top of this procedure
    return 0
}
worker_write_configs()
{
    # Execute every queued configuration change, then write rc.conf values.
    # FIX: iterate over the array's actual indices — bash arrays built with
    # += are 0-based, so the original `seq 1 N` skipped the first queued
    # change and executed one empty (no-op) command past the end.
    inform "Configuration write started..."
    local i count=0
    for i in "${!var_CONFIG_CHANGES[@]}"
    do
        count=$((count+1))
        d "CONFIG: executing config $count of ${#var_CONFIG_CHANGES[@]}: ${var_CONFIG_CHANGES[$i]#_}"
        ${var_CONFIG_CHANGES[$i]}
    done
    write_rc_values_from_defaults
}
worker_write_overlay()
{
    # Write each queued overlay file onto the target system.
    inform "Overlay write started..."
    dfunc
    local fig_overlay_entry
    for fig_overlay_entry in $var_OVERLAY_FILES; do
        write_overlay_file "$fig_overlay_entry"
    done
}
worker_write_coda ()
{
    # Run final, profile-supplied customization commands, then re-run target
    # postconfiguration (acceptable to run multiple times; regenerates
    # mkinitcpio etc. in case the coda commands changed anything relevant).
    inform "Final system customization commands started..."
    dfunc
    rank_mirrors # TODO: copy from runtime optimized file would be faster, but there might be different repos
    local coda_command
    # FIX: quote the array expansion — each element is a full command line
    # and may contain spaces; unquoted it was word-split into fragments
    # before eval, breaking every multi-word command
    for coda_command in "${var_CODA_COMMANDS[@]}"
    do
        eval "$coda_command" || show_warning "COMMAND FAILED" "coda command failed: $coda_command"
    done
    postconfigure_target
}
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# library functions
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# These should ideally be stored in the module libs directory as an
# actual function library, but as we are loading the procedure remotely
# it's better to bundle them here. Alternately we could try to
# manufacture a libs path from the remote procedure, but this adds
# potentially needless complexity. TODO: consider breaking out a
# separate lib and creating a remote lib sourcing function in future
# ----------------------------------------------------------------------
# lib-utilities
# ----------------------------------------------------------------------
# general utility functions
# strip_punct: remove one leading and one trailing punctuation character
strip_punct() { local stripped="$*"; stripped="${stripped#[[:punct:]]}"; stripped="${stripped%[[:punct:]]}"; echo -n "$stripped"; }
# unquote: remove one leading and one trailing double quote, if present
unquote() { local stripped="$*"; stripped="${stripped#\"}"; stripped="${stripped%\"}"; echo -n "$stripped"; }
# dedup: echo the argument words with duplicates removed, order preserved.
# FIX: newlist/item are now local — previously they were globals, so direct
# repeated calls in the same shell accumulated state from earlier invocations.
dedup() { local item newlist=; for item in $*; do [[ "$newlist" == "${newlist#* $item }" ]] && newlist="${newlist% } $item "; done; echo -n $newlist; }
# toupper: echo the first argument upper-cased
toupper() { echo -n ${1} | tr a-z A-Z; }
# d: procedure-scoped debug log wrapper (always returns success)
d() { debug PROCEDURE "$1"; return 0; }
dstate()
{
    # variable state debug tracer — disabled for now
    # (when enabled: d "STATECHK: $1 is currently: ${!1}")
    return
}
dfunc()
{
    # Debug helper: reports which function invoked us, from what line, and
    # by whom — richer than bash's builtin `caller`. Call with no arguments
    # at the top of a function; pass the function's args to include them.
    local with_args
    if [[ $1 ]]
    then
        with_args=" with arguments \"$@\""
    else
        with_args=""
    fi
    d "FUNCTION: ${FUNCNAME[1]}() called at line ${BASH_LINENO[1]} by ${FUNCNAME[2]}${with_args}"
}
# print a message and block until the user presses enter (line read into $REPLY)
pause() { echo "$*"; read; }
debug_report_values()
{
    # Dump name=value pairs for each variable named in the arguments,
    # then wait for the user to press enter.
    local banner="================================================================================"
    echo "$banner"
    local name_list=$@
    local valname
    for valname in $name_list
    do
        echo "$valname=${!valname}"
    done
    echo "$banner"
    echo "enter to continue..."
    read answer
}
debug_report_next()
{
    # Show the command about to be executed and pause for confirmation.
    local rule="================================================================================"
    echo "$rule"
    echo "about to execute:"
    echo "$*"
    echo "$rule"
    echo "enter to continue..."
    read answer
}
detect_repo_type()
{
    # Determine the VCS type for a profile/procedure path or URL.
    # Precedence: explicit -r override (var_REPO_TYPE, with the hg/svn
    # aliases normalized to their canonical names), then URL/path pattern
    # detection. Echoes git|mercurial|subversion and returns 0, or returns 1
    # when no supported type applies (callers each handle that case).
    local repo_type=${var_REPO_TYPE:-} # from optional -r command line argument
    local test_path="$1"
    if [[ -z ${repo_type} ]]; then # no repo type set on command line
        if [ $(expr "$test_path" : 'https*://.*github.com') -gt 0 ]; then repo_type=git
        elif [ $(expr "$test_path" : '.*/git/') -gt 0 ]; then repo_type=git
        elif [ $(expr "$test_path" : 'https*://.*bitbucket.org') -gt 0 ]; then repo_type=mercurial
        elif [ $(expr "$test_path" : '.*/hg/') -gt 0 ]; then repo_type=mercurial
        elif [ $(expr "$test_path" : '.*/mercurial/') -gt 0 ]; then repo_type=mercurial
        elif [ $(expr "$test_path" : 'https*://code.google.com') -gt 0 ]; then repo_type=subversion
        elif [ $(expr "$test_path" : '.*/trunk/') -gt 0 ]; then repo_type=subversion
        elif [ $(expr "$test_path" : '.*/svn/') -gt 0 ]; then repo_type=subversion
        fi
    elif [[ "${repo_type}" == "hg" ]]; then
        repo_type="mercurial"
    elif [[ "${repo_type}" == "svn" ]]; then
        repo_type="subversion"
    fi
    # final validation that no unsupported type was forced via -r.
    # FIX: accept "subversion" — auto-detection and the svn alias both
    # produce the canonical name, which the original pattern
    # (git|mercurial|svn) could never match, so svn repos always failed.
    case $repo_type in
        git|mercurial|subversion) echo -n "$repo_type"; return 0 ;;
        *) return 1 ;; # must handle this error differently in different callers
    esac
}
# ----------------------------------------------------------------------
# lib-fileprocessing
# ----------------------------------------------------------------------
# functions related to loading and processing configuration profiles
#
process_defaults_from_prefix()
{
    # For every variable named ${default_prefix}FOO, ensure plain FOO holds a
    # value: if the profile left FOO empty/unset, copy the default into it
    # (arrays are detected and copied as arrays); otherwise keep the profile's
    # value. Finally reports which source supplied each setting via inform.
    # $1 - the default variable prefix, e.g. "default_rc_"
    local procedure_items=
    local profile_items=
    local default_prefix=$1
    # indirect prefix expansion: builds e.g. "${!default_rc_@}" for a prefix
    # only known at runtime (hence the eval)
    eval local defaults_list=\"\${!${default_prefix}@}\"
    for default_value_name in $defaults_list
    do
        # strip the prefix to get the plain variable name (FOO)
        eval local value_name=\${default_value_name#${default_prefix}}
        if [[ -z ${!value_name} ]]
        then # the non-default VALUENAME is null; assign it the default_ value
            # NOTE(review): array detection greps `declare` output for "name=(";
            # presumably adequate here, but it would also match a scalar whose
            # value text begins with "(" — confirm profiles can't contain such
            if declare | grep -q "$default_value_name=("
            then # array
                eval eval $value_name="(\${$default_value_name[*]})"
                #eval "echo $value=\"(\${$value[*]})\"" # DEBUG
            else # normal variable
                eval $value_name=\'${!default_value_name}\'
            fi
            eval "procedure_items+=\"Using default procedure value for \$value_name: \${$value_name[*]}\\n\""
        else
            eval "profile_items+=\"Using custom profile value for \$value_name: \${$value_name[*]}\\n\""
        fi
    done
    inform "$procedure_items\n$profile_items"
}
write_rc_values_from_defaults()
{
  # Mirror every "default_rc_*" setting into /etc/rc.conf.
  #
  # process_defaults_from_prefix has normally already run, so each plain
  # VALUENAME variable holds either the default_rc_VALUENAME value or a
  # custom profile value. Here we iterate over the default_rc_ names only to
  # learn *which* value names belong in rc.conf, then write the resolved
  # VALUENAME value for each. Dies on any failed write.
  local default_prefix="default_rc_"
  local default_value_name value_name
  # bash prefix expansion replaces the eval-built name list used elsewhere
  for default_value_name in "${!default_rc_@}"
  do
    value_name=${default_value_name#${default_prefix}}
    _config set /etc/rc.conf $value_name "${!value_name}" || die_error "CRITICAL: failed to write $value_name to /etc/rc.conf"
  done
  return 0
}
source_and_overlay()
{
  # Source one profile file with var_OVERLAY_PATH pointed at the profile's
  # sibling "overlay" directory, restoring the previous overlay path after.
  # $1: absolute path to the profile file (load_profile has already confirmed it)
  # TODO: consider refactoring this to an array
  dfunc "$*"
  local revert_overlay=$var_OVERLAY_PATH
  var_OVERLAY_PATH="$(dirname "$1")/overlay"
  # inform "Sourcing profile:\n${1#$var_TEMP_DIR}"
  local divider="----------------------------------------------------------------------"
  # BUGFIX: was 'source && inform || die_error' (SC2015) -- a failing inform
  # after a *successful* source would have triggered die_error
  if source "$1"
  then
    inform "\n${divider}\nSuccessfully loaded profile:\n${1#$var_TEMP_DIR}\n${divider}\n"
  else
    die_error "Failed to process profile \"$1\"."
  fi
  var_OVERLAY_PATH=$revert_overlay
}
make_local ()
{
  # Ensure the repo containing a remote path is available locally.
  # $1: full remote path (usually a file *inside* a repo, possibly a raw-file URL)
  # $2: optional variable name to receive the resulting local path
  #     (otherwise the local path is echoed)
  # Dies if the repo cannot be cloned at any trim level.
  local __returnvarname=${2:-} # can take a variable name to assign result to (otherwise returns value via echo)
  dfunc
  # repo remote is almost certainly never going to be an actual repo path
  # but rather a full path to a file in a repo. This is complicated by the fact that we want to provide aif an actual file for it's initial procedure and
  # subsequently use that repo information (extracted from the raw path). The infix values below should clean this up for us, but needs more testing
  local full_path_remote=$1
  # check repo type
  # BUGFIX: this was 'local repo_type=$(...) || die_error ...'; 'local' itself
  # always succeeds, masking detect_repo_type's status so the die_error could
  # never fire. Declare first, assign after.
  local repo_type
  repo_type=$(detect_repo_type "$full_path_remote") || die_error "Unexpected error detected repository type from $full_path_remote"
  # now set the repo clone command, infix to strip out github/bitbucket raw file path values, and any suffix required to clone (.git)
  local repo_command=
  local repo_suffix= # TODO(review): set but never appended to the clone URL -- confirm intent
  local repo_infix=
  case $repo_type in
    git)
      # BUGFIX: --depth=0 is rejected by git (depth must be a positive number);
      # use the shallow-clone depth of 1 that was clearly intended
      repo_command="git clone --depth=1"
      repo_suffix=".git"
      repo_infix="raw[./][^/]*/" # raw/master/
      ;;
    hg|mercurial)
      # note that repo type should never be hg at this point
      repo_command="hg clone"
      repo_infix="raw[./][^/]*/[^/]*/" # raw/32dc76fda832/src/
      ;;
    svn|subversion)
      # BUGFIX: detect_repo_type emits the canonical name "subversion", which
      # the old "svn)" pattern never matched, leaving repo_command empty
      repo_command="svn checkout"
      # google code, at least, doesn't stick in any infixes for the raw file
      # but does present a different url for the raw file. this url works for cloning, however
      # http://code.google.com/p/googlecl/source/browse/trunk/INSTALL.txt
      # http://googlecl.googlecode.com/svn/trunk/INSTALL.txt
      # I haven't tested other svn services yet
      ;;
  esac
  # strip infix out of remote path, allowing us to just use makelocal after this
  # all paths passed to this function must be treated as suspect as any of them
  # could be raw format paths. for example, this converts the following github url:
  # PRE INFIX STRIP:  http://github.com/altercation/figtree/raw/master/procedures/automatic/systems/lenovo/x220
  # POST INFIX STRIP: http://github.com/altercation/figtree/procedures/automatic/systems/lenovo/x220
  full_path_remote=$(echo -n "$full_path_remote" | sed "s^$repo_infix^^")
  # at this point, we have cleaned up our full remote path and identified what kind of repo it is
  # the repo may still be already cloned locally. normally if the user specifies a full remote
  # procedure path and then a relative path for the profile, we will add the local repo module
  # root to our var_MODULE_PATHS array and won't normally have to double check if the repo is already
  # present. However, if someone passes identical repo paths in both the aif procedure command line
  # argument and the profile argument (or embedded in a depend_profile command in a profile), we
  # need to be ready to handle it here
  # make a local path for the profile file for an initial file check and, failing that, cloning, for example:
  # REMOTE: http://github.com/altercation/figtree/procedures/automatic/systems/lenovo/x220
  # LOCAL:  /tmp/aif/figtree-temp/github.com/altercation/figtree/procedures/automatic/systems/lenovo/x220
  # (the substring offset skips the "http://"/"https://" protocol prefix)
  local full_path_local="$var_TEMP_DIR/${full_path_remote:$(expr "$full_path_remote" : 'https*://')}"
  # we can now use that profile path to clone the remote repo by cloning, failing, and trimming until we get down to the actual repo root.
  # this is a bit brute force and we could be smarter about it if we wanted to supply an actual template for the repo root, for example
  # github's pattern is github.com/username/reponame
  # that's left for a later optimization round. even with that we should support private repository urls (or unknown or less popular services)
  # that have unknown URL formats. brute force try/trim/iterate does this for now.
  local repo_remote="$full_path_remote" # initial repo_remote assignment
  local repo_local="$var_TEMP_DIR/${repo_remote:$(expr "$repo_remote" : 'https*://')}"
  while [[ -n "$repo_remote" && ${#repo_remote} -gt 1 ]]; do # loop till we trim too much, indicating profound failure
    if [[ -e "$full_path_local" ]]; then # found local file
      # BUGFIX: return the *local* path (was echoing the remote path, which
      # contradicts both the purpose of this function and the comment below)
      [[ "$__returnvarname" ]] && eval $__returnvarname="'$full_path_local'" || echo "$full_path_local"
      return 0
    elif $repo_command $repo_remote $repo_local &>$LOG; then # successful clone
      # we return the local path and load_profile can handle adding it to the path
      [[ "$__returnvarname" ]] && eval $__returnvarname="'$full_path_local'" || echo "$full_path_local"
      return 0
    else # clone failed
      rm -rf "$(dirname $repo_local)" # clean up bad repo path
    fi
    # trim and retry
    repo_remote=$(dirname "$repo_remote")
    repo_local="$var_TEMP_DIR/${repo_remote:$(expr "$repo_remote" : 'https*://')}"
  done; die_error "failed to download repo from path $1"
}
update_modulepaths_from_profilepath()
{
dfunc "$*"
# Derive a module root from a profile path and prepend it to var_MODULE_PATHS
# (so later relative profile lookups search it first). The root is found by
# walking up the path until a "known" subdirectory name is seen; its parent
# is taken as the module root. Dies if no known subdirectory is found.
# expects parameters:
# $1: a local absolute path or http remote *profile* path we need to make some assumptions about either the depth of all profile paths or standard names
# such as procedure, systems, etc. this is the trickiest assumption we make and the only other option I see is to never allow a relative path, which is silly.
# requires state: $var_KNOWN_SUBDIRS
# modifies state: $var_MODULE_PATHS
local module_path=${1%profile} # strip profile
module_path=${module_path%/} # strip trailing slash
# NOTE: '>' inside [[ ]] is a string comparison; for decimal lengths vs "1"
# it happens to agree with the numeric test, so behavior is as intended
while [[ -n "$module_path" && ${#module_path} > 1 ]]; do
for known_subdir in ${var_KNOWN_SUBDIRS[@]}; do
if [[ "$(basename $module_path)" == "$known_subdir" ]]
then
# we have a match and assume we are one level away from root || trim and loop
module_path="$(dirname $module_path)"
# now check to see if this path is already in var_MODULE_PATHS
for test_path in ${var_MODULE_PATHS[@]}; do [[ "$test_path" == "$module_path" ]] && return 0; done
# still here, add path to var_MODULE_PATHS
# prepend so the newest module root is searched first
var_MODULE_PATHS=("$module_path" "${var_MODULE_PATHS[@]}")
d "STATECHK: var_MODULE_PATHS updated to: ${var_MODULE_PATHS[*]}"
return 0
fi
done
module_path="$(dirname $module_path)" # no match, trim a level
done
# if we are still here then we didn't find a "known subdir"... best to error out here, though another option is to just strip the last two / three
# path components. we could try to standardize all "known" subdir formats to: modulename/knownsubidr/category/specificprofile, eg:
# figtree/systems/lenovo/x220 or figtree/environments/xmonad/es
die_error "failed to find known subdirectory (one of ${var_KNOWN_SUBDIRS[@]}) in path $1;"
}
load_profile()
{
  # Resolve and source a profile, handling absolute local paths, remote URLs
  # (cloned via make_local), and relative paths (tried against each module
  # root in var_MODULE_PATHS). Guards against sourcing the same profile twice.
  dfunc "$*"
  # arguments:
  # $1 (assigned to profile_path): a path, either relative, absolute local or remote URL
  # $2 is the *original* type of profile (relative, remote, absolute) and is only set from inside the function
  # expects/uses state:
  # OVERLAY: current value of OVERLAY directory path
  # var_MODULE_PATHS: array of paths to use, in order, as module roots with relative paths
  # var_LOADED_PROFILES: space separated list of profiles already sourced (loop guard)
  local profile_path=${1%/} # strips any trailing slash
  local profile_type=${2:-} # only used internally in this function
  if [[ "${profile_path:0:1}" == "/" ]]; then # absolute local path
    # add to module paths (thus if we've downloaded a repo, we'll check locally next time)
    [[ "${profile_type}" != "relative" ]] && update_modulepaths_from_profilepath "$profile_path"
    [[ -d "${profile_path}" ]] && profile_path="$profile_path/profile"
    if [[ -f "${profile_path}" ]]
    then
      # BUGFIX/idiom: was 'if $(echo ... | grep -q ...)' (SC2091), which only
      # worked via the obscure "empty command keeps the last substitution's
      # status" rule; run the pipeline directly instead
      if echo -n "$var_LOADED_PROFILES" | grep -q "$profile_path"
      then
        show_warning "Profile load loop!" "The following profile has
been previously sourced; sourcing again will result in a loop;
skipping: $profile_path"
      else
        var_LOADED_PROFILES+="$profile_path "
        source_and_overlay "$profile_path" && return 0 || return 1
      fi
    else
      return 1
    fi
  elif [[ "${profile_path:0:4}" == "http" ]]; then # remote url, could be http or https with this match
    if make_local $profile_path profile_path_local
    then
      d "PROFILES: preparing to load profile $profile_path_local"
      load_profile "$profile_path_local" ${profile_type:-http} || return 1
    else
      die_error "should definitely not reach this error. there was a problem getting the local profile path from a remote repo."
    fi
  else # assumed relative path (e.g. profiles/profilename)
    for module_root in ${var_MODULE_PATHS[@]}; do load_profile "${module_root%/}/${profile_path#/}" "relative" && return 0; done; return 1
  fi
}
select_profile ()
{
  # Placeholder for a future interactive profile chooser; currently a no-op
  # that always succeeds.
  :
}
write_overlay_file()
{
# Copy one overlay file into the install target.
# $1: an overlay entry of the form "<overlay_root>//<target path>" -- the
#     literal "//" separator is inserted by overlays() when the entry is
#     recorded, and split apart again here
local overlay_file="$1"
# collapse the first "//" to "/" to get the real source path under the overlay root
# NOTE(review): from_path/to_path are not declared 'local' and leak into
# global scope -- presumably harmless here, but worth confirming
from_path="${overlay_file/\/\///}" # command line *requires* last slash to be escaped like this: from_path="${overlay_file/\/\//\/}"
# everything after the "//" marker is the destination path inside the target
to_path="$var_TARGET_DIR/${overlay_file#*\/\/}"
if [[ ! -f "$from_path" ]]
then
show_warning "NO OVERLAY FILE SOURCE" "Profile overlay requests non-existant source file: \"$from_path\""
return 1
fi
# install: -C skips identical files, -D creates parent dirs, -T treats the
# destination as a file, -S backs up any replaced file as "*.figtree.old"
if install -C -D -S ".figtree.old" -v -T "$from_path" "$to_path" &>$LOG # optional: --preserve-context for selinux compliance
then
inform "Successful write of overlay file \"$overlay_file\""
return 0
else
show_warning "CRITICAL OVERLAY FILE ERROR" "Failed to overlay file from \"$from_path\" to \"$to_path\""
return 1
fi
}
# ----------------------------------------------------------------------
# lib-profiles
# ----------------------------------------------------------------------
# functions to support figtree profiles
# load (possibly many, possibly a tree of) profiles, local, relative, or remote paths
depend_profile() { [[ -z "${var_SINGLEPROFILE}" ]] && load_profile "$1"; }
# The following are the main set of additional commands available to figtree profiles
# and build up lists of commands that will be executed later by workers (thus the
# commands that are represented in the profile aren't "truly" executed till the
# appropriate phase/worker in aif).
#
# Usage examples:
#
# packages packagename [packagename...]
# package_groups groupname [groupname...]
# blacklist packagename [packagename...] (same as blacklisting a package)
# config set /config/file/path VALUENAME "value to set"
# config unset /config/file/path VALUENAME (comments out the value)
# daemons add daemonname [@daemonname] [!daemonname]
# daemons remove daemonname [daemonname...]
# modules add modulename [modulename...]
# modules remove modulename [modulename...]
# networks add networkname [networkname...]
# networks remove networkname [networkname...]
# overlay /install/path/file [/another/file/here]
# (place the file in the overlay directory or use the
# figtree module partial-update-overlay procedure)
# code 'misc command to be run at end of installation goes-here'
# kernel_params parameter list here
#
# plural and singular forms of each command are equivalent in order to
# eliminate a common error in the config syntax
# packages|package package_groups|package_group blacklist|blacklists
# config|configs daemons|daemon modules|module networks|network
# kernel_params|kernel_param coda|codas overlays|overlay
#
# the following are equivalent and may be used interchangeably
# add==set unset==remove==delete
# Each command below just appends its arguments to a global list/array that
# the install workers consume later; the singular forms delegate to the
# plural forms so either spelling works in a profile.
packages() { dfunc; [[ -n "$*" ]] && var_TARGET_PACKAGES+="$* "; }
package() { packages "$*"; }
package_groups(){ dfunc; [[ -n "$*" ]] && var_TARGET_GROUPS+="$* "; }
package_group() { package_groups "$*"; }
blacklist() { dfunc; [[ -n "$*" ]] && var_TARGET_PACKAGES_EXCLUDE+="$* "; }
blacklists() { blacklist "$*"; }
# the following queue deferred "_<name> args" commands in var_CONFIG_CHANGES;
# the _ prefixed workers run them during the configuration phase
config () { dfunc; [[ -n "$*" ]] && var_CONFIG_CHANGES+=("_config $*"); }
configs() { config "$*"; }
daemons() { dfunc; [[ -n "$*" ]] && var_CONFIG_CHANGES+=("_daemons $*"); }
daemon() { daemons "$*"; }
modules() { dfunc; [[ -n "$*" ]] && var_CONFIG_CHANGES+=("_modules $*"); }
module() { modules "$*"; }
networks() { dfunc; [[ -n "$*" ]] && var_CONFIG_CHANGES+=("_networks $*"); }
network() { networks "$*"; }
# Queue a deferred kernel-parameter write (executed later via _kernel_params).
kernel_params() { dfunc; [[ -n "$*" ]] && var_CONFIG_CHANGES+=("_kernel_params $*"); }
# BUGFIX: this delegated to undefined 'kernelparams' (missing underscore),
# so the singular form always failed with "command not found"
kernel_param() { kernel_params "$*"; }
# coda: queue arbitrary shell commands to run at the end of installation
coda() { dfunc; [[ -n "$*" ]] && var_CODA_COMMANDS+=("$*"); }
codas() { coda "$*"; }
# overlay entries are recorded as "<overlay_root>//<target path>"; the "//"
# marker is split back apart later by write_overlay_file
overlays() { dfunc; [[ -n "$*" ]] && for overlay_file in $*; do var_OVERLAY_FILES+=" $var_OVERLAY_PATH//${overlay_file#/}"; done; }
overlay() { overlays "$*"; }
# all _functions are identical to the non _ variants used in profile files.
# The profile commands (config, daemons, etc.) that have _ equivalents are
# used to compile lists of _function commands
# NOTE: config_switch derives the target value name from the *caller's*
# function name (FUNCNAME[1]), so these must remain thin direct wrappers.
_config () { config_switch "$@"; }
_daemons () { config_switch "$@"; }
_modules () { config_switch "$@"; }
_networks () { config_switch "$@"; }
_kernel_params () { kernel_parameter_write "$@"; }
# ----------------------------------------------------------------------
# lib-config
# ----------------------------------------------------------------------
# functions to work with configuration files and values
#write_configs() { dfunc; for config_command in $CONFIG_CHANGES; do $config_command; done }
get_value()
{
  # Look up "name=value" in a config file, strip surrounding quotes, and
  # either echo the value or assign it to the variable named in $3.
  # Returns 1 when no matching line is found.
  # get_config /dir/sub/filename valuename [returnvariable]
  local __returnvarname=${3:-}
  local config_file="$1"
  local config_name="$2"
  local config_value=
  [[ ! -f "$config_file" ]] && die_error "Shouldn't reach this error: get_config failed to find config file \"$config_file\"; this should be checked in caller."
  # 'local' deliberately absorbs grep's status (no match is handled below)
  local config_line=$(grep -E "^[[:space:]]*${config_name}" "${config_file}")
  if [[ -n $config_line ]]
  then
    config_value=$(echo -n $config_line | sed "s/.*$config_name.*=[[:space:]]*\(.*\)[[:space:]]*$/\1/")
  else
    return 1
  fi
  config_value="$(unquote "$config_value")"
  if [[ ! "$__returnvarname" ]]
  then
    echo -n "$config_value"
  else
    eval $__returnvarname=\"$config_value\"
  fi
}
# TODO: could make this (possibly too) clever by looking for commented out
# lines and reusing those, but it's probably better to spend that effort
# looking at augeas as a replacement
config_value()
{
# Set or comment out a scalar "name=value" entry in a config file.
# $1: method (set|unset)  $2: config file path  $3: value name  $*: new value
# Values are double quoted unless they start with "(" (array literals, as
# written by config_list, must not be quoted).
# set_config /config/file/path config_item_name config_item_value
local config_method="$1"; shift; local config_file="$1"; shift; local config_name="$1"; shift; local config_value="$*"
[[ ! -f "$config_file" ]] && show_warning "MISSING CONFIG FILE" "Config file \"$config_file\" not found; skipping" && return 1
# quote holds the two characters \" so it can be embedded in the sed
# replacement below; ${quote:1:1} later extracts just the bare " character
[[ "${config_value:0:1}" == "(" ]] && local quote= || quote='\"'
if get_value "$config_file" "$config_name" current_value
then
# value already present: rewrite (set) or comment out (unset) its line
case $config_method in
set) sed -i "s+\(^\s*${config_name}=\).*$+\1${quote}${config_value}${quote}+" "${config_file}" ;;
unset) sed -i "s+\(^\s*${config_name}=.*$\)+#\1 # automatically commented out+" "${config_file}" ;;
esac
else
# value absent: append on set, nothing to do on unset
case $config_method in
set) echo -e "\n${config_name}=${quote:1:1}${config_value}${quote:1:1} # automatically appended" >> "$config_file" ;;
unset) : ;;
esac
fi
}
config_list ()
{
  # Merge items into (set) or remove items from (unset) a shell-array value
  # such as DAEMONS=(...) in a config file, preserving decorations (@, !) on
  # the incoming items when comparing against existing entries.
  # config_list set /config/file/path ARRAYNAME list values go here
  local config_method="$1"; shift; local config_file="$1"; shift; local config_name="$1"; shift; local config_list="$*"
  [[ ! -f "$config_file" ]] && d "Missing config file" "Config file \"$config_file\" not found; skipping" && return 1
  local current_list=
  # BUGFIX: this used to read 'if ! $(get_value ... && current_list=$(...))';
  # the $( ) ran everything in a subshell, so current_list never reached this
  # function and the merge below always started from an empty list (SC2091)
  if get_value "$config_file" "$config_name" current_list
  then
    current_list=$(strip_punct "$current_list")
  else
    # failed to find array value in target file. a common example of this
    # is "NETWORKS" in /etc/rc.conf, which is by default commented out. I
    # probably should just uncomment as I used to do. TODO: uncomment when
    # appropriate (check into augeas first before implementing that again)
    # BUGFIX: '( inform ...; return 0 )' only returned from the subshell, so
    # the function fell through into the merge loop after writing the new
    # value; use a real if/else with function-level returns instead
    if config_value set "$config_file" "$config_name" "($config_list)"
    then
      inform "new value \"$config_name\" written to $config_file"
      return 0
    else
      return 1
    fi
  fi
  local config_list_item current_list_item new_list
  for config_list_item in $config_list; do
    new_list=
    for current_list_item in $current_list; do
      # compare items with punctuation stripped so "@sshd" matches "sshd"
      if [[ "$(strip_punct $config_list_item)" == "$(strip_punct $current_list_item)" ]]; then
        # on set, the incoming (decorated) item replaces the existing one;
        # on unset the item is simply dropped
        [[ "$config_method" == "set" ]] && new_list+="$config_list_item "
        config_list_item=
      else
        new_list+="$current_list_item "
      fi
    done
    # append the incoming item if it wasn't matched (empty if it was)
    new_list+="$config_list_item"
    current_list=$new_list
  done
  new_list="(${current_list% })"
  config_value set "$config_file" "$config_name" "$new_list"
}
config_switch ()
{
  # Dispatcher behind the _config/_daemons/_modules/_networks workers.
  # The target value name is derived from the *caller's* function name via
  # FUNCNAME[1], so this logic must stay inline (no helper functions, which
  # would change the FUNCNAME depth).
  # config set /config/file/path CONFIG_ITEM_NAME config value here
  local method=$1; shift
  local target_value=$(echo -n ${FUNCNAME[1]#_} | tr a-z A-Z)
  local target_function file_path
  if [[ "$target_value" == "CONFIG" ]]
  then
    # explicit form: the next two args are the file path and the value name
    target_function="config_value"
    file_path="${var_TARGET_DIR}${1}"; shift
    target_value="$1"; shift
  else
    # daemons/modules/networks are array values that all live in rc.conf
    target_function="config_list"
    file_path="$var_TARGET_DIR/etc/rc.conf"
  fi
  # BUGFIX/idiom: the success/failure arms used '( inform; return 0 )' style
  # subshells -- 'return' there only exited the subshell and the overall status
  # was right merely by accident of position; use real if/else + return
  case $method in
    set|add)
      if $target_function set $file_path $target_value $*
      then
        inform "CONFIG SUCCESS: value \"$target_value\" set to \"$*\" in $file_path"
        return 0
      else
        show_warning "CONFIG ERROR" "Failed to write value \"$target_value\" set to \"$*\" in $file_path"
        return 1
      fi
      ;;
    unset|remove|delete)
      if "$target_function" unset $file_path $target_value $*
      then
        inform "CONFIG SUCCESS: value \"$target_value\" removed from $file_path"
        return 0
      else
        show_warning "CONFIG ERROR" "Failed to remove value \"$target_value\" from $file_path"
        return 1
      fi
      ;;
  esac
}
kernel_parameter_write()
{
# Append kernel boot parameters ($*) to the first kernel line of every boot
# loader config file present in the target (grub legacy menu.lst and/or
# syslinux.cfg). Returns 0 if at least one file was updated, 1 otherwise.
# TODO: identify bootloader?
# TODO: comment out line instead of just rewriting
# grub 1 we change /boot/grub/menu.lst first 'kernel' line
# grub 2 we change in /etc and then regen the grub config, if I remember correctly
# syslinux we change /boot/syslinux/syslinux.cfg first APPEND
local boot_loader_append=$(echo -n "$*" | sed "s_[^\]/_\\\/_g") # escape slashes
local boot_loader_config_files="/boot/grub/menu.lst /boot/syslinux/syslinux.cfg"
# matches a kernel command line: non-comment line containing "root=... ro"
local boot_loader_identifier='^\s*\w.*root=.* ro'
local boot_loader_changed_return=1
for boot_loader_config_file in $boot_loader_config_files
do
local target_boot_loader_config_file="$var_TARGET_DIR/$boot_loader_config_file"
if [[ -f "$var_TARGET_DIR/$boot_loader_config_file" ]]
then
# "0,/pat/" limits the substitution to the first matching line only
if sed -i "0,/\($boot_loader_identifier\).*$/ s//\1 $boot_loader_append/" $target_boot_loader_config_file
then
inform "successfully appended kernel parameters in $target_boot_loader_config_file"
boot_loader_changed_return=0
else
show_warning "KERNEL PARAM WRITE ERROR" "Failed to write kernel parameters to file $target_boot_loader_config_file"
return 1
fi
else
inform "$target_boot_loader_config_file not present; skipping"
fi
done
return $boot_loader_changed_return
}
# ----------------------------------------------------------------------
# lib-software
# ----------------------------------------------------------------------
# functions that override or augment aif core lib-software.sh
rank_mirrors()
{
  # Optimize the pacman mirrorlist with rankmirrors.
  # uses global state $var_RANKMIRRORS: number of top mirrors to keep;
  # 0/empty/unset disables ranking entirely
  # $1: "runtime" to operate on the live system root instead of $var_TARGET_DIR
  local mirrorsranked=
  # declare unconditionally so an inherited global config_root can't leak in
  # when $1 == runtime (the old conditional 'local' left it undeclared)
  local config_root=
  [[ "${1:-}" != "runtime" ]] && config_root="$var_TARGET_DIR"
  # BUGFIX: '[[ $var_RANKMIRRORS > 0 ]]' is a lexicographic *string* compare
  # inside [[ ]]; use a numeric test, defaulting an unset value to 0
  if [[ ${var_RANKMIRRORS:-0} -gt 0 ]]; then
    inform "Beginning mirror optimization; this may take a while..."
    local mirrorlist="$config_root/etc/pacman.d/mirrorlist"
    # keep a pristine .original (cp -n never overwrites) and a .prerank copy,
    # uncomment all servers in the .original, then rank and rewrite the list
    # BUGFIX: rankmirrors hardcoded '-n 10' while the skip message documents
    # var_RANKMIRRORS as the number of top mirrors; honor the variable
    cp -n "$mirrorlist" "${mirrorlist}.original" && cp "$mirrorlist" "${mirrorlist}.prerank" && \
    sed -i 's/^#Server/Server/g' "${mirrorlist}.original" && rankmirrors -n "$var_RANKMIRRORS" "${mirrorlist}.original" | tee "$mirrorlist" && \
    mirrorsranked=true
    if [[ -n "$mirrorsranked" ]]; then
      inform "Mirror optimization complete."
      return 0
    else
      show_warning "RANKMIRRORS ERROR" "Mirror optimization failed."
      return 1
    fi
  else
    inform "Mirror optimization skipped\nSet RANKMIRRORS to a non zero value number of top mirrors, 10 is a good number, in a profile if you wish to run during install."
  fi
}
installaur()
{
# Download, build (makepkg), and install one package from AUR into the
# install target. Make-time dependencies are installed into the *live*
# environment ($PACMAN); run-time dependencies are installed into the
# target ($PACMAN_TARGET), recursing through installpkg's machinery.
# $1: AUR package name
# NOTE(review): contains deliberate DEBUG echo/sleep statements and several
# commented-out '#return 1' error paths -- failures after wget/tar fall
# through rather than aborting; presumably intentional during development.
# helper for our custom installpkg
# pause "INSTALL $1 FROM AUR?"
inform "Downloading $1 from AUR..."
local aur_package="$1"
# if [[ `$PACMAN_TARGET -Q "^$aur_package$" &>$LOG` ]]; then
# notify "$aur_package already installed; skipping"
local build_dir="${var_TARGET_DIR}/tmp/${aur_package}-install" #$var_TEMP_DIR/$aur_package*
if ! $PACMAN_TARGET -Qs "^$aur_package$" &>/dev/null
then
# start from a clean build directory, remembering where to cd back to
[[ -e "$build_dir" ]] && rm -rf $build_dir
local restore_dir=`pwd`
mkdir -p $build_dir &>$LOG && cd $build_dir &>$LOG
if ! wget "http://aur.archlinux.org/packages/${aur_package}/${aur_package}.tar.gz" &>$LOG
then
show_warning "AUR Install" "Failed to wget $aur_package from AUR."
#return 1
elif ! tar -xzvf "${aur_package}.tar.gz" &>$LOG
then
show_warning "AUR Install" "Failed to extract archive for $aur_package."
#return 1
else
# get list of dependencies
# for each item, install
cd "$aur_package" #&>$LOG
local dep_list
local make_dep_list
# read makedepends=( ... ) from the PKGBUILD
if get_value "PKGBUILD" "makedepends" make_dep_list
then
make_dep_list="$(strip_punct "$make_dep_list")" # parens, but packages might still have single quotes
d "Found AUR package make depends list $make_dep_list..."
else
d "No make depends packages listed in PKGBUILD for ${aur_package}..."
fi
# TODO: this doesn't source make_dep_pkg from AUR...
# make-time deps go into the live environment via $PACMAN
for make_dep_pkg in $make_dep_list
do
make_dep_pkg="$(strip_punct $make_dep_pkg)"
if ! $PACMAN -Qs "^$make_dep_pkg$" &>$LOG
then
inform "Installing $make_dep_pkg for use during installation..."
if $PACMAN -Sy --noconfirm --needed $make_dep_pkg &>$LOG
then
d "GOOD MAKE DEPS INSTALL FOR $make_dep_pkg"
else
show_warning "MAKEDEPENDS Install Failer" "BAD INSTALL FOR $make_dep_pkg; see LOG" #DEBUG
return 1
fi
else
d "Skipping $make_dep_pkg; already installed."
fi
done
# read depends=( ... ) from the PKGBUILD; these go into the target
if get_value "PKGBUILD" "depends" dep_list
then
dep_list="$(strip_punct "$dep_list")" # parens, but packages might still have single quotes
inform "Found AUR package depends list $dep_list..."
else
inform "No depends packages listed in PKGBUILD for ${aur_package}..."
fi
for dep_pkg in $dep_list
do
dep_pkg="$(strip_punct $dep_pkg)"
if ! $PACMAN_TARGET -Qs "^$dep_pkg$" &>$LOG
then
inform "Installing $dep_pkg for use during installation..."
# TODO: the pacman -Qs check should be made a helper
# function? we should do it again here since dependencies
# might be in AUR as well
# if $PACMAN_TARGET -Sy --noconfirm --needed $dep_pkg &>$LOG
if run_controlled pacman_installpkg "$PACMAN_TARGET --noconfirm -S $dep_pkg" $TMP_PACMAN_LOG "Installing ${aur_package} dependency $dep_pkg..."
then
d "GOOD DEPS INSTALL FOR $dep_pkg"
else
show_warning "DEPEND Install Failer" "BAD INSTALL FOR $dep_pkg; see LOG" #DEBUG
return 1
fi
else
d "Skipping $dep_pkg; already installed."
fi
done
# TODO:
# there are some unknowns here
# 1. are we running out of space during install? is this is
# a problem if using /tmp instead of /mnt/tmp? are we sure /mnt/tmp
# is properly mounted to install target? Does it matter and should
# we just use /tmp?
# 2. is our attempt to make the PACMAN environment variable
# PACMAN_TARGET for makepkg -s working? (this should be testable)
# 3. do we need to skip the makepkg -s dependency checks and just
# install the packages ourselves? This related to the next item...
# 4. If we are doing a manual makedeps check above, does makepkg -s
# repeat this and install those items on the target system? We
# don't necessarily *need* them there (though they might be useful
# if we need to upgrade/install later). What's the status of those
# makedepends items on the target?
# (testing with weechat-git)
echo "TEST: >>>>>SRCDEST=$SRCDEST && PKGDEST=$PKGDEST"
# keep makepkg's source/package output inside the build dir
SRCDEST="$build_dir/$aur_package/src"
PKGDEST="$build_dir/$aur_package/pkg"
echo "TEST: >>>>>SRCDEST=$SRCDEST && PKGDEST=$PKGDEST"
echo "proceeding in 3 sec"
sleep 3
# point makepkg's PACMAN env var at the target pacman for the build
local PACMAN_REVERT="$PACMAN"
export PACMAN="$PACMAN_TARGET" # makepkg -s uses PACMAN env variable and we try to make sure we're using PACMAN_TARGET HERE; might not work? DEBUG
#if ! makepkg -s --asroot --noconfirm #&>$LOG
if ! makepkg --syncdeps --asroot --noconfirm --clean --cleancache #&>$LOG
#if ! makepkg --nodeps --asroot --noconfirm --clean --cleancache #&>$LOG
then
show_warning "AUR Install" "Failed to makepkg for $aur_package"
#return 1
else
run_controlled pacman_installaur "$PACMAN_TARGET --noconfirm -U *.pkg.tar.xz" $TMP_PACMAN_LOG "Installing $aur_package..."
fi
local PACMAN=$PACMAN_REVERT
fi
#ask_yesno "Do you want to continue?" $default_answer || exit # DEBUG
# restore cwd and clean up the build directory
cd $restore_dir &>$LOG
if [[ -e "$build_dir" ]]
then
echo "DEBUG: about to rm install dir..."
#read
sleep 2
rm -rf $build_dir &>$LOG
echo "DEBUG: completed rm install dir..."
#read
else
echo "DEBUG: skipped rm install dir due to failed existence check..."
sleep 1
#read
fi
else
die_error "SHOULDN'T BE HERE: installaur() has been passed a package that pacman can find as well: $aur_package"
fi
}
# perform package installation to the target system
# overriding this function from aif core's lib-software.sh so that we can fail
# over to AUR install without having to specify if a package is AUR or not
installpkg() {
# Install all requested packages/groups into the target, overriding AIF
# core's installpkg: packages found in the official repos go through pacman,
# anything else falls back to installaur() when var_AUR_SUPPORT is set.
# $1: optional pacman command override (assigned to PACMAN_TARGET)
# Reads: var_TARGET_GROUPS, var_TARGET_PACKAGES, var_TARGET_PACKAGES_EXCLUDE,
#        var_AUR_SUPPORT
# Returns: $CONTROLLED_EXIT as set by run_controlled
[[ -n "$1" ]] && PACMAN_TARGET="$1"
ALL_PACKAGES=
# expand groups to their member packages (second awk column)
[ -n "$var_TARGET_GROUPS" ] && ALL_PACKAGES=`list_packages group "$var_TARGET_GROUPS" | awk '{print $2}'`
# drop blacklisted packages by simple string removal
# NOTE(review): substring removal -- an exclusion like "vim" would also strip
# the "vim" out of "gvim"; verify blacklist entries are exact names
if [ -n "$var_TARGET_PACKAGES_EXCLUDE" ]
then
for excl in $var_TARGET_PACKAGES_EXCLUDE
do
ALL_PACKAGES=${ALL_PACKAGES//$excl/}
done
fi
if [ -n "$var_TARGET_PACKAGES" ]
then
[ -n "$ALL_PACKAGES" ] && ALL_PACKAGES="$ALL_PACKAGES $var_TARGET_PACKAGES"
[ -z "$ALL_PACKAGES" ] && ALL_PACKAGES=$var_TARGET_PACKAGES
fi
# normalize whitespace, then remove duplicate entries
ALL_PACKAGES=`echo "$ALL_PACKAGES"`
ALL_PACKAGES=`dedup "$ALL_PACKAGES"`
#ALL_PACKAGES="gromit-mpx $ALL_PACKAGES" #DEBUG
[ -z "$ALL_PACKAGES" ] && die_error "No packages/groups specified to install"
target_special_fs on
notify "Installing packages..."
for package in $ALL_PACKAGES
do
# skip installed; repo packages via pacman; otherwise try AUR (if enabled)
if $PACMAN_TARGET -Qs "^$package$" &>$LOG
then
inform "Skipping: $package already installed"
elif $PACMAN_TARGET -Ss "^$package$" &>$LOG
then
run_controlled pacman_installpkg "$PACMAN_TARGET --noconfirm -S $package" $TMP_PACMAN_LOG "Installing $package..." && good_packages+="$package " || bad_packages+="$package "
elif [[ -z "$var_AUR_SUPPORT" ]]
then
echo "DEBUG: var_AUR_SUPPORT=$var_AUR_SUPPORT"
show_warning "PACKAGE NOT FOUND - $package" "To search AUR for packages not found in official repos, run again with the\n'-a'."
elif installaur "$package"
then
good_packages+="$package "
else
bad_packages+="$package "
fi
done
local _result=''
# CONTROLLED_EXIT is set by run_controlled above
if [ $CONTROLLED_EXIT -ne 0 ]; then
_result="Installation Failed (see errors below)"
echo -e "\nPackage Installation FAILED." >>$TMP_PACMAN_LOG
else
_result="Installation Complete"
echo -e "\nPackage Installation Complete." >>$TMP_PACMAN_LOG
fi
show_warning "$_result" "$TMP_PACMAN_LOG" text || return 1
target_special_fs off
sync
# echo -e "DEBUG: good packages:\n$good_packages"
# echo -e "DEBUG: bad packages:\n$bad_packages"; read
return $CONTROLLED_EXIT
}