Added old scripts

Moved the old scripts from the old github repos. Renamed the various
README files to make it obvious which recipe they relate to.
commit 105108b5ce4f826af81028f6ee61bcfe4a121a6a 1 parent d6c590c
graziano obertelli authored
8 bash/debian/database.yml
@@ -0,0 +1,8 @@
+production:
+ adapter: postgresql
+ database: redmine
+ host: x.x.x.x
+ port: 5432
+ username: redmine
+ password: *********
+ encoding: utf8
9 bash/debian/email.yml
@@ -0,0 +1,9 @@
+production:
+ delivery_method: :smtp
+ smtp_settings:
+ address: smtp-remote-server
+ port: 25
+ domain: eucalyptus.com
+ authentication: :none
+# user_name: redmine
+# password: ********
29 bash/debian/eucabot.README
@@ -0,0 +1,29 @@
+EucaBot Recipe
+--------------
+
+This is a very simple recipe to be used with our test images
+(debian-based). One of EucaBot's most important features is keeping track
+of our meeting notes.
+
+The original scripts and notes are at
+https://projects.eucalyptus.com/redmine/issues/32
+
+Details
+-------
+
+TBD
+
+Usage
+-----
+
+Make sure you have a security group which allows port 22 (SSH) to be
+open:
+
+ euca-add-group -d "eucabot security group" eucabot
+ euca-authorize -p 22 -P tcp -s 0.0.0.0/0 eucabot
+
+When starting with one of our debian-based images you can do something
+like
+
+ euca-run-instances -g eucabot -k XXXX emi-XXXXXX -f eucabot.sh
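+
+Once the instance boots you can follow the script's progress in its
+console output (the instance id below is a placeholder):
+
+ euca-get-console-output i-XXXXX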
+
213 bash/debian/eucabot.sh
@@ -0,0 +1,213 @@
+#!/bin/bash -x
+#
+# Script to install eucabot
+
+# variables associated with the cloud/walrus to use: CHANGE them to
+# reflect your walrus configuration
+WALRUS_NAME="community" # arbitrary name
+WALRUS_IP="FIXME" # IP of the walrus to use
+WALRUS_ID="FIXME" # EC2_ACCESS_KEY
+WALRUS_KEY="FIXME" # EC2_SECRET_KEY
+WALRUS_URL="http://${WALRUS_IP}:8773/services/Walrus/eucabot" # conf bucket
+ARCHIVE_TARBALL="eucabot-archive.tgz" # master copy of the database
+
+MOUNT_POINT="/srv/supybot" # archives and data are on ephemeral
+MOUNT_MOUNT_POINT="N" # whether to mount something there or
+ # just make a directory on the rootfs
+
+# restore data from walrus?
+RESTORE_FROM_WALRUS="Y"
+
+# Modifications below this point are needed only to customize the behavior
+# of the script.
+
+# just sync the date first
+apt-get install --force-yes -y ntpdate
+ntpdate pool.ntp.org
+apt-get install --force-yes -y ntp
+sleep 60
+
+# the modified s3curl to interact with the above walrus
+S3CURL="/usr/bin/s3curl-euca.pl"
+
+# get the s3curl script
+echo "Getting ${S3CURL}"
+curl -s -f -o ${S3CURL} --url http://173.205.188.8:8773/services/Walrus/s3curl/s3curl-euca.pl
+chmod 755 ${S3CURL}
+
+# now let's setup the id for accessing walrus
+echo "Setting credentials for ${S3CURL}"
+cat > /root/.s3curl <<EOF
+%awsSecretAccessKeys = (
+ ${WALRUS_NAME} => {
+ url => '${WALRUS_IP}',
+ id => '${WALRUS_ID}',
+ key => '${WALRUS_KEY}',
+ },
+);
+EOF
+chmod 600 /root/.s3curl
+
+# update the instance
+echo "Upgrading and installing packages"
+export DEBIAN_FRONTEND=noninteractive
+export DEBIAN_PRIORITY=critical
+apt-get --force-yes -y update
+apt-get --force-yes -y upgrade
+
+hostname meetbot.eucalyptus.com
+
+grep -q meetbot /etc/hosts || echo '173.205.188.126 meetbot.eucalyptus.com meetbot' >> /etc/hosts
+
+# install deps
+echo "Installing dependencies"
+apt-get install --force-yes -y apache2 darcs git python-twisted-names
+
+wget http://www.eucalyptus.com/favicon.ico -O /var/www/favicon.ico
+
+# let's make sure we have the mountpoint
+echo "Creating and prepping ${MOUNT_POINT}"
+mkdir -p ${MOUNT_POINT}
+
+# don't mount ${MOUNT_POINT} more than once (mainly for debugging)
+if [[ "${MOUNT_MOUNT_POINT}" = Y ]] && ! mount | grep ${MOUNT_POINT}; then
+ # let's see where ephemeral is mounted, and either mount
+ # it in the final place (${MOUNT_POINT}) or mount -o bind
+ EPHEMERAL="`curl -s -f -m 20 http://169.254.169.254/latest/meta-data/block-device-mapping/ephemeral0`"
+ if [ -z "${EPHEMERAL}" ]; then
+ # workaround for a bug in EEE 2
+ EPHEMERAL="`curl -s -f -m 20 http://169.254.169.254/latest/meta-data/block-device-mapping/ephemeral`"
+ fi
+ if [ -z "${EPHEMERAL}" ]; then
+ echo "Cannot find ephemeral partition!"
+ exit 1
+ else
+ # let's see if it is mounted
+ if ! mount | grep ${EPHEMERAL} ; then
+ mount /dev/${EPHEMERAL} ${MOUNT_POINT}
+ else
+ mount -o bind `mount | grep ${EPHEMERAL} | cut -f 3 -d ' '` ${MOUNT_POINT}
+ fi
+ fi
+fi
+
+useradd -g www-data -M -N -r -s /usr/sbin/nologin supybot1
+useradd -g www-data -M -N -r -s /usr/sbin/nologin supybot2
+
+# now let's get the archives from the walrus bucket
+if [[ "$RESTORE_FROM_WALRUS" = Y ]]; then
+ echo "Retrieving eucabot archives and configuration"
+ ${S3CURL} --id ${WALRUS_NAME} -- -s ${WALRUS_URL}/${ARCHIVE_TARBALL} > /${MOUNT_POINT}/archive.tgz
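+ # on failure Walrus returns an XML error document (starting with "<Error")
+ # instead of the object, hence this check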
+ if [ "`head -c 4 /${MOUNT_POINT}/archive.tgz`" = "<Err" ]; then
+ echo 'Failed to get archives!'
+ exit 1
+ else
+ tar -C ${MOUNT_POINT} -xzpf ${MOUNT_POINT}/archive.tgz
+ fi
+else
+ # Don't forget to write data/*/supybot.conf and data/*/conf/users.conf
+
+ # Supybot instances' data go in subdirectories of this
+ install -d -m 0710 ${MOUNT_POINT}/data -g www-data
+ install -d -m 0710 ${MOUNT_POINT}/meeting-logs -g www-data
+
+ # Plugins go here
+ install -d -m 0775 ${MOUNT_POINT}/plugins -g www-data
+
+ # Instance 1 lives on Freenode
+ install -d -m 0710 ${MOUNT_POINT}/data/1 -o supybot1
+ install -d -m 0750 ${MOUNT_POINT}/meeting-logs/1 -o supybot1 -g www-data
+ mkdir -p ${MOUNT_POINT}/data/1/conf
+ chown -R supybot1:www-data ${MOUNT_POINT}/data/1 ${MOUNT_POINT}/meeting-logs/1
+
+ # Instance 2 lives in Eucalyptus HQ
+ install -d -m 0710 ${MOUNT_POINT}/data/2 -o supybot2 -g www-data
+ install -d -m 0750 ${MOUNT_POINT}/meeting-logs/2 -o supybot2 -g www-data
+ mkdir -p ${MOUNT_POINT}/data/2/conf
+ chown -R supybot2:www-data ${MOUNT_POINT}/data/2 ${MOUNT_POINT}/meeting-logs/2
+fi
+
+# Install supybot
+tempdir=`mktemp -d`
+git clone --depth 1 git://github.com/ProgVal/Limnoria.git $tempdir/supybot
+pushd $tempdir/supybot
+python setup.py install
+popd
+rm -rf $tempdir
+
+# Install the MeetBot plugin
+darcs get http://anonscm.debian.org/darcs/collab-maint/MeetBot/ ${MOUNT_POINT}/plugins/MeetBot
+
+# Install remaining plugins
+tempdir=`mktemp -d`
+git clone --depth 1 git://github.com/gholms/supybot-plugins.git $tempdir/supybot-plugins-gholms
+mv -n $tempdir/supybot-plugins-gholms/* ${MOUNT_POINT}/plugins/
+git clone --depth 1 git://github.com/ProgVal/Supybot-plugins.git $tempdir/supybot-plugins-progval
+mv -nT $tempdir/supybot-plugins-progval/AttackProtector ${MOUNT_POINT}/plugins/AttackProtector
+rm -rf $tempdir
+
+chgrp -R www-data ${MOUNT_POINT}/plugins
+chmod -R g+rwX ${MOUNT_POINT}/plugins
+
+[[ "$RESTORE_FROM_WALRUS" != Y ]] && exit 0
+
+# let's set up apache's configuration (LDAP client settings used by
+# mod_authnz_ldap, then the vhost and its certificates)
+cat >> /etc/ldap/ldap.conf << EOF
+BASE FIXME
+URI FIXME
+TLS_CACERT /etc/ssl/certs/gd_bundle.crt
+TLS_REQCERT demand
+EOF
+
+${S3CURL} --id ${WALRUS_NAME} -- -s ${WALRUS_URL}/supybot-apache-config > /etc/apache2/sites-available/supybot
+${S3CURL} --id ${WALRUS_NAME} -- -s ${WALRUS_URL}/gd_bundle.crt > /etc/ssl/certs/gd_bundle.crt
+${S3CURL} --id ${WALRUS_NAME} -- -s ${WALRUS_URL}/eucalyptus.com.crt > /etc/ssl/certs/eucalyptus.com.crt
+${S3CURL} --id ${WALRUS_NAME} -- -s ${WALRUS_URL}/eucalyptus.com.key > /etc/ssl/private/eucalyptus.com.key
+if [ "`head -c 4 /etc/apache2/sites-available/supybot`" = "<Err" ]; then
+ echo "Couldn't get apache configuration!"
+ exit 1
+fi
+a2dissite default
+a2ensite supybot
+a2enmod authnz_ldap
+a2enmod ssl
+service apache2 restart
+
+# Fetch /etc/init.d/supybot
+${S3CURL} --id ${WALRUS_NAME} -- -s ${WALRUS_URL}/supybot.init > /etc/init.d/supybot
+if [ "`head -c 4 /etc/init.d/supybot`" = "<Err" ]; then
+ echo "Couldn't get init script!"
+ exit 1
+fi
+chmod +x /etc/init.d/supybot
+update-rc.d supybot defaults
+
+# Set up a cron-job to save the archives and config to a bucket. It will
+# run as root
+echo "Preparing local script to push backups to walrus"
+cat >/etc/cron.hourly/eucabot-backup <<EOF
+#!/bin/sh
+chmod -R g+rwX ${MOUNT_POINT}/plugins
+tar -C ${MOUNT_POINT} -czpf ${MOUNT_POINT}/archive.tgz .
+# WARNING: the bucket in ${WALRUS_URL} *must* have been already created
+# keep one copy per day of the month
+${S3CURL} --id ${WALRUS_NAME} --put /${MOUNT_POINT}/archive.tgz -- -s ${WALRUS_URL}/${ARCHIVE_TARBALL}-day_of_month
+# and push it to be the latest backup too for easy recovery
+${S3CURL} --id ${WALRUS_NAME} --put /${MOUNT_POINT}/archive.tgz -- -s ${WALRUS_URL}/${ARCHIVE_TARBALL}
+# save the init script
+${S3CURL} --id ${WALRUS_NAME} --put /etc/init.d/supybot -- -s ${WALRUS_URL}/supybot.init
+# and save the aliases too
+${S3CURL} --id ${WALRUS_NAME} --put /etc/aliases -- -s ${WALRUS_URL}/aliases
+# finally the apache config file
+${S3CURL} --id ${WALRUS_NAME} --put /etc/apache2/sites-available/supybot -- -s ${WALRUS_URL}/supybot-apache-config
+rm ${MOUNT_POINT}/archive.tgz
+EOF
+# substitute to get the day of month
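+# (the heredoc above is unquoted, so $(date +%d) written directly would
+# expand once at install time; patching it in afterwards keeps it literal
+# so the cron script evaluates it at each run)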
+sed -i 's/-day_of_month/-$(date +%d)/' /etc/cron.hourly/eucabot-backup
+
+# change execute permissions and ownership
+chmod +x /etc/cron.hourly/eucabot-backup
+
+# Start the bot(s)
+service supybot start
26 bash/debian/mailman.README
@@ -0,0 +1,26 @@
+Mailman Recipe
+--------------
+
+This is a very simple recipe to be used with our test images
+(debian-based). It is what we use for our mailman installation at
+lists.eucalyptus.com.
+
+Details
+-------
+
+TBD
+
+Usage
+-----
+
+Make sure you have a security group which allows port 25 (SMTP) to be
+open. I usually also add ssh. For example:
+
+ euca-add-group -d "mailman security group" mailman
+ euca-authorize -p 25 -P tcp -s 0.0.0.0/0 mailman
+ euca-authorize -p 22 -P tcp -s 0.0.0.0/0 mailman
+
+When starting with one of our debian-based images you can do something
+like
+
+ euca-run-instances -g mailman -k XXXX emi-XXXXXX -f mailman.sh
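+
+Note that the walrus bucket in WALRUS_URL must already exist for backups
+to work; as in the postgres recipe, you can create it with the modified
+s3curl:
+
+ s3curl --id <my_walrus> --put -- $WALRUS_URL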
249 bash/debian/mailman.sh
@@ -0,0 +1,249 @@
+#!/bin/bash
+#
+# Script to install mailman
+
+# variables associated with the cloud/walrus to use: CHANGE them to
+# reflect your walrus configuration
+WALRUS_NAME="community" # arbitrary name
+WALRUS_IP="173.205.188.8" # IP of the walrus to use
+WALRUS_ID="xxxxxxxxxxxxxxxxxxxxx" # EC2_ACCESS_KEY
+WALRUS_KEY="xxxxxxxxxxxxxxxxxxx" # EC2_SECRET_KEY
+WALRUS_URL="http://${WALRUS_IP}:8773/services/Walrus/mailman" # conf bucket
+WALRUS_MASTER="mailman-archive.tgz" # master copy of the database
+
+# mailman related configuration
+MAILNAME="lists.eucalyptus.com" # the public hostname
+POSTMASTER="community@eucalyptus.com" # email to receive exim errors
+MOUNT_POINT="/mailman" # archives and data are on ephemeral
+
+# do backup on walrus?
+WALRUS_BACKUP="Y"
+
+# Modifications below this point are needed only to customize the behavior
+# of the script.
+
+# the modified s3curl to interact with the above walrus
+S3CURL="/usr/bin/s3curl-euca.pl"
+
+# get the s3curl script
+echo "Getting ${S3CURL}"
+curl -s -f -o ${S3CURL} --url http://173.205.188.8:8773/services/Walrus/s3curl/s3curl-euca.pl
+chmod 755 ${S3CURL}
+
+# now let's setup the id for accessing walrus
+echo "Setting credentials for ${S3CURL}"
+cat >${HOME}/.s3curl <<EOF
+%awsSecretAccessKeys = (
+ ${WALRUS_NAME} => {
+ url => '${WALRUS_IP}',
+ id => '${WALRUS_ID}',
+ key => '${WALRUS_KEY}',
+ },
+);
+EOF
+chmod 600 ${HOME}/.s3curl
+
+# update the instance
+echo "Upgrading and installing packages"
+apt-get --force-yes -y update
+apt-get --force-yes -y upgrade
+
+# make sure the mailname is correct
+echo "Setting hostname and mailname to ${MAILNAME}"
+echo "${MAILNAME}" > /etc/mailname
+echo "${MAILNAME}" > /etc/hostname
+hostname ${MAILNAME}
+LOCALIP="`curl -s -f -m 20 http://169.254.169.254/latest/meta-data/local-ipv4`"
+echo "${LOCALIP} ${MAILNAME}" >> /etc/hosts
+
+# mailman and exim require some preseeding to prevent questions
+echo "Preseeding debconf for mailman and exim"
+cat >/root/preseed.cfg <<EOF
+exim4-config exim4/dc_other_hostnames string
+exim4-config exim4/dc_eximconfig_configtype select internet site; mail is sent and received directly using SMTP
+exim4-config exim4/no_config boolean true
+exim4-config exim4/hide_mailname boolean
+exim4-config exim4/dc_postmaster string ${POSTMASTER}
+exim4-config exim4/dc_smarthost string
+exim4-config exim4/dc_relay_domains string
+exim4-config exim4/dc_relay_nets string
+exim4-base exim4/purge_spool boolean false
+exim4-config exim4/mailname string ${MAILNAME}
+exim4-config exim4/dc_readhost string
+# Reconfigure exim4-config instead of this package
+exim4-config exim4/use_split_config boolean false
+exim4-config exim4/dc_localdelivery select mbox format in /var/mail/
+exim4-config exim4/dc_local_interfaces string
+exim4-config exim4/dc_minimaldns boolean false
+
+mailman mailman/gate_news boolean false
+mailman mailman/site_languages multiselect en
+mailman mailman/queue_files_present select abort installation
+mailman mailman/used_languages string
+mailman mailman/default_server_language select en
+mailman mailman/create_site_list note
+EOF
+debconf-set-selections /root/preseed.cfg
+rm -f /root/preseed.cfg
+
+# install mailman
+echo "Installing mailman"
+apt-get install --force-yes -y mailman ntp ntpdate
+
+# just sync the date first
+ntpdate -s pool.ntp.org
+
+# let's make sure we have the mountpoint
+echo "Creating and prepping ${MOUNT_POINT}"
+mkdir -p ${MOUNT_POINT}
+
+# don't mount ${MOUNT_POINT} more than once (mainly for debugging)
+if ! mount |grep ${MOUNT_POINT}; then
+ # let's see where ephemeral is mounted, and either mount
+ # it in the final place (${MOUNT_POINT}) or mount -o bind
+ EPHEMERAL="`curl -s -f -m 20 http://169.254.169.254/latest/meta-data/block-device-mapping/ephemeral0`"
+ if [ -z "${EPHEMERAL}" ]; then
+ # workaround for a bug in EEE 2
+ EPHEMERAL="`curl -s -f -m 20 http://169.254.169.254/latest/meta-data/block-device-mapping/ephemeral`"
+ fi
+ if [ -z "${EPHEMERAL}" ]; then
+ echo "Cannot find ephemeral partition!"
+ exit 1
+ else
+ # let's see if it is mounted
+ if ! mount | grep ${EPHEMERAL} ; then
+ mount /dev/${EPHEMERAL} ${MOUNT_POINT}
+ else
+ mount -o bind `mount | grep ${EPHEMERAL} | cut -f 3 -d ' '` ${MOUNT_POINT}
+ fi
+ fi
+fi
+
+# now let's get exim configured
+echo "Creating exim4 configuration"
+cat >/etc/exim4/update-exim4.conf.conf <<EOF
+# This is a Debian specific file
+
+dc_eximconfig_configtype='internet'
+dc_other_hostnames=''
+dc_local_interfaces=''
+dc_readhost=''
+dc_relay_domains=''
+dc_minimaldns='false'
+dc_relay_nets=''
+dc_smarthost=''
+CFILEMODE='644'
+dc_use_split_config='false'
+dc_hide_mailname=''
+dc_mailname_in_oh='true'
+dc_localdelivery='mail_spool'
+EOF
+cat >/etc/exim4/exim4.conf.localmacros <<EOF
+SYSTEM_ALIASES_USER = list
+SYSTEM_ALIASES_PIPE_TRANSPORT = address_pipe
+EOF
+
+# regenerate config and restart service
+service exim4 stop
+# there seems to be a bug for which exim is not properly stopped
+killall exim4
+rm -f /var/log/exim4/paniclog
+update-exim4.conf
+service exim4 start
+
+# let's setup apache's configuration
+echo "Configuring apache"
+${S3CURL} --id ${WALRUS_NAME} -- -s ${WALRUS_URL}/lists > /etc/apache2/sites-available/lists
+if [ "`head -c 4 /etc/apache2/sites-available/lists`" = "<Err" -o "`head -c 4 /etc/apache2/sites-available/lists`" = "Fail" ]; then
+ echo "Couldn't get apache configuration!"
+ exit 1
+fi
+a2dissite default
+a2ensite lists
+a2enmod rewrite
+service apache2 restart
+
+# now let's get the archives from the walrus bucket
+service mailman stop
+echo "Retrieving mailman archives and configuration"
+${S3CURL} --id ${WALRUS_NAME} -- -s ${WALRUS_URL}/${WALRUS_MASTER} > /${MOUNT_POINT}/master_copy.tgz
+mkdir /${MOUNT_POINT}/mailman
+if [ "`head -c 4 /${MOUNT_POINT}/master_copy.tgz`" = "<Err" -o "`head -c 4 /${MOUNT_POINT}/master_copy.tgz`" = "Fail" ]; then
+ echo "Couldn't get archives!"
+ exit 1
+else
+ tar -C /${MOUNT_POINT}/mailman -xzf /${MOUNT_POINT}/master_copy.tgz
+ mv /var/lib/mailman /var/lib/mailman.orig
+ ln -s /${MOUNT_POINT}/mailman /var/lib/mailman
+fi
+
+# and the aliases
+${S3CURL} --id ${WALRUS_NAME} -- -s ${WALRUS_URL}/aliases > /${MOUNT_POINT}/aliases
+if [ "`head -c 4 /${MOUNT_POINT}/aliases`" = "<Err" -o "`head -c 4 /${MOUNT_POINT}/aliases`" = "Fail" ]; then
+ echo "Couldn't get aliases!"
+ exit 1
+else
+ mv /etc/aliases /etc/aliases.orig
+ cp /${MOUNT_POINT}/aliases /etc/aliases
+ newaliases
+fi
+service mailman start
+
+# set up a cron-job to save the archives and config to a bucket: it will
+# run as root
+echo "Preparing local script to push backups to walrus"
+cat >/usr/local/bin/mailman_backup.sh <<EOF
+#!/bin/sh
+tar -C /var/lib/mailman -czf /${MOUNT_POINT}/archive.tgz .
+# check the bucket exists
+if ${S3CURL} --id ${WALRUS_NAME} -- ${WALRUS_URL}/${WALRUS_MASTER}|grep NoSuchBucket ; then
+ echo
+ echo "${WALRUS_URL}/${WALRUS_MASTER} does not exist: you need to"
+ echo "create it to have backups."
+ echo
+ exit 1
+fi
+# keep one copy per day of the month
+if ${S3CURL} --id ${WALRUS_NAME} --put /${MOUNT_POINT}/archive.tgz -- -s ${WALRUS_URL}/${WALRUS_MASTER}-day_of_month | grep -v ETag ; then
+ echo
+ echo "Failed to upload to walrus!"
+ exit 1
+fi
+# and push it to be the latest backup too for easy recovery
+if ${S3CURL} --id ${WALRUS_NAME} --put /${MOUNT_POINT}/archive.tgz -- -s ${WALRUS_URL}/${WALRUS_MASTER} |grep -v ETag; then
+ echo
+ echo "Failed to upload to walrus!"
+ exit 1
+fi
+# and save the aliases too
+if ${S3CURL} --id ${WALRUS_NAME} --put /etc/aliases -- -s ${WALRUS_URL}/aliases |grep -v ETag; then
+ echo
+ echo "Failed to upload to walrus!"
+ exit 1
+fi
+# finally the apache config file
+if ${S3CURL} --id ${WALRUS_NAME} --put /etc/apache2/sites-available/lists -- -s ${WALRUS_URL}/lists |grep -v ETag; then
+ echo
+ echo "Failed to upload to walrus!"
+ exit 1
+fi
+rm /${MOUNT_POINT}/archive.tgz
+EOF
+# substitute to get the day of month
+sed -i 's/-day_of_month/-$(date +%d)/' /usr/local/bin/mailman_backup.sh
+
+# change execute permissions and ownership
+chmod +x /usr/local/bin/mailman_backup.sh
+
+if [ "$WALRUS_BACKUP" != "Y" ]; then
+ # we are done here
+ exit 0
+fi
+
+# and turn it into a cronjob to run every hour
+echo "Setting up cron-job"
+cat >/tmp/crontab <<EOF
+30 * * * * /usr/local/bin/mailman_backup.sh
+EOF
+crontab /tmp/crontab
+
44 bash/debian/planet.README
@@ -0,0 +1,44 @@
+Planet Recipe
+--------------
+
+This script is to be used with our test images (debian-based). It will
+install and configure planet (http://intertwingly.net/code/venus/). This
+is the script we use for
+ http://planet.eucalyptus.com
+
+Details
+-------
+
+All the configuration for Planet is stored in a walrus bucket (for our
+planet.eucalyptus.com). To modify planet's configuration, we simply upload
+the file to the Walrus bucket and the cronjob will pick it up. Any s3 tool
+that allows changing the endpoint URL should work. To upload the modified
+config files we do:
+
+ s3curl --id <id> --acl public-read --put <configfile> -- $S3_URL/euca-planet/<configfile>
+
+The modified s3curl can be retrieved with
+ curl -f -o s3curl --url http://173.205.188.8:8773/services/Walrus/s3curl/s3curl-euca.pl
+
+
+Usage
+-----
+
+Make sure you have a security group that allows for connections to the web
+server. Typically these are port 80 (http) and 443 (https). We usually add
+port 22 (ssh). For example
+
+ euca-add-group -d "webserver security group" webserver
+ euca-authorize -p 80 -P tcp -s 0.0.0.0/0 webserver
+ euca-authorize -p 443 -P tcp -s 0.0.0.0/0 webserver
+ euca-authorize -p 22 -P tcp -s 0.0.0.0/0 webserver
+
+When starting with one of our debian-based images you can do something
+like
+
+ euca-run-instances -k XXX -g webserver emi-XXXXXX -f planet.sh
+
+Since we have an elastic IP associated with the planet, the last step is
+to associate it with the running instance
+
+ euca-associate-address -i i-XXXXX planet.eucalyptus.com
78 bash/debian/planet.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+#
+# Simple script to setup planet (feed aggregator)
+#
+
+# where is the configuration stored
+WALRUS_URL="http://173.205.188.8:8773/services/Walrus/euca-planet"
+
+# USER to own the planet's files (ubuntu for Ubuntu, and www-data for
+# Debian)
+PLANET_USER="www-data"
+PLANET_GROUP="www-data"
+
+# where is planet
+WHERE="/mnt"
+
+# update the instance
+apt-get --force-yes -y update
+apt-get --force-yes -y upgrade
+
+# install planet-venus
+apt-get install --force-yes -y planet-venus nginx
+
+# create planet's structure
+mkdir -pv ${WHERE}/cache ${WHERE}/output ${WHERE}/output/images ${WHERE}/theme ${WHERE}/theme/images
+echo "<html></html>" >${WHERE}/output/index.html
+cp -pv /usr/share/planet-venus/theme/common/* ${WHERE}/theme
+cp -pvr /usr/share/planet-venus/theme/default/* ${WHERE}/theme
+cp -pv /usr/share/planet-venus/theme/common/images/* ${WHERE}/output/images
+
+# let's create a script to update the skinning of the planet
+cat >${WHERE}/execute <<EOF
+#!/bin/sh
+curl -s -f -o ${WHERE}/planet.ini --url ${WALRUS_URL}/planet.ini
+curl -s -f -o ${WHERE}/theme/index.html.tmpl --url ${WALRUS_URL}/index.html.tmpl
+curl -s -f -o ${WHERE}/output/favicon.ico --url ${WALRUS_URL}/favicon.ico
+curl -s -f -o ${WHERE}/output/images/logo.png --url ${WALRUS_URL}/logo.png
+curl -s -f -o ${WHERE}/output/planet.css --url ${WALRUS_URL}/planet.css
+cd ${WHERE} && planet --verbose planet.ini
+EOF
+
+# change execute permissions
+chmod +x ${WHERE}/execute
+
+# and turn it into a cronjob
+cat >${WHERE}/crontab <<EOF
+2,17,32,47 * * * * ${WHERE}/execute
+EOF
+
+# change the ownership of all planet stuff
+chown -R ${PLANET_USER}:${PLANET_GROUP} ${WHERE}
+su -c "${WHERE}/execute" - ${PLANET_USER}
+
+# and start the cronjob
+crontab -u ${PLANET_USER} ${WHERE}/crontab
+
+# let's remove the link to the default website
+rm /etc/nginx/sites-enabled/default
+
+# let's create our own simple configuration
+cat >/etc/nginx/sites-available/eucalyptus <<EOF
+server {
+ listen 80; ## listen for ipv4
+ listen [::]:80 default ipv6only=on; ## listen for ipv6
+ access_log /var/log/nginx/access.log;
+ location / {
+ root ${WHERE}/output;
+ index index.html;
+ }
+}
+EOF
+
+# and make it available
+ln -s /etc/nginx/sites-available/eucalyptus /etc/nginx/sites-enabled/eucalyptus
+
+# start the service
+/etc/init.d/nginx restart
+
60 bash/debian/postgres.README
@@ -0,0 +1,60 @@
+Postgres Recipe
+--------------
+
+This is a very simple recipe to be used with our test images
+(debian-based). It will install postgres, then use either ephemeral
+storage (WARNING: DATA CAN BE LOST) or an EBS volume to store the
+database. It will load a previous database dump from a walrus bucket and
+can set up a cronjob to save the database to the same walrus bucket.
+
+Right now it is very crude, but it gets the job done as the backend for
+projects.eucalyptus.com.
+
+Details
+-------
+
+The script may stop and wait for an EBS volume to show up (it expects
+/dev/sdb); otherwise it will use ephemeral storage for the database. The
+user will need to configure which one via the value of MOUNT_DEV.
+
+The database will be mounted under /postgres and the postgres
+configuration will be changed to allow network access to it. A modified
+s3curl will be downloaded and installed to access Walrus buckets: the key
+and id will need to be changed (values of WALRUS_ID and WALRUS_KEY) as
+well as the Walrus IP address (WALRUS_IP).
+
+s3curl will be used to download the previous database dump, which will be
+in $WALRUS_URL/$WALRUS_MASTER (respectively the Walrus URL with bucket and
+file name) and, if enabled (WALRUS_BACKUP="Y"), a cronjob will be set up
+to save the database into the same bucket. A copy of the database will be
+suffixed with the day of the month, thus a month's worth of backups will
+be kept in Walrus. A copy of the latest database dump will be saved in
+$WALRUS_MASTER to ensure that a restart of the script will always load the
+latest dump.
+
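+To restore a specific day's dump manually, you can fetch that day's copy
+with s3curl (the day suffix below, 05, is just an example):
+
+s3curl --id <my_walrus> -- -s $WALRUS_URL/$WALRUS_MASTER-05 > dump.sql
+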
+The walrus bucket needs to already exist. If you use s3curl you can
+create it with
+
+s3curl --id <my_walrus> --put -- $WALRUS_URL
+
+Usage
+-----
+
+Make sure you have a security group which allows port 5432 to be open to
+the database clients. I usually also add ssh. For example:
+
+ euca-add-group -d "postgres security group" postgres
+ euca-authorize -p 5432 -P tcp -s 0.0.0.0/0 postgres
+ euca-authorize -p 22 -P tcp -s 0.0.0.0/0 postgres
+
+When starting with one of our debian-based images you can do something
+like
+
+ euca-run-instances -g postgres -k XXXX emi-XXXXXX -f postgres.sh
+
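+If you plan to use an EBS volume and don't have one yet, create it first
+(the size in GB and zone below are examples):
+
+ euca-create-volume -s 10 -z myzone
+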
+then, when the instance is up and running, if an EBS volume is used
+
+ euca-attach-volume -i i-XXXXXX -d /dev/sdb vol-XXXXXX
+
+After which the database should be running with the latest dump retrieved
+from the Walrus bucket.
178 bash/debian/postgres.sh
@@ -0,0 +1,178 @@
+#!/bin/bash
+#
+# Script to install postgres, and make it point to a volume (sdb) where
+# the database resides. The assumption is to have a Debian installation,
+# thus we'll look for Debian's style configuration and modify it
+# accordingly.
+
+# variables associated with the cloud/walrus to use: CHANGE them to
+# reflect your walrus configuration
+WALRUS_NAME="my_walrus" # arbitrary name
+WALRUS_IP="173.205.188.8" # IP of the walrus to use
+WALRUS_ID="xxxxxxxxxxxxxxxxxxxxx" # EC2_ACCESS_KEY
+WALRUS_KEY="xxxxxxxxxxxxxxxxxxx" # EC2_SECRET_KEY
+WALRUS_URL="http://${WALRUS_IP}:8773/services/Walrus/postgres" # conf bucket
+WALRUS_MASTER="backup" # master copy of the database
+
+# do backup on walrus?
+WALRUS_BACKUP="Y"
+
+# use MOUNT_DEV to wait for an EBS volume, otherwise we'll be using
+# ephemeral: WARNING: when using ephemeral you may lose data upon
+# instance termination
+#MOUNT_DEV="/dev/sdb" # EBS device
+MOUNT_DEV="" # use ephemeral
+PG_VERSION="8.4"
+
+# where are the important directory/data
+CONF_DIR="/etc/postgresql/$PG_VERSION/main/"
+DATA_DIR="/var/lib/postgresql/$PG_VERSION/"
+MOUNT_POINT="/postgres"
+
+# user to use when working with the database
+USER="postgres"
+
+# Modifications below this point are needed only to customize the behavior
+# of the script.
+
+# the modified s3curl to interact with the above walrus
+S3CURL="/usr/bin/s3curl-euca.pl"
+
+# get the s3curl script
+echo "Getting ${S3CURL}"
+curl -s -f -o ${S3CURL} --url http://173.205.188.8:8773/services/Walrus/s3curl/s3curl-euca.pl
+chmod 755 ${S3CURL}
+
+# now let's setup the id for accessing walrus
+echo "Setting credentials for ${S3CURL}"
+cat >${HOME}/.s3curl <<EOF
+%awsSecretAccessKeys = (
+ ${WALRUS_NAME} => {
+ url => '${WALRUS_IP}',
+ id => '${WALRUS_ID}',
+ key => '${WALRUS_KEY}',
+ },
+);
+EOF
+chmod 600 ${HOME}/.s3curl
+
+# let's make sure we have the mountpoint
+echo "Creating and prepping $MOUNT_POINT"
+mkdir -p $MOUNT_POINT
+
+# are we using ephemeral or EBS?
+if [ -z "$MOUNT_DEV" ]; then
+ # don't mount $MOUNT_POINT more than once (mainly for debugging)
+ if ! mount |grep $MOUNT_POINT; then
+ # let's see where ephemeral is mounted, and either mount
+ # it in the final place ($MOUNT_POINT) or mount -o bind
+ EPHEMERAL="`curl -f -m 20 http://169.254.169.254/latest/meta-data/block-device-mapping/ephemeral0`"
+ if [ -z "${EPHEMERAL}" ]; then
+ # workaround for a bug in EEE 2
+ EPHEMERAL="`curl -f -m 20 http://169.254.169.254/latest/meta-data/block-device-mapping/ephemeral`"
+ fi
+ if [ -z "${EPHEMERAL}" ]; then
+ echo "Cannot find ephemeral partition!"
+ exit 1
+ else
+ # let's see if it is mounted
+ if ! mount | grep ${EPHEMERAL} ; then
+ mount /dev/${EPHEMERAL} $MOUNT_POINT
+ else
+ mount -o bind `mount | grep ${EPHEMERAL} | cut -f 3 -d ' '` $MOUNT_POINT
+ fi
+ fi
+ fi
+else
+ # wait for the EBS volume and mount it
+ while ! mount $MOUNT_DEV $MOUNT_POINT ; do
+ echo "waiting for EBS volume ($MOUNT_DEV) ..."
+ sleep 10
+ done
+
+ # if there is already a database ($MOUNT_POINT/main) in the volume
+ # we'll use it, otherwise we will recover from walrus
+fi
+
+# update the instance
+echo "Upgrading and installing packages"
+apt-get --force-yes -y update
+apt-get --force-yes -y upgrade
+
+# install postgres
+apt-get install --force-yes -y postgresql libdigest-hmac-perl
+
+# stop the database
+echo "Setting up postgres"
+/etc/init.d/postgresql stop
+
+# change where the data directory is and listen to all interfaces
+sed -i "1,$ s;^\(data_directory\).*;\1 = '$MOUNT_POINT/main';" $CONF_DIR/postgresql.conf
+sed -i "1,$ s;^#\(listen_addresses\).*;\1 = '*';" $CONF_DIR/postgresql.conf
+
+# we need to allow network (password/md5) access to postgres: euca-authorize
+# will do the rest
+cat >>$CONF_DIR/pg_hba.conf <<EOF
+# allow md5-authenticated access from anywhere: the user will set the
+# firewall via ec2-authorize
+hostssl all all 0.0.0.0/0 md5
+EOF
+
+# let's make sure $USER can write in the right place
+chown ${USER} $MOUNT_POINT
+
+# now let's see if we have an already existing database on the target
+# directory
+if [ ! -d $MOUNT_POINT/main ]; then
+ # nope: let's recover from the bucket: let's get the default
+ # structure in
+ (cd $DATA_DIR; tar czf - *)|(cd $MOUNT_POINT; tar xzf -)
+
+ # start the database
+ /etc/init.d/postgresql start
+
+ # and recover from bucket
+ ${S3CURL} --id ${WALRUS_NAME} -- -s ${WALRUS_URL}/${WALRUS_MASTER} > $MOUNT_POINT/$WALRUS_MASTER
+ # check for error
+ if [ "`head -c 6 $MOUNT_POINT/$WALRUS_MASTER`" = "<Error" ]; then
+ echo "Cannot get backup!"
+ echo "Database is empty: disabling auto-backup."
+ WALRUS_BACKUP="N"
+ else
+ chown ${USER} $MOUNT_POINT/$WALRUS_MASTER
+ chmod 600 $MOUNT_POINT/$WALRUS_MASTER
+ su - -c "psql -f $MOUNT_POINT/$WALRUS_MASTER postgres" postgres
+ rm $MOUNT_POINT/$WALRUS_MASTER
+ fi
+else
+ # database is in place: just start
+ /etc/init.d/postgresql start
+fi
+
+# set up a cron-job to save the database to a bucket: it will run as root
+cat >/usr/local/bin/pg_backup.sh <<EOF
+#!/bin/sh
+su - -c "pg_dumpall > $MOUNT_POINT/$WALRUS_MASTER" ${USER}
+# WARNING: the bucket in ${WALRUS_URL} *must* have been already created
+# keep one copy per day of the month
+${S3CURL} --id ${WALRUS_NAME} --put $MOUNT_POINT/$WALRUS_MASTER -- -s ${WALRUS_URL}/${WALRUS_MASTER}-day_of_month
+# and push it to be the latest backup too for easy recovery
+${S3CURL} --id ${WALRUS_NAME} --put $MOUNT_POINT/$WALRUS_MASTER -- -s ${WALRUS_URL}/${WALRUS_MASTER}
+rm $MOUNT_POINT/$WALRUS_MASTER
+EOF
+# substitute to get the day of month
+sed -i 's/-day_of_month/-$(date +%d)/' /usr/local/bin/pg_backup.sh
+
+# change execute permissions and ownership
+chmod +x /usr/local/bin/pg_backup.sh
+
+if [ "$WALRUS_BACKUP" != "Y" ]; then
+ # we are done here
+ exit 0
+fi
+
+# and turn it into a cronjob to run every hour
+cat >/tmp/crontab <<EOF
+30 * * * * /usr/local/bin/pg_backup.sh
+EOF
+crontab /tmp/crontab
+
32 bash/debian/redmine
@@ -0,0 +1,32 @@
+# These modules must be enabled: passenger, rewrite, ssl
+# Configuration for http://localhost/redmine
+<VirtualHost *:80>
+ ServerAdmin projects-administrator@eucalyptus.com
+ ServerName projects.eucalyptus.com
+ RewriteEngine On
+ RewriteRule ^(.*)$ https://projects.eucalyptus.com$1 [R=301,L]
+</VirtualHost>
+
+<VirtualHost *:443>
+ ServerAdmin projects-administrator@eucalyptus.com
+ ServerName projects.eucalyptus.com
+ # this is the passenger config
+ RailsEnv production
+ SetEnv X_DEBIAN_SITEID "default"
+ SetEnv RAILS_RELATIVE_URL_ROOT "/redmine"
+ # apache2 serves public files
+ DocumentRoot /usr/share/redmine/public
+ Alias "/redmine/plugin_assets/" /var/cache/redmine/default/plugin_assets/
+ Alias "/redmine" /usr/share/redmine/public
+ RewriteEngine On
+ RewriteRule ^$ /redmine [R=301]
+ SSLEngine on
+ SSLCertificateFile /etc/ssl/certs/eucalyptus-cert.crt
+ SSLCertificateKeyFile /etc/ssl/private/eucalyptus-nopass.key
+ SSLCertificateChainFile /etc/ssl/certs/gd_bundle.crt
+ <Directory "/usr/share/redmine/public">
+ Order allow,deny
+ Allow from all
+ </Directory>
+</VirtualHost>
+
71 bash/debian/redmine.README
@@ -0,0 +1,71 @@
+Redmine Recipe
+--------------
+
+This recipe is to be used with our test images (debian-based). It will
+install and configure redmine on an instance. The database is assumed to
+be on a different instance, and in our case we use the simple postgres
+script in PostgresRecipe.
+
+Files
+-----
+
+Files used:
+
+database.yml - database configuration for redmine
+email.yml - email configuration for redmine
+redmine - apache configuration for redmine
+redmine.sh - the script to configure the instance
+
+Details
+-------
+
+The script needs to have s3curl credentials to access private buckets
+where the configuration is stored. The credentials and walrus endpoint are
+defined at the beginning of the script and they will need to be configured
+for your redmine installation.
+
+The script will then acquire s3curl-euca.pl from a known location and use
+it to access the private bucket for the configuration files.
+
+With the script you will find examples of the configuration files we have
+in walrus to start our instance of redmine. All the values (passwords,
+hosts addresses) will need to be changed.
+
+email.yml: contains the configuration to allow redmine to send emails
+database.yml: contains the configuration to reach the database
+redmine: contains the apache configuration we use
+
+For email there is an option to install a local SMTP server (exim4 in
+this case): the values for the mailname and the email address that
+receives the admin emails will need to be modified.
+
+In database.yml we like to use the private IP address of the database
+instance so we don't need to expose the database externally.
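+
+For example, the host line in database.yml would carry the database
+instance's private address (the address below is a placeholder):
+
+ host: 10.0.0.5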
+
+The variable PLUGINS can contain a list of github URLs pointing to
+redmine plugins to be installed. WARNING: some plugins require a newer
+version of redmine! For example we needed to upgrade to testing to use
+PLUGINS="git://github.com/kulesa/redmine_better_gantt_chart.git"
+
+The script can also set up clones of remote git repos (i.e. github) to
+allow redmine users to browse and reference code, as shown below.
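+
+For example (this repository URL is taken from the commented-out example
+in redmine.sh):
+
+ REMOTE_GIT="git://github.com/EucalyptusSystems/Eucalyptus-Scripts.git"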
+
+Usage
+-----
+
+Since the instance is configured to run apache2-ssl, port 443 will need
+to be available, and usually port 22 (ssh) will be opened for maintenance
+purposes. To create a new security group and add the right permissions you
+can then:
+
+ euca-add-group -d "redmine security group" redmine
+ euca-authorize -p 443 -P tcp -s 0.0.0.0/0 redmine
+ euca-authorize -p 22 -P tcp -s 0.0.0.0/0 redmine
+
+
+When starting with one of our debian-based images you can do something
+like
+
+ euca-run-instances -f redmine.sh -g redmine -k mykey emi-XXXXXX
+
+
189 bash/debian/redmine.sh
@@ -0,0 +1,189 @@
+#!/bin/bash
+#
+# Script to install redmine and make it use a remote database. The
+# database instance uses the PostgresRecipe.
+# This script uses s3curl (modified to work with Eucalyptus).
+
+# variables associated with the cloud/walrus to use: CHANGE them to
+# reflect your walrus configuration
+WALRUS_NAME="my_walrus" # arbitrary name
+WALRUS_IP="173.205.188.8" # IP of the walrus to use
+WALRUS_ID="xxxxxxxxxxxxxxxxxxxx" # EC2_ACCESS_KEY
+WALRUS_KEY="xxxxxxxxxxxxxxxxxxxx" # EC2_SECRET_KEY
+WALRUS_URL="http://${WALRUS_IP}:8773/services/Walrus/redmine" # conf bucket
+
+LOCAL_SMTP="Y" # install exim4 locally
+ROOT_EMAIL="my_name@example.com" # admin email recipient
+EMAIL_NAME="projects.example.com" # mailname
+
+# do we want extra plugins (which live in github)?
+#PLUGINS="https://github.com/thorin/redmine_ldap_sync.git https://github.com/kulesa/redmine_better_gantt_chart.git git://github.com/edavis10/redmine-google-analytics-plugin.git"
+PLUGINS=""
+
+# we do use git clone and a cronjob to have source code mirrored locally
+# for redmine's users consumptions
+GIT_REPO="/media/ephemeral0/repos"
+#REMOTE_GIT="git://github.com/EucalyptusSystems/Eucalyptus-Scripts.git git://github.com/EucalyptusSystems/s3curl.git https://github.com/LitheStoreDev/lithestore-cli.git"
+REMOTE_GIT=""
+REDMINE_USER="www-data"
+
+
+# Modifications below this point are needed only to customize the behavior
+# of the script.
+
+# the modified s3curl to interact with the above walrus
+S3CURL="/usr/bin/s3curl-euca.pl"
+
+# get the s3curl script
+curl -s -f -o ${S3CURL} --url http://173.205.188.8:8773/services/Walrus/s3curl/s3curl-euca.pl
+chmod 755 ${S3CURL}
+
+# now let's setup the id for accessing walrus
+cat >${HOME}/.s3curl <<EOF
+%awsSecretAccessKeys = (
+ ${WALRUS_NAME} => {
+ url => '${WALRUS_IP}',
+ id => '${WALRUS_ID}',
+ key => '${WALRUS_KEY}',
+ },
+);
+EOF
+chmod 600 ${HOME}/.s3curl
+
+# update the instance
+apt-get --force-yes -y update
+apt-get --force-yes -y upgrade
+
+# preseed the answers for redmine (to avoid interactions, since we'll
+# override the config files with our own): we need debconf-utils
+apt-get --force-yes -y install debconf-utils
+cat >/root/preseed.cfg <<EOF
+redmine redmine/instances/default/dbconfig-upgrade boolean true
+redmine redmine/instances/default/dbconfig-remove boolean
+redmine redmine/instances/default/dbconfig-install boolean false
+redmine redmine/instances/default/dbconfig-reinstall boolean false
+redmine redmine/instances/default/pgsql/admin-pass password
+redmine redmine/instances/default/pgsql/app-pass password VLAvJOPLM8OP
+redmine redmine/instances/default/pgsql/changeconf boolean false
+redmine redmine/instances/default/pgsql/method select unix socket
+redmine redmine/instances/default/database-type select pgsql
+redmine redmine/instances/default/pgsql/manualconf note
+redmine redmine/instances/default/pgsql/authmethod-admin select ident
+redmine redmine/instances/default/pgsql/admin-user string postgres
+redmine redmine/instances/default/pgsql/authmethod-user select password
+EOF
+
+# set the preseed q/a
+debconf-set-selections /root/preseed.cfg
+rm -f /root/preseed.cfg
+
+# install local SMTP server if needed
+if [ "${LOCAL_SMTP}" = "Y" ]; then
+ # add more preseeding for exim4
+ cat >/root/preseed.cfg <<EOF
+exim4-daemon-light exim4-daemon-light/drec error
+exim4-config exim4/dc_other_hostnames string
+exim4-config exim4/dc_eximconfig_configtype select internet site; mail is sent and received directly using SMTP
+exim4-config exim4/no_config boolean true
+exim4-config exim4/hide_mailname boolean
+exim4-config exim4/dc_postmaster string ${ROOT_EMAIL}
+exim4-config exim4/dc_smarthost string
+exim4-config exim4/dc_relay_domains string
+exim4-config exim4/dc_relay_nets string
+exim4-base exim4/purge_spool boolean false
+exim4-config exim4/mailname string ${EMAIL_NAME}
+exim4-config exim4/dc_readhost string
+exim4 exim4/drec error
+exim4-base exim4-base/drec error
+exim4-config exim4/use_split_config boolean false
+exim4-config exim4/dc_localdelivery select mbox format in /var/mail/
+exim4-config exim4/dc_local_interfaces string 127.0.0.1 ; ::1
+exim4-config exim4/dc_minimaldns boolean false
+EOF
+ # set the preseed q/a
+ debconf-set-selections /root/preseed.cfg
+ rm -f /root/preseed.cfg
+
+ # install exim4 now
+ apt-get install --force-yes -y exim4
+fi
+
+# install redmine and supporting packages
+apt-get install --force-yes -y redmine-pgsql redmine librmagick-ruby libapache2-mod-passenger apache2 libdbd-pg-ruby libdigest-hmac-perl git libopenid-ruby
+
+# now install plugins if we have them
+for x in $PLUGINS ; do
+ (cd /usr/share/redmine/vendor/plugins; git clone $x) || true
+done
+
+# now let's setup the git repo for the sources
+if [ "${REMOTE_GIT}" != "" ]; then
+ # create repo with the right permissions
+ mkdir -p ${GIT_REPO}
+ chown ${REDMINE_USER} ${GIT_REPO}
+ chmod 2755 ${GIT_REPO}
+
+ # save the ssh key of github into the redmine's user home
+ REDMINE_HOME="`getent passwd ${REDMINE_USER} |cut -d ':' -f 6`"
+ mkdir ${REDMINE_HOME}/.ssh
+ chown ${REDMINE_USER} ${REDMINE_HOME}/.ssh
+ chmod 700 ${REDMINE_HOME}/.ssh
+ ssh-keyscan github.com > ${REDMINE_HOME}/.ssh/known_hosts
+ chown ${REDMINE_USER} ${REDMINE_HOME}/.ssh/known_hosts
+fi
+
+# now let's clone the repos
+for x in ${REMOTE_GIT} ; do
+ # get the repos
+ (cd ${GIT_REPO}; su -c "git clone ${x}" ${REDMINE_USER})
+done
+
+# now let's setup the cronjob
+if [ "${REMOTE_GIT}" != "" ]; then
+ for x in `ls ${GIT_REPO}` ; do
+ echo "*/3 * * * * cd ${GIT_REPO}/${x} && git pull >> /dev/null" >> /tmp/redmine_cronjob
+ done
+
+ chown ${REDMINE_USER} /tmp/redmine_cronjob
+ crontab -u ${REDMINE_USER} /tmp/redmine_cronjob
+ rm /tmp/redmine_cronjob
+fi
+
+
+# since we are using apache2, let's stop it, disable the default web site
+# and enable the needed modules (passenger, ssl and rewrite)
+service apache2 stop
+a2dissite default
+a2dissite default-ssl
+a2enmod passenger
+a2enmod ssl
+a2enmod rewrite
+
+# we need the cert and key for ssl configuration
+${S3CURL} --id ${WALRUS_NAME} --get -- -s $WALRUS_URL/ssl-cert.pem > /etc/ssl/certs/ssl-cert.pem
+chmod 644 /etc/ssl/certs/ssl-cert.pem
+${S3CURL} --id ${WALRUS_NAME} --get -- -s $WALRUS_URL/ssl-cert.key > /etc/ssl/private/ssl-cert.key
+chgrp ssl-cert /etc/ssl/private/ssl-cert.key
+chmod 640 /etc/ssl/private/ssl-cert.key
+
+# let's setup redmine's email access and database
+${S3CURL} --id ${WALRUS_NAME} --get -- -s $WALRUS_URL/email.yml > /etc/redmine/default/email.yml
+chgrp www-data /etc/redmine/default/email.yml
+chmod 640 /etc/redmine/default/email.yml
+${S3CURL} --id ${WALRUS_NAME} --get -- -s $WALRUS_URL/database.yml > /etc/redmine/default/database.yml
+chgrp www-data /etc/redmine/default/database.yml
+chmod 640 /etc/redmine/default/database.yml
+
+# add a theme...
+cd /usr/share/redmine/public/themes
+mkdir -p martini/images
+mkdir -p martini/stylesheets
+${S3CURL} --id ${WALRUS_NAME} --get -- -s $WALRUS_URL/martini/images/loading.gif > martini/images/loading.gif
+${S3CURL} --id ${WALRUS_NAME} --get -- -s $WALRUS_URL/martini/stylesheets/application.css > martini/stylesheets/application.css
+
+# get redmine's configuration file and enable it
+${S3CURL} --id ${WALRUS_NAME} --get -- -s $WALRUS_URL/redmine > /etc/apache2/sites-available/redmine
+a2ensite redmine
+
+# start apache
+service apache2 start
45 bash/debian/varnish.README
@@ -0,0 +1,45 @@
+Varnish Recipe
+---------------
+
+This recipe is for deploying a varnish cache web server for information
+stored in a Walrus bucket. It has been developed to be used with our
+starter images (debian-based). This is what we use for serving tar-gzipped
+EMIs through emis.eucalyptus.com.
+
+Details
+--------
+
+The variables that need to be set in the script are as follows:
+
+CLC_IP => IP address of the Cloud Controller
+WALRUS_IP => IP address of the Walrus
+EC2_ACCESS_KEY => EC2 Access Key (located in eucarc file)
+EC2_SECRET_KEY => EC2 Secret Key (located in eucarc file)
+INSTALL_PATH => Location where configs will be placed when downloading from Walrus
+
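+These are set as exported shell variables near the top of varnish.sh,
+for example (the values are placeholders, as in the script itself):
+
+ export CLC_IP="<ip-address>"
+ export WALRUS_IP="<ip-address>"
+ export EC2_ACCESS_KEY='xxxxxxxxxxxxxxxxxxxxxxxxxx'
+ export EC2_SECRET_KEY='xxxxxxxxxxxxxxxxxxxxxxxxxx'
+ export INSTALL_PATH="/root"
+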
+The script will do the following:
+
+ * updates the packages and repos on the instance (debian-based)
+ * downloads the patched version of s3cmd that works with Eucalyptus Walrus
+ * installs varnish
+ * creates and attaches an EBS volume (used for varnish cache)
+ * downloads varnish configuration files from Walrus bucket, and updates varnish config (for more information about configuring varnish, please see https://www.varnish-cache.org/docs)
+
+After the EBS volume is created, it is formatted with an XFS filesystem, and mounted under /mnt/web-cache.
+
+Usage
+------
+
+Make sure you have a security group which allows the following ports to be open:
+
+ * the port you configure varnish to use for clients to access varnish web cache
+ * port 80 so that varnish can access Walrus
+ * ssh for instance access via ssh
+
+For example:
+
+ euca-add-group -d "varnish security group" varnish
+ euca-authorize -p <varnish port> -P tcp -s 0.0.0.0/0 varnish
+ euca-authorize -p 80 -P tcp -s <ip of walrus>/32 varnish
+ euca-authorize -p 22 -P tcp -s 0.0.0.0/0 varnish
+
+When starting with one of our debian-based images you can do something like
+
+ euca-run-instances -g varnish -k XXXX emi-XXXXXX -f varnish.sh
145 bash/debian/varnish.sh
@@ -0,0 +1,145 @@
+#!/bin/bash
+
+# Script installs varnishd, and attaches a volume for the storage of the cache
+# for varnishd. The configuration files of varnish are stored in Walrus.
+# Additionally, we log progress of everything under /root. The assumption is to
+# have a Debian installation, thus we'll look for Debian's style configuration
+# and modify it accordingly.
+
+# export Cloud Controller IP Address
+export CLC_IP="<ip-address>"
+
+# export Walrus IP
+export WALRUS_IP="<ip-address>"
+
+# variables for euca2ools
+# Define EC2 variables for volume creation
+export EC2_URL="http://${CLC_IP}:8773/services/Eucalyptus"
+export EC2_ACCESS_KEY='xxxxxxxxxxxxxxxxxxxxxxxxxx'
+export EC2_SECRET_KEY='xxxxxxxxxxxxxxxxxxxxxxxxxx'
+
+# location where configs will be downloaded
+export INSTALL_PATH="/root"
+
+# bucket location and file name for varnish configs
+export VARNISH_BUCKET="starter-emis-config"
+export VARNISH_CONFIG_FILENAME="varnish-configs.tgz"
+
+# Activate ec2-consistent-snapshot repo
+apt-key adv --keyserver keyserver.ubuntu.com --recv-keys BE09C571
+
+# Add euca2ools repo
+echo "deb http://eucalyptussoftware.com/downloads/repo/euca2ools/1.3.1/debian squeeze main" > /etc/apt/sources.list.d/euca2ools.list
+
+# Update repositories and packages
+apt-get --force-yes -y update
+apt-get --force-yes -y upgrade
+
+# Install euca2ools, sudo, python-boto, less, ntpdate, and mlocate
+apt-get --force-yes -y install sudo euca2ools python-boto less ntpdate mlocate
+
+# install varnish - HTTP Accelerator
+apt-get --force-yes -y install varnish
+
+# set date
+ntpdate pool.ntp.org
+
+# set up information for s3cmd
+# get s3cmd script
+cd /root
+wget http://173.205.188.8:8773/services/Walrus/s3-tools/s3cmd-0.9.8.3-patched.tgz
+tar -zxf s3cmd-0.9.8.3-patched.tgz
+
+# create s3cmd config file
+echo "Creating s3cmd config file...."
+cat >${INSTALL_PATH}/s3cfg <<EOF
+[default]
+access_key = ${EC2_ACCESS_KEY}
+acl_public = False
+bucket_location = US
+debug_syncmatch = False
+default_mime_type = binary/octet-stream
+delete_removed = False
+dry_run = False
+encrypt = False
+force = False
+gpg_command = /usr/bin/gpg
+gpg_decrypt = %(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
+gpg_encrypt = %(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
+gpg_passphrase =
+guess_mime_type = False
+host_base = ${WALRUS_IP}:8773
+host_bucket = ${WALRUS_IP}:8773
+human_readable_sizes = False
+preserve_attrs = True
+proxy_host =
+proxy_port = 0
+recv_chunk = 4096
+secret_key = ${EC2_SECRET_KEY}
+send_chunk = 4096
+use_https = False
+verbosity = WARNING
+EOF
+chmod 600 ${INSTALL_PATH}/s3cfg
+
+# create volume used for EMI cache, then attach to instance
+
+INSTANCEID=`curl -s -f http://169.254.169.254/latest/meta-data/instance-id`
+VOLID=`euca-create-volume -s 15 -z production | awk '{print $2}'`
+
+printf "%s\n" "Volume ${VOLID} created successfully for ${INSTANCEID}." >> /root/volume-attach-status.txt
+
+if [ -n "${VOLID}" ] ; then
+ STATUS=`euca-describe-volumes ${VOLID} | awk '{print $5}'`
+ printf "%s\n" "Volume $VOLID has status of $STATUS." >> /root/volume-attach-status.txt
+
+ until [ "${STATUS}" = "available" ] ; do
+ sleep 10
+ printf "%s\n" "Volume ${VOLID} has status of ${STATUS}." >> /root/volume-attach-status.txt
+ STATUS=`euca-describe-volumes ${VOLID} | awk '{print $5}'`
+ done
+
+ euca-attach-volume --instance ${INSTANCEID} --device /dev/sdb ${VOLID}
+ while [ ! -e /dev/sdb ] ; do
+ echo "Waiting for volume to attach."
+ sleep 5
+ done
+ echo "Volume attached."
+
+fi
+
+# if attaching was successful, format the block device and mount it for varnish to use.
+if [ -e /dev/sdb ] ; then
+ # format device to xfs, and mount for varnish cache
+ printf "%s\n" "Formatting block device and mounting cache directory for varnish." >> /root/varnish-download-status.txt
+
+ mkfs.xfs /dev/sdb
+ sleep 10
+ echo "/dev/sdb /mnt/web-cache xfs noatime,nobarrier 0 0" | tee -a /etc/fstab
+ mkdir -p /mnt/web-cache
+ mount /mnt/web-cache
+ mkdir /mnt/web-cache/debian
+ chown -R varnish:varnish /mnt/web-cache/debian
+
+ printf "%s\n" "Done formatting and mounting /mnt/web-cache." >> /root/varnish-download-status.txt
+fi
+
+# if the web cache was created successfully, grab varnish configs, then start varnish
+if [ -d /mnt/web-cache ] ; then
+
+ printf "%s\n" "Using s3cmd to download varnish configs..." >> /root/varnish-download-status.txt
+ ./s3cmd-0.9.8.3-patched/s3cmd --config=${INSTALL_PATH}/s3cfg get s3://${VARNISH_BUCKET}/${VARNISH_CONFIG_FILENAME} ${INSTALL_PATH}/varnish-configs.tgz
+
+ if [ -e ${INSTALL_PATH}/varnish-configs.tgz ] ; then
+ echo "Untarring varnish config..."
+ printf "%s\n" "Untarring varnish config..." >> /root/varnish-download-status.txt
+ tar -zxf /root/varnish-configs.tgz
+ fi
+
+ if [ -e ${INSTALL_PATH}/default.vcl ] && [ -e ${INSTALL_PATH}/varnish ] ; then
+ /bin/cp /root/default.vcl /etc/varnish/default.vcl
+ /bin/cp /root/varnish /etc/default/varnish
+ /etc/init.d/varnish restart
+ fi
+
+fi