
Merge branch 'master' into cli-merge

Conflicts:
	.gitignore
2 parents e767f88 + ab3e8b6 · commit 93182d6fb66b8a00c8ad8113c0d405c63a12ed80 · @oldpatricka committed Jul 13, 2011
Showing with 1,965 additions and 26 deletions.
  1. +1 −0 .gitignore
  2. +2 −2 src/python/epumgmt/defaults/cloudinitd_load.py
  3. +4 −1 src/python/epumgmt/defaults/svc_adapter.py
  4. +9 −4 src/python/epumgmt/main/em_core_logfetch.py
  5. +7 −7 src/python/epumgmt/main/em_core_termination.py
  6. +173 −0 src/python/tests/configs/README.txt
  7. +2 −0 src/python/tests/configs/common/beentrue.sh
  8. +30 −0 src/python/tests/configs/common/deps.conf
  9. +77 −0 src/python/tests/configs/level1/basenode.json
  10. +98 −0 src/python/tests/configs/level1/chefsolo.sh
  11. +3 −0 src/python/tests/configs/level1/deps.conf
  12. +7 −0 src/python/tests/configs/level1/level1.conf
  13. +4 −0 src/python/tests/configs/level2/epu-cassandra.sh
  14. +5 −0 src/python/tests/configs/level2/level2.conf
  15. +98 −0 src/python/tests/configs/level3/chefsolo.sh
  16. +10 −0 src/python/tests/configs/level3/deps.conf
  17. +9 −0 src/python/tests/configs/level3/level3.conf
  18. +81 −0 src/python/tests/configs/level3/provisioner.json
  19. +18 −0 src/python/tests/configs/main.conf
  20. +98 −0 src/python/tests/configs/sleeper1/chefsolo.sh
  21. +14 −0 src/python/tests/configs/sleeper1/deps.conf
  22. +93 −0 src/python/tests/configs/sleeper1/one-epu.json
  23. +12 −0 src/python/tests/configs/sleeper1/sleeper1.conf
  24. +24 −0 src/python/tests/configs/sleeper1/state-wait.py
  25. +16 −0 src/python/tests/configs/sleeper2/deps.conf
  26. +12 −0 src/python/tests/configs/sleeper2/sleeper2.conf
  27. +35 −0 src/python/tests/configs/sleeper2/state-wait.py
  28. +142 −0 src/python/tests/configs/sleeper2/two-epus.json
  29. +3 −1 src/python/tests/mocks/common.py
  30. +8 −1 src/python/tests/mocks/event.py
  31. +19 −4 src/python/tests/mocks/modules.py
  32. +9 −0 src/python/tests/mocks/runlogs.py
  33. +110 −0 src/python/tests/test_epumgmt_api.py
  34. +103 −0 src/python/tests/test_epumgmt_default_common.py
  35. +84 −0 src/python/tests/test_epumgmt_defaults_cloudinitd_load.py
  36. +135 −0 src/python/tests/test_epumgmt_defaults_log_events.py
  37. +80 −1 src/python/tests/test_epumgmt_defaults_svc_adapter.py
  38. +126 −0 src/python/tests/test_epumgmt_main_em_core_findworkers.py
  39. +198 −0 src/python/tests/test_epumgmt_main_em_core_logfetch.py
  40. +6 −5 src/python/tests/test_epumgmt_main_em_core_status.py
1 .gitignore
@@ -4,6 +4,7 @@
build
dist
*.egg-info
+*.swp
var/epumgmt/logs/ec*
var/epumgmt/runlogs/*
var/epumgmt/confs_used/*
4 src/python/epumgmt/defaults/cloudinitd_load.py
@@ -9,9 +9,9 @@ def get_cloudinitd_service(cloudinitd, name):
"""Return the cloudinit.d service by exact name match or raise IncompatibleEnvironment"""
if not cloudinitd:
- raise Exception("requires cloudinitd reference")
+ raise ProgrammingError("requires cloudinitd reference")
if not name:
- raise Exception("requires service name")
+ raise ProgrammingError("requires service name")
noservicemsg = "Cannot find the '%s' service in cloudinit.d run '%s'" % (name, cloudinitd.run_name)
try:
aservice = cloudinitd.get_service(name)
5 src/python/epumgmt/defaults/svc_adapter.py
@@ -132,9 +132,12 @@ def worker_state(self, controllers, provisioner_vm):
if not self.is_channel_open():
raise IncompatibleEnvironment("Cannot get worker state without an open channel to the services")
- if not len(controllers):
+ if not controllers or not len(controllers):
raise InvalidInput("Empty controllers service name list")
+ if not provisioner_vm.hostname:
+ raise IncompatibleEnvironment("Cannot get state of provisioner that doesn't (yet) have a hostname")
+
filename = "epu-worker-state-%s" % str(uuid.uuid4())
(abs_homedir, abs_envfile) = \
13 src/python/epumgmt/main/em_core_logfetch.py
@@ -1,5 +1,7 @@
from epumgmt.api.exceptions import *
from epumgmt.main.em_core_load import get_cloudinit
+import epumgmt.defaults.epustates as epustates
+from em_core_status import _find_state_from_events as find_state_from_events
from threading import Thread
@@ -93,7 +95,7 @@ def fetch_by_vm_id(p, c, m, run_name, instanceid):
_fetch_one_vm(p, c, m, run_name, vm)
-def fetch_by_service_name(p, c, m, run_name, servicename):
+def fetch_by_service_name(p, c, m, run_name, servicename, cloudinitd=None):
"""Fetch log files from the VM instance(s) in this run that were started
with the role of this service name.
@@ -119,7 +121,7 @@ def fetch_by_service_name(p, c, m, run_name, servicename):
raise IncompatibleEnvironment("Cannot find any active VMs associated with run '%s' with the service type/name '%s'" % (run_name, servicename))
for vm in vms:
- _fetch_one_vm(p, c, m, run_name, vm)
+ _fetch_one_vm(p, c, m, run_name, vm, cloudinitd=cloudinitd)
# -----------------------------------------------------------------
@@ -135,10 +137,13 @@ def _get_runvms_required(c, m, run_name, cloudinitd):
else:
c.log.warn("Cannot get worker status: there is no channel open to the EPU controllers")
+ run_vms = filter(lambda x: find_state_from_events(x) != epustates.TERMINATED, run_vms)
+
return run_vms
-def _fetch_one_vm(p, c, m, run_name, vm):
+def _fetch_one_vm(p, c, m, run_name, vm, cloudinitd=None):
c.log.info("fetching logs from '%s' instance '%s' (run '%s')" % (vm.service_type, vm.instanceid, run_name))
- cloudinitd = get_cloudinit(p, c, m, run_name)
+ if not cloudinitd:
+ cloudinitd = get_cloudinit(p, c, m, run_name)
scpcmd = m.runlogs.get_scp_command_str(c, vm, cloudinitd)
m.runlogs.fetch_logs(scpcmd)
14 src/python/epumgmt/main/em_core_termination.py
@@ -1,6 +1,6 @@
from epumgmt.api.exceptions import *
from epumgmt.main.em_core_load import get_cloudinit_for_destruction
-import time
+import em_core_logfetch
def terminate(p, c, m, run_name, cloudinitd):
"""Destroy all VM instances that are part of the run.
@@ -13,14 +13,14 @@ def terminate(p, c, m, run_name, cloudinitd):
c.log.warn("Problem with access to the services, cannot terminate workers without this channel")
c.log.info("Killing only the cloudinit.d-launched nodes.")
else:
- if not m.remote_svc_adapter.kill_all_workers():
+ c.log.info("Terminating all workers in run '%s'" % run_name)
+ if m.remote_svc_adapter.kill_all_workers():
+ c.log.info("Terminated all workers in run '%s'" % run_name)
+ else:
raise UnexpectedError("Problem triggering worker termination, you need to make sure these are terminated manually!")
- # TODO: here, we need to make sure the provisioner is done killing things with some mechanism like RPC.
- # This will require some thought and design. For now, this happens fairly instantly if
- # the IaaS service is available, etc. But we should know for sure before proceeding.
- c.log.info("Sent signal to the provisioner, waiting for it to terminate all workers in run '%s'" % run_name)
- time.sleep(5)
+ em_core_logfetch.fetch_by_service_name(p, c, m, run_name, "provisioner")
+ c.log.info("Shutting down all services launched by cloudinit.d for '%s'" % run_name)
# Need a different instantiation of cloudinitd for shutdown
cloudinitd_terminate = get_cloudinit_for_destruction(p, c, m, run_name)
cloudinitd_terminate.shutdown()
173 src/python/tests/configs/README.txt
@@ -0,0 +1,173 @@
+==============================================================================
+
+See the documentation here: http://...
+
+==============================================================================
+
+NOTE: This README and the online documentation discuss *our use* of
+cloudinit.d; we use it in a particular pattern with particular tools (for
+example, Chef, which is not required). If you want to do something
+differently, there is usually a way to make it happen.
+
+==============================================================================
+
+I. Quick guide for the impatient:
+
+Export the following environment variables into your shell:
+
+ # Credentials for Nimbus
+ export CTXBROKER_KEY=`cat ~/.secrets/CTXBROKER_KEY`
+ export CTXBROKER_SECRET=`cat ~/.secrets/CTXBROKER_SECRET`
+ export NIMBUS_KEY=`cat ~/.secrets/NIMBUS_KEY`
+ export NIMBUS_SECRET=`cat ~/.secrets/NIMBUS_SECRET`
+
+ # Credentials for EC2
+ # The provisioner uses these to start worker nodes on EC2 in some situations
+ export AWS_ACCESS_KEY_ID=`cat ~/.secrets/AWS_ACCESS_KEY_ID`
+ export AWS_SECRET_ACCESS_KEY=`cat ~/.secrets/AWS_SECRET_ACCESS_KEY`
+
+ # Credentials for cloudinit.d itself
+ # cloudinit.d uses these to start the base nodes
+ export CLOUDBOOT_IAAS_ACCESS_KEY="$AWS_ACCESS_KEY_ID"
+ export CLOUDBOOT_IAAS_SECRET_KEY="$AWS_SECRET_ACCESS_KEY"
+
+ # Credentials for Cassandra
+ # You make these up
+ export CASSANDRA_USERNAME="mamacass"
+ export CASSANDRA_PASSWORD=`uuidgen`
+
+ # Credentials for RabbitMQ
+ # You make these up
+ export RABBITMQ_USERNAME="easterbunny"
+ export RABBITMQ_PASSWORD=`uuidgen`
+
+ # If you are running your own Cassandra instance outside the launch
+ # plan, this HAS to change every launch.
+ export EXCHANGE_SCOPE="sysname123"
+
+Run:
+
+ RUN_NAME="my_run_name"
+ if [ -n "$EXCHANGE_SCOPE" ]; then
+     RUN_NAME=$EXCHANGE_SCOPE
+ fi
+ cloudinitd boot main.conf -v -v -v -l debug -x -n $RUN_NAME
+
+Inspect:
+
+ epumgmt -a status -n $RUN_NAME
+
+==============================================================================
+
+II. For launch plan authors: conventions
+
+There are three layers of value substitutions to understand.
+
+1. The "deps.conf" files (and "deps-common.conf") contain key/value pairs.
+
+ There are two kinds of values. Examples:
+
+ 1A. Literal
+ epu_git_repo: https://github.com/ooici/epu.git
+
+ 1B. Variable
+ broker_ip_address: ${basenode.hostname}
+
+ In the literal kind, you have a straight string value.
+
+ In the variable kind, you are telling cloudinit.d that another service
+ provides a dynamic value from the launch (in this example, the service
+ called "basenode" provides "hostname" -- when the key "broker_ip_address"
+ is needed later, cloudinit.d will supply the hostname of wherever the
+ "svc-basenode" service ended up running).
+
+2. Then there are the json files.
+
+ These are configuration files for chef-solo that are run on the VM instances
+ that get started. These files are more complicated than simple key/value,
+ but there is the same idea present: some values are literal, others obtained
+ via substitution.
+
+ Any substitution here comes from the *deps files*. For example, if you list
+ "${broker_ip_address}", the value will come from the dep file containing that
+ key. For each service you can explicitly list which deps files are "in play"
+ for that substitution.
+
+ For every cloudinit.d launch, temporary files are created with all of the
+ substitutions enacted. These files are what get transferred to the VM and
+ serve as input to the boot-time contextualization program: in our case this
+ is chef-solo.
+
+3. The third and final layer of substitution is in the chef recipes themselves.
+ These recipes make references to variables in the json files. These json
+ files are sent to the node as literal configuration files. You can always
+ debug a chef recipe by looking at the configuration file that is given to
+ chef-solo and finding the exact string value that was in play.
+
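+For illustration, here is how one value from this sample plan flows through
+the first two layers:
+
+    level1/deps.conf (layer 1; cloudinit.d resolves this at launch time):
+
+        broker_ip_address: ${basenode.hostname}
+
+    provisioner.json fragment (layer 2; substituted from the deps files
+    before the file is transferred to the VM):
+
+        "pythoncc":{
+            "broker":"${broker_ip_address}"
+        }
+
+After substitution, the json that chef-solo receives on the VM contains the
+literal hostname, and the chef recipes (layer 3) read it as an ordinary
+node attribute.
+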
+==============================================================================
+
+III. For launch plan authors: chef json files
+
+Rules for the bootconf json files when using the main recipe "X", which is
+what we use most of the time.
+
+* appretrieve:retrieve_method
+
+ This can have the value 'archive' or 'git'.
+
+ When it is 'archive', the file configured at "appretrieve:archive_url" is
+ retrieved over http and it is assumed to be a tar.gz archive.
+
+ When it is 'git', the following configurations are used:
+ * appretrieve:git_repo
+ * appretrieve:git_branch
+ * appretrieve:git_commit
+
+ Note that those are the controls for the "thing installed".
+
+ All subsequent dependency resolution happens via the dependency lists that
+ come as part of that installation -- by way of the server listed in the
+ "appinstall:package_repo" configuration.
+
+* appinstall:package_repo
+
+ The "thing installed" has a dependency list and this package repository
+ configuration is what is used during the installation process to resolve
+ the dependencies.
+
+* appinstall:install_method
+
+ This can have the following values:
+
+ * py_venv_setup
+ Create a new virtualenv, install using "python setup.py install"
+
+ * py_venv_buildout
+ Create a new virtualenv, install using "bootstrap.py" and "bin/buildout"
+
+ * Future: more options for "burned" setups.
+
+* apprun:run_method
+
+ This can have the following values:
+
+ * sh
+ The old default: create a shell script for each service listed in the
+ "services" section of the json file, then start that script (unless
+ the service is also listed in the "do_not_start" section; for an
+ example, see the provisioner.json file).
+
+ * supervised
+ The new default: each service listed in the "services" section of the json
+ file is watched by a supervisor process, which monitors the unix process
+ and communicates failures off of the machine.
+
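+Putting these together: once the variables are substituted (using values
+from common/deps.conf), the sample basenode.json in this plan ends up with
+a fragment along these lines:
+
+    "appretrieve":{
+        "retrieve_method":"git",
+        "git_repo":"https://github.com/ooici/epu.git",
+        "git_branch":"master",
+        "git_commit":"HEAD"
+    },
+    "appinstall":{
+        "install_method":"py_venv_setup",
+        "package_repo":"http://ooici.net/packages"
+    },
+    "apprun":{
+        "run_method":"supervised"
+    }
+
+(The "archive_url" key is omitted here since it is only used with the
+'archive' retrieve method.)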
+
+==============================================================================
+
+
+
2 src/python/tests/configs/common/beentrue.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+exec /bin/true
30 src/python/tests/configs/common/deps.conf
@@ -0,0 +1,30 @@
+[deps]
+
+# Common ION configurations
+exchange_scope: env.EXCHANGE_SCOPE
+ion_log_level: INFO
+
+# Common retrieval configurations
+package_repo: http://ooici.net/packages
+
+# For EPU nodes
+epu_retrieve_method: git
+epu_archive_name: epu-0.3.1.tar.gz
+epu_git_repo: https://github.com/ooici/epu.git
+epu_git_branch: master
+epu_git_commit: HEAD
+
+# For worker nodes
+integration_retrieve_method: git
+integration_archive_name: integration-0.3.0.tar.gz
+integration_git_repo: https://github.com/ooici/ion-integration.git
+integration_git_branch: develop
+integration_git_commit: HEAD
+
+# cassandra credentials
+cassandra_username: env.CASSANDRA_USERNAME
+cassandra_password: env.CASSANDRA_PASSWORD
+
+# rabbitmq credentials
+rabbitmq_username: env.RABBITMQ_USERNAME
+rabbitmq_password: env.RABBITMQ_PASSWORD
77 src/python/tests/configs/level1/basenode.json
@@ -0,0 +1,77 @@
+{
+ "cassandra":{
+ "authenticator":"org.apache.cassandra.auth.SimpleAuthenticator",
+ "users":{"${cassandra_username}":"${cassandra_password}"},
+ "rpc_address":"0.0.0.0"
+ },
+ "rabbitmq":{
+ "vhosts":["/"],
+ "users":{
+ "${rabbitmq_username}":{
+ "password":"${rabbitmq_password}",
+ "permissions":{"/":{"conf":".*", "write":".*", "read":".*"}}
+ }
+ }
+ },
+ "ioncontainers": {
+ "queuestat-container": [
+ {
+ "name": "epu-queuestat",
+ "args": { },
+ "config": {
+ "epu.ionproc.queuestat":{"interval_seconds": "3.0"}
+ },
+ "version": "0.1"
+ }
+ ]
+ },
+ "local_app_confs":{
+ "epu.cassandra":{
+ "hostname":"localhost",
+ "port":9160,
+ "username":"${cassandra_username}",
+ "password":"${cassandra_password}",
+ "keyspace":"${exchange_scope}"
+ }
+ },
+ "universal_app_confs":{
+ "broker_host_list":"127.0.0.1",
+ "sysname":"${exchange_scope}"
+ },
+ "virtualenv": {
+ "path" : "/home/basenode/app-venv"
+ },
+ "pythoncc":{
+ "broker":"127.0.0.1",
+ "sysname":"${exchange_scope}",
+ "broker_heartbeat":"5",
+ "broker_username":"${rabbitmq_username}",
+ "broker_password":"${rabbitmq_password}",
+ "log_level":"${ion_log_level}"
+ },
+ "appretrieve":{
+ "retrieve_method":"${epu_retrieve_method}",
+ "archive_url":"${package_repo}/${epu_archive_name}",
+ "git_repo":"${epu_git_repo}",
+ "git_branch":"${epu_git_branch}",
+ "git_commit":"${epu_git_commit}"
+ },
+ "appinstall":{
+ "install_method":"py_venv_setup",
+ "package_repo":"${package_repo}"
+ },
+ "apprun":{
+ "run_method":"supervised"
+ },
+ "dtdata":{
+ "retrieve_method":"git",
+ "archive_url":"http://ooici.net/releases/dt-data-0.1.tar.gz",
+ "git_repo":"https://github.com/oldpatricka/dt-data.git",
+ "git_branch":"master",
+ "git_commit":"HEAD",
+ "chef_debug_level":"debug"
+ },
+ "username":"basenode",
+ "groupname":"users",
+ "recipes":["user", "rabbitmq", "rabbitmq_cookie", "cassandra", "r1app"]
+}
98 src/python/tests/configs/level1/chefsolo.sh
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+GIT_URL="https://github.com/oldpatricka/dt-data.git"
+GIT_BRANCH="master"
+CHEF_LOGLEVEL="info"
+
+# ========================================================================
+
+CMDPREFIX=""
+if [ `id -u` -ne 0 ]; then
+ CMDPREFIX="sudo "
+fi
+
+if [ ! -d /opt ]; then
+ $CMDPREFIX mkdir /opt
+ if [ $? -ne 0 ]; then
+ exit 1
+ fi
+fi
+
+if [ -d /opt/dt-data ]; then
+ (cd /opt/dt-data && $CMDPREFIX git fetch)
+ if [ $? -ne 0 ]; then
+ exit 1
+ fi
+else
+ (cd /opt && $CMDPREFIX git clone $GIT_URL )
+ if [ $? -ne 0 ]; then
+ exit 1
+ fi
+fi
+
+(cd /opt/dt-data && $CMDPREFIX git checkout $GIT_BRANCH )
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+(cd /opt/dt-data && $CMDPREFIX git pull )
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+
+echo "Retrieved the dt-data repository, HEAD is currently:"
+(cd /opt/dt-data && $CMDPREFIX git rev-parse HEAD)
+echo ""
+
+$CMDPREFIX mkdir -p /opt/dt-data/run
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+$CMDPREFIX mv bootconf.json /opt/dt-data/run/basenodechefroles.json
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+cat >> chefconf.rb << "EOF"
+cookbook_path "/opt/dt-data/cookbooks"
+log_level :info
+file_store_path "/opt/dt-data/tmp"
+file_cache_path "/opt/dt-data/tmp"
+Chef::Log::Formatter.show_time = false
+
+EOF
+
+$CMDPREFIX mv chefconf.rb /opt/dt-data/run/basenodechefconf.rb
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+cat >> rerun-basenodechef.sh << "EOF"
+#!/bin/bash
+CHEFLEVEL="info"
+if [ "X" != "X$1" ]; then
+ CHEFLEVEL=$1
+fi
+rm -rf /home/basenode/app
+rm -rf /home/basenode/app-venv
+chef-solo -l $CHEFLEVEL -c /opt/dt-data/run/basenodechefconf.rb -j /opt/dt-data/run/basenodechefroles.json
+exit $?
+EOF
+
+chmod +x rerun-basenodechef.sh
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+$CMDPREFIX mv rerun-basenodechef.sh /opt/rerun-basenodechef.sh
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+echo "Running chef-solo"
+$CMDPREFIX /opt/rerun-basenodechef.sh #debug
+if [ $? -ne 0 ]; then
+ exit 1
+fi
3 src/python/tests/configs/level1/deps.conf
@@ -0,0 +1,3 @@
+[deps]
+broker_ip_address: ${basenode.hostname}
+cassandra_hostname: ${basenode.hostname}
7 src/python/tests/configs/level1/level1.conf
@@ -0,0 +1,7 @@
+[svc-basenode]
+bootconf: basenode.json
+bootpgm: chefsolo.sh
+readypgm: ../common/beentrue.sh
+deps: ../common/deps.conf
+
+scp_username: basenode
4 src/python/tests/configs/level2/epu-cassandra.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+cd /home/basenode/app
+sudo -u basenode /home/basenode/app/scripts/run_under_env.sh /home/basenode/app-venv/bin/activate epu-cassandra-schema
5 src/python/tests/configs/level2/level2.conf
@@ -0,0 +1,5 @@
+[svc-cassandra_schema]
+hostname: ${basenode.hostname}
+bootpgm: epu-cassandra.sh
+readypgm: ../common/beentrue.sh
+scp_username: basenode
98 src/python/tests/configs/level3/chefsolo.sh
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+GIT_URL="https://github.com/oldpatricka/dt-data.git"
+GIT_BRANCH="master"
+CHEF_LOGLEVEL="info"
+
+# ========================================================================
+
+CMDPREFIX=""
+if [ `id -u` -ne 0 ]; then
+ CMDPREFIX="sudo "
+fi
+
+if [ ! -d /opt ]; then
+ $CMDPREFIX mkdir /opt
+ if [ $? -ne 0 ]; then
+ exit 1
+ fi
+fi
+
+if [ -d /opt/dt-data ]; then
+ (cd /opt/dt-data && $CMDPREFIX git fetch)
+ if [ $? -ne 0 ]; then
+ exit 1
+ fi
+else
+ (cd /opt && $CMDPREFIX git clone $GIT_URL )
+ if [ $? -ne 0 ]; then
+ exit 1
+ fi
+fi
+
+(cd /opt/dt-data && $CMDPREFIX git checkout $GIT_BRANCH )
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+(cd /opt/dt-data && $CMDPREFIX git pull )
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+
+echo "Retrieved the dt-data repository, HEAD is currently:"
+(cd /opt/dt-data && $CMDPREFIX git rev-parse HEAD)
+echo ""
+
+$CMDPREFIX mkdir -p /opt/dt-data/run
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+$CMDPREFIX mv bootconf.json /opt/dt-data/run/provisionerchefroles.json
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+cat >> chefconf.rb << "EOF"
+cookbook_path "/opt/dt-data/cookbooks"
+log_level :info
+file_store_path "/opt/dt-data/tmp"
+file_cache_path "/opt/dt-data/tmp"
+Chef::Log::Formatter.show_time = false
+
+EOF
+
+$CMDPREFIX mv chefconf.rb /opt/dt-data/run/provisionerchefconf.rb
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+cat >> rerun-provisionerchef.sh << "EOF"
+#!/bin/bash
+CHEFLEVEL="info"
+if [ "X" != "X$1" ]; then
+ CHEFLEVEL=$1
+fi
+rm -rf /home/cc/app
+rm -rf /home/cc/app-venv
+chef-solo -l $CHEFLEVEL -c /opt/dt-data/run/provisionerchefconf.rb -j /opt/dt-data/run/provisionerchefroles.json
+exit $?
+EOF
+
+chmod +x rerun-provisionerchef.sh
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+$CMDPREFIX mv rerun-provisionerchef.sh /opt/rerun-provisionerchef.sh
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+echo "Running chef-solo"
+$CMDPREFIX /opt/rerun-provisionerchef.sh #debug
+if [ $? -ne 0 ]; then
+ exit 1
+fi
10 src/python/tests/configs/level3/deps.conf
@@ -0,0 +1,10 @@
+[deps]
+
+ctxbroker_key: env.CTXBROKER_KEY
+ctxbroker_secret: env.CTXBROKER_SECRET
+ec2_key: env.AWS_ACCESS_KEY_ID
+ec2_secret: env.AWS_SECRET_ACCESS_KEY
+
+# Unused in this sample plan:
+nimbus_key: env.NIMBUS_KEY
+nimbus_secret: env.NIMBUS_SECRET
9 src/python/tests/configs/level3/level3.conf
@@ -0,0 +1,9 @@
+[svc-provisioner]
+hostname: ${basenode.hostname}
+bootconf: provisioner.json
+bootpgm: chefsolo.sh
+readypgm: ../common/beentrue.sh
+deps1: ../common/deps.conf
+deps2: ../level1/deps.conf
+deps3: deps.conf
+scp_username: cc
81 src/python/tests/configs/level3/provisioner.json
@@ -0,0 +1,81 @@
+{
+ "ioncontainers":{
+ "provisioner-query-container": [
+ {
+ "name": "epu-provisioner-query",
+ "args" : {},
+ "config": {}
+ }
+ ],
+ "provisioner-container1": [
+ {
+ "name": "epu-provisioner",
+ "args": { },
+ "config": {
+ "epu.ionproc.provisioner":{
+ "context_uri":"https://nimbus.ci.uchicago.edu:8888/ContextBroker/ctx/",
+ "context_key":"${ctxbroker_key}",
+ "context_secret":"${ctxbroker_secret}",
+ "sites":{
+ "ec2-west":{
+ "driver_class":"libcloud.drivers.ec2.EC2USWestNodeDriver",
+ "driver_kwargs":{
+ "key":"${ec2_key}",
+ "secret":"${ec2_secret}"
+ }
+ }
+ },
+ "cassandra_hostname":"${cassandra_hostname}",
+ "cassandra_username":"${cassandra_username}",
+ "cassandra_password":"${cassandra_password}",
+ "cassandra_keyspace":"${exchange_scope}",
+ "cassandra_port":9160
+ }
+ },
+ "version": "0.1"
+ }
+ ],
+ "dtrs-container": [
+ {
+ "name": "epu-dtrs",
+ "args": { },
+ "config": { },
+ "version": "0.1"
+ }
+ ]
+ },
+ "local_app_confs":{
+ },
+ "universal_app_confs":{
+ "broker_host_list":"${broker_ip_address}",
+ "sysname":"${exchange_scope}"
+ },
+ "virtualenv": {
+ "path" : "/home/cc/app-venv"
+ },
+ "pythoncc":{
+ "broker":"${broker_ip_address}",
+ "sysname":"${exchange_scope}",
+ "broker_heartbeat":"5",
+ "broker_username":"${rabbitmq_username}",
+ "broker_password":"${rabbitmq_password}",
+ "log_level":"${ion_log_level}"
+ },
+ "appretrieve":{
+ "retrieve_method":"${epu_retrieve_method}",
+ "archive_url":"${package_repo}/${epu_archive_name}",
+ "git_repo":"${epu_git_repo}",
+ "git_branch":"${epu_git_branch}",
+ "git_commit":"${epu_git_commit}"
+ },
+ "appinstall":{
+ "install_method":"py_venv_setup",
+ "package_repo":"${package_repo}"
+ },
+ "apprun":{
+ "run_method":"supervised"
+ },
+ "username":"cc",
+ "groupname":"users",
+ "recipes":["r1app"]
+}
18 src/python/tests/configs/main.conf
@@ -0,0 +1,18 @@
+[defaults]
+iaas_key: env.CLOUDBOOT_IAAS_ACCESS_KEY
+iaas_secret: env.CLOUDBOOT_IAAS_SECRET_KEY
+
+image: ami-0bbaea4e
+iaas: us-west-1
+allocation: m1.large
+sshkeyname: ooi
+localsshkeypath: ~/.ssh/ooi-west.pem
+ssh_username: ubuntu
+scp_username: cc
+
+[runlevels]
+level1: level1/level1.conf
+level2: level2/level2.conf
+level3: level3/level3.conf
+level4: sleeper1/sleeper1.conf
+#level4: sleeper2/sleeper2.conf
98 src/python/tests/configs/sleeper1/chefsolo.sh
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+GIT_URL="https://github.com/oldpatricka/dt-data.git"
+GIT_BRANCH="master"
+CHEF_LOGLEVEL="info"
+
+# ========================================================================
+
+CMDPREFIX=""
+if [ `id -u` -ne 0 ]; then
+ CMDPREFIX="sudo "
+fi
+
+if [ ! -d /opt ]; then
+ $CMDPREFIX mkdir /opt
+ if [ $? -ne 0 ]; then
+ exit 1
+ fi
+fi
+
+if [ -d /opt/dt-data ]; then
+ (cd /opt/dt-data && $CMDPREFIX git fetch)
+ if [ $? -ne 0 ]; then
+ exit 1
+ fi
+else
+ (cd /opt && $CMDPREFIX git clone $GIT_URL )
+ if [ $? -ne 0 ]; then
+ exit 1
+ fi
+fi
+
+(cd /opt/dt-data && $CMDPREFIX git checkout $GIT_BRANCH )
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+(cd /opt/dt-data && $CMDPREFIX git pull )
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+
+echo "Retrieved the dt-data repository, HEAD is currently:"
+(cd /opt/dt-data && $CMDPREFIX git rev-parse HEAD)
+echo ""
+
+$CMDPREFIX mkdir -p /opt/dt-data/run
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+$CMDPREFIX mv bootconf.json /opt/dt-data/run/controllerschefroles.json
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+cat >> chefconf.rb << "EOF"
+cookbook_path "/opt/dt-data/cookbooks"
+log_level :info
+file_store_path "/opt/dt-data/tmp"
+file_cache_path "/opt/dt-data/tmp"
+Chef::Log::Formatter.show_time = false
+
+EOF
+
+$CMDPREFIX mv chefconf.rb /opt/dt-data/run/controllerschefconf.rb
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+cat >> rerun-controllerschef.sh << "EOF"
+#!/bin/bash
+CHEFLEVEL="info"
+if [ "X" != "X$1" ]; then
+ CHEFLEVEL=$1
+fi
+rm -rf /home/controllers/app
+rm -rf /home/controllers/app-venv
+chef-solo -l $CHEFLEVEL -c /opt/dt-data/run/controllerschefconf.rb -j /opt/dt-data/run/controllerschefroles.json
+exit $?
+EOF
+
+chmod +x rerun-controllerschef.sh
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+$CMDPREFIX mv rerun-controllerschef.sh /opt/rerun-controllerschef.sh
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+echo "Running chef-solo"
+$CMDPREFIX /opt/rerun-controllerschef.sh #debug
+if [ $? -ne 0 ]; then
+ exit 1
+fi
14 src/python/tests/configs/sleeper1/deps.conf
@@ -0,0 +1,14 @@
+[deps]
+
+# Any EPU controller AMQP service name in the system must use a dependency
+# key that begins with "epu_controller". This convention means you don't
+# need to manually query the system.
+
+epu_controller1_servicename: epu_controller_sleeper1
+
+worker_deployable_type: sleeper
+queue1_name_work: sleeper1
+worker_id: ami-0bbaea4e
+worker_iaas_site: ec2-west
+worker_allocation: m1.large
+preserve_n: 1
93 src/python/tests/configs/sleeper1/one-epu.json
@@ -0,0 +1,93 @@
+{
+ "ioncontainers":{
+ "producer1-container": [
+ {
+ "name": "epu-work-producer",
+ "args": { },
+ "version": "0.1",
+ "config": {
+ "epu.ionproc.epu_work_producer":{
+ "servicename":"sleepproducer1",
+ "queue_name_work":"${queue1_name_work}",
+ "listen_port":"8001"
+ }
+ }
+ }
+ ],
+ "sleeper1-container": [
+ {
+ "name": "epu-controller",
+ "args": { },
+ "version": "0.1",
+ "config": {
+ "epu.ionproc.epu_controller":{
+ "servicename":"${epu_controller1_servicename}",
+ "queue_name_work":"${queue1_name_work}",
+ "engine_class":"epu.decisionengine.impls.NpreservingEngine",
+ "engine_conf" : {
+ "preserve_n":"${preserve_n}",
+ "force_site":"${worker_iaas_site}",
+ "epuworker_type":"${worker_deployable_type}",
+ "provisioner_vars" : {
+ "exchange_scope":"${exchange_scope}",
+ "epuworker_image_id":"${worker_id}",
+ "queue_name_work":"${queue1_name_work}",
+ "broker_ip_address":"${broker_ip_address}",
+ "broker_username":"${rabbitmq_username}",
+ "broker_password":"${rabbitmq_password}",
+ "retrieve_method":"${epu_retrieve_method}",
+ "archive_url":"${package_repo}/${epu_archive_name}",
+ "package_repo":"${package_repo}",
+ "git_repo":"${epu_git_repo}",
+ "git_branch":"${epu_git_branch}",
+ "git_commit":"${epu_git_commit}",
+ "ion_log_level":"${ion_log_level}"
+ }
+ }
+ }
+ }
+ }
+ ]
+ },
+ "universal_app_confs":{
+ "broker_host_list":"${broker_ip_address}",
+ "sysname":"${exchange_scope}"
+ },
+ "local_app_confs":{
+ "epu.cassandra":{
+ "hostname":"${cassandra_hostname}",
+ "port":9160,
+ "username":"${cassandra_username}",
+ "password":"${cassandra_password}",
+ "keyspace":"${exchange_scope}"
+ }
+ },
+ "virtualenv": {
+ "path" : "/home/controllers/app-venv"
+ },
+ "pythoncc":{
+ "broker":"${broker_ip_address}",
+ "sysname":"${exchange_scope}",
+ "broker_heartbeat":"5",
+ "broker_username":"${rabbitmq_username}",
+ "broker_password":"${rabbitmq_password}",
+ "log_level":"${ion_log_level}"
+ },
+ "appretrieve":{
+ "retrieve_method":"${epu_retrieve_method}",
+ "archive_url":"${package_repo}/${epu_archive_name}",
+ "git_repo":"${epu_git_repo}",
+ "git_branch":"${epu_git_branch}",
+ "git_commit":"${epu_git_commit}"
+ },
+ "appinstall":{
+ "install_method":"py_venv_setup",
+ "package_repo":"${package_repo}"
+ },
+ "apprun":{
+ "run_method":"supervised"
+ },
+ "username":"controllers",
+ "groupname":"users",
+ "recipes":["user", "r1app"]
+}
12 src/python/tests/configs/sleeper1/sleeper1.conf
@@ -0,0 +1,12 @@
+[svc-epu-onesleeper]
+hostname: ${basenode.hostname}
+bootpgm: chefsolo.sh
+readypgm: ../common/beentrue.sh
+deps1: ../common/deps.conf
+deps2: ../level1/deps.conf
+deps3: deps.conf
+readypgm: state-wait.py
+
+bootconf: one-epu.json
+
+scp_username: controllers
24 src/python/tests/configs/sleeper1/state-wait.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# We will have to make something that can automatically determine a list of EPU
+# controllers to wait for in each level. For now, hardcoding the name here.
+
+CONTROLLER="epu_controller_sleeper1"
+
+APP_DIR="/home/controllers/app"
+
+MESSAGING_CONF="/home/ubuntu/messaging.conf"
+VENV_PYTHON="sudo /home/controllers/app-venv/bin/python"
+
+import os
+import subprocess
+import sys
+
+run = [VENV_PYTHON, "./scripts/epu-state-wait", MESSAGING_CONF, CONTROLLER]
+runcmd = ' '.join(run)
+print runcmd
+retcode = subprocess.call(runcmd, shell=True, cwd=APP_DIR, stderr=subprocess.STDOUT)
+
+if retcode:
+ print "Problem waiting for EPU controller stable state for '%s'" % CONTROLLER
+sys.exit(retcode)
16 src/python/tests/configs/sleeper2/deps.conf
@@ -0,0 +1,16 @@
+[deps]
+
+# Any EPU controller AMQP service name in the system must use a dependency
+# key that begins with "epu_controller". This convention means you don't
+# need to manually query the system.
+
+epu_controller1_servicename: epu_controller_sleeper1
+epu_controller2_servicename: epu_controller_sleeper2
+
+worker_deployable_type: sleeper
+queue1_name_work: sleeper1
+queue2_name_work: sleeper2
+worker_id: ami-0bbaea4e
+worker_iaas_site: ec2-west
+worker_allocation: m1.large
+preserve_n: 1
12 src/python/tests/configs/sleeper2/sleeper2.conf
@@ -0,0 +1,12 @@
+[svc-epu-twosleepers]
+hostname: ${basenode.hostname}
+bootpgm: ../sleeper1/chefsolo.sh
+readypgm: ../common/beentrue.sh
+deps1: ../common/deps.conf
+deps2: ../level1/deps.conf
+deps3: deps.conf
+readypgm: state-wait.py
+
+bootconf: two-epus.json
+
+scp_username: controllers
35 src/python/tests/configs/sleeper2/state-wait.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+# We will have to make something that can automatically determine a list of EPU
+# controllers to wait for in each level. For now, hardcoding the name here.
+
+CONTROLLER="epu_controller_sleeper1"
+CONTROLLER2="epu_controller_sleeper2"
+
+APP_DIR="/home/controllers/app"
+
+MESSAGING_CONF="/home/ubuntu/messaging.conf"
+VENV_PYTHON="sudo /home/controllers/app-venv/bin/python"
+
+import os
+import subprocess
+import sys
+
+run = [VENV_PYTHON, "./scripts/epu-state-wait", MESSAGING_CONF, CONTROLLER]
+runcmd = ' '.join(run)
+print runcmd
+retcode = subprocess.call(runcmd, shell=True, cwd=APP_DIR, stderr=subprocess.STDOUT)
+
+if retcode:
+ print "Problem waiting for EPU controller stable state for '%s'" % CONTROLLER
+ sys.exit(retcode)
+
+run = [VENV_PYTHON, "./scripts/epu-state-wait", MESSAGING_CONF, CONTROLLER2]
+runcmd = ' '.join(run)
+print runcmd
+retcode = subprocess.call(runcmd, shell=True, cwd=APP_DIR, stderr=subprocess.STDOUT)
+
+if retcode:
+ print "Problem waiting for EPU controller stable state for '%s'" % CONTROLLER2
+
+sys.exit(retcode)
142 src/python/tests/configs/sleeper2/two-epus.json
@@ -0,0 +1,142 @@
+{
+ "ioncontainers":{
+ "producer1-container": [
+ {
+ "name": "epu-work-producer",
+ "args": { },
+ "version": "0.1",
+ "config": {
+ "epu.ionproc.epu_work_producer":{
+ "servicename":"sleepproducer1",
+ "queue_name_work":"${queue1_name_work}",
+ "listen_port":"8001"
+ }
+ }
+ }
+ ],
+ "producer2-container": [
+ {
+ "name": "epu-work-producer",
+ "args": { },
+ "version": "0.1",
+ "config": {
+ "epu.ionproc.epu_work_producer":{
+ "servicename":"sleepproducer2",
+ "queue_name_work":"${queue2_name_work}",
+ "listen_port":"8002"
+ }
+ }
+ }
+ ],
+ "sleeper1-container": [
+ {
+ "name": "epu-controller",
+ "args": { },
+ "version": "0.1",
+ "config": {
+ "epu.ionproc.epu_controller":{
+ "servicename":"${epu_controller1_servicename}",
+ "queue_name_work":"${queue1_name_work}",
+ "engine_class":"epu.decisionengine.impls.NpreservingEngine",
+ "engine_conf" : {
+ "preserve_n":"${preserve_n}",
+ "force_site":"${worker_iaas_site}",
+ "epuworker_type":"${worker_deployable_type}",
+ "provisioner_vars" : {
+ "exchange_scope":"${exchange_scope}",
+ "epuworker_image_id":"${worker_id}",
+ "queue_name_work":"${queue1_name_work}",
+ "broker_ip_address":"${broker_ip_address}",
+ "broker_username":"${rabbitmq_username}",
+ "broker_password":"${rabbitmq_password}",
+ "retrieve_method":"${epu_retrieve_method}",
+ "archive_url":"${package_repo}/${epu_archive_name}",
+ "package_repo":"${package_repo}",
+ "git_repo":"${epu_git_repo}",
+ "git_branch":"${epu_git_branch}",
+ "git_commit":"${epu_git_commit}",
+ "ion_log_level":"${ion_log_level}"
+ }
+ }
+ }
+ }
+ }
+ ],
+ "sleeper2-container": [
+ {
+ "name": "epu-controller",
+ "args": { },
+ "version": "0.1",
+ "config": {
+ "epu.ionproc.epu_controller":{
+ "servicename":"${epu_controller2_servicename}",
+ "queue_name_work":"${queue2_name_work}",
+ "engine_class":"epu.decisionengine.impls.NpreservingEngine",
+ "engine_conf" : {
+ "preserve_n":"${preserve_n}",
+ "force_site":"${worker_iaas_site}",
+ "epuworker_type":"${worker_deployable_type}",
+ "monitor_health":"True",
+ "provisioner_vars" : {
+ "exchange_scope":"${exchange_scope}",
+ "epuworker_image_id":"${worker_id}",
+ "queue_name_work":"${queue2_name_work}",
+ "broker_ip_address":"${broker_ip_address}",
+ "broker_username":"${rabbitmq_username}",
+ "broker_password":"${rabbitmq_password}",
+ "retrieve_method":"${epu_retrieve_method}",
+ "archive_url":"${package_repo}/${epu_archive_name}",
+ "package_repo":"${package_repo}",
+ "git_repo":"${epu_git_repo}",
+ "git_branch":"${epu_git_branch}",
+ "git_commit":"${epu_git_commit}",
+ "ion_log_level":"${ion_log_level}"
+ }
+ }
+ }
+ }
+ }
+ ]
+ },
+ "universal_app_confs":{
+ "broker_host_list":"${broker_ip_address}",
+ "sysname":"${exchange_scope}"
+ },
+ "local_app_confs":{
+ "epu.cassandra":{
+ "hostname":"${cassandra_hostname}",
+ "port":9160,
+ "username":"${cassandra_username}",
+ "password":"${cassandra_password}",
+ "keyspace":"${exchange_scope}"
+ }
+ },
+ "virtualenv": {
+ "path" : "/home/controllers/app-venv"
+ },
+ "pythoncc":{
+ "broker":"${broker_ip_address}",
+ "sysname":"${exchange_scope}",
+ "broker_heartbeat":"5",
+ "broker_username":"${rabbitmq_username}",
+ "broker_password":"${rabbitmq_password}",
+ "log_level":"${ion_log_level}"
+ },
+ "appretrieve":{
+ "retrieve_method":"${epu_retrieve_method}",
+ "archive_url":"${package_repo}/${epu_archive_name}",
+ "git_repo":"${epu_git_repo}",
+ "git_branch":"${epu_git_branch}",
+ "git_commit":"${epu_git_commit}"
+ },
+ "appinstall":{
+ "install_method":"py_venv_setup",
+ "package_repo":"${package_repo}"
+ },
+ "apprun":{
+ "run_method":"supervised"
+ },
+ "username":"controllers",
+ "groupname":"users",
+ "recipes":["user", "r1app"]
+}
4 src/python/tests/mocks/common.py
@@ -22,6 +22,8 @@ class FakeCommon():
"""
- def __init__(self):
+ def __init__(self, p=None):
self.log = FakeLog()
+ self.trace = False
+ self.p = p
9 src/python/tests/mocks/event.py
@@ -4,7 +4,8 @@ class Event:
"""
def __init__(self, name="", timestamp="", state=None, source="",
- last_queuelen_size=None, de_state=None):
+ last_queuelen_size=None, de_state=None, iaas_id=None,
+ node_id=None, public_ip=None):
self.name = name
self.timestamp = timestamp
self.source = source
@@ -15,3 +16,9 @@ def __init__(self, name="", timestamp="", state=None, source="",
self.extra["last_queuelen_size"] = last_queuelen_size
if de_state:
self.extra["de_state"] = de_state
+ if public_ip:
+ self.extra["public_ip"] = public_ip
+ if iaas_id:
+ self.extra["iaas_id"] = iaas_id
+ if node_id:
+ self.extra["node_id"] = node_id
23 src/python/tests/mocks/modules.py
@@ -1,15 +1,30 @@
-
class FakePersistence:
def __init__(self):
- self.vm_store = []
+ self.vm_store = {}
def store_run_vms(self, run_name, vms):
- self.vm_store.extend(vms)
+ if not self.vm_store.has_key(run_name):
+ self.vm_store[run_name] = []
+
+ self.vm_store[run_name].extend(vms)
+
+ def get_run_vms_or_none(self, run_name):
+ return self.vm_store[run_name]
class FakeModules:
- def __init__(self, remote_svc_adapter=None):
+ def __init__(self, remote_svc_adapter=None, runlogs=None):
self.remote_svc_adapter = remote_svc_adapter
self.persistence = FakePersistence()
+ self.runlogs = runlogs
+
+def build_fake_scp_command_str(target, real_scp_command_str):
+ def fake_scp_command_str(target, c, vm, cloudinitd):
+ scpcmd = real_scp_command_str(c, vm, cloudinitd)
+ scpcmd = "echo %s" % scpcmd
+ return scpcmd
+
+ return fake_scp_command_str
+
9 src/python/tests/mocks/runlogs.py
@@ -0,0 +1,9 @@
+
+class FakeRunlogs:
+
+ def __init__(self):
+
+ self.vms = []
+
+ def new_vm(self, vm):
+ self.vms.append(vm)
110 src/python/tests/test_epumgmt_api.py
@@ -0,0 +1,110 @@
+import os
+import shutil
+import tempfile
+import ConfigParser
+import epumgmt.api
+
+class TestEpumgmtAPI:
+
+ def setup(self):
+
+ class FakeOpts:
+ name = None
+ self.opts = FakeOpts()
+ self.opts.name = "testname"
+
+ self.epu_home = tempfile.mkdtemp()
+ conf_dir = os.path.join(self.epu_home, "etc/epumgmt")
+ os.makedirs(conf_dir)
+ self.main_conf = os.path.join(conf_dir, "main.conf")
+ self.dirs_conf = os.path.join(conf_dir, "dirs.conf")
+ self.internal_conf = os.path.join(conf_dir, "internal.conf")
+
+ main_conf_str = "[otherconfs]\ndirs: dirs.conf\ninternal: internal.conf"
+
+ self.var_dir = "/var/"
+ dirs_conf_str = "[ecdirs]\nvar: %s" % self.var_dir
+
+ internal_conf_str = "[emimpls]\n"
+ internal_conf_str += "Parameters: epumgmt.defaults.DefaultParameters\n"
+ internal_conf_str += "Common: mocks.common.FakeCommon\n"
+
+ with open(self.main_conf, "w") as main:
+ main.write(main_conf_str)
+
+ with open(self.dirs_conf, "w") as dirs:
+ dirs.write(dirs_conf_str)
+
+ with open(self.internal_conf, "w") as internal:
+ internal.write(internal_conf_str)
+
+ def teardown(self):
+ shutil.rmtree(self.epu_home)
+
+ def test_get_default_config(self):
+ from epumgmt.api import get_default_config
+
+ epumgmt_home = "/path/to/epumgmt"
+ default_conf_rel = "etc/epumgmt/main.conf"
+ default_config = os.path.join(epumgmt_home, default_conf_rel)
+ os.environ["EPUMGMT_HOME"] = epumgmt_home
+
+ assert get_default_config() == default_config
+
+ def test_get_default_ac(self):
+ from epumgmt.api import get_default_ac
+ from epumgmt.api.exceptions import InvalidConfig
+
+ os.environ["EPUMGMT_HOME"] = self.epu_home
+
+ ac = get_default_ac()
+
+ assert ac.has_section("ecdirs")
+ assert ac.has_option("ecdirs", "var")
+ assert ac.get("ecdirs", "var") == self.var_dir
+
+
+ def test_get_parameters(self):
+ from epumgmt.api import get_parameters
+ from epumgmt.defaults.parameters import DefaultParameters
+
+
+ ac = ConfigParser.ConfigParser()
+ ac.add_section("emimpls")
+ ac.set("emimpls", "Parameters", "epumgmt.defaults.parameters.DefaultParameters")
+
+ # Test whether we can pass in an allconfigs object
+ p, ret_ac = get_parameters(self.opts, ac)
+
+ default_params_class = DefaultParameters(None, None).__class__
+ assert p.__class__ == default_params_class
+ assert p.get_arg_or_none("name") == self.opts.name
+
+ # Test whether we can get allconfigs from env
+ os.environ["EPUMGMT_HOME"] = self.epu_home
+ p, ret_ac = get_parameters(self.opts)
+
+ assert ret_ac.has_section("emimpls")
+ assert p.__class__ == default_params_class
+ assert p.get_arg_or_none("name") == self.opts.name
+
+
+ def test_get_common(self):
+ from epumgmt.api import get_common
+ from mocks.common import FakeCommon
+
+ try:
+ get_common()
+ except Exception:
+ no_opts_nor_p_exception = True
+
+ assert no_opts_nor_p_exception
+
+ common_class = FakeCommon().__class__
+
+ os.environ["EPUMGMT_HOME"] = self.epu_home
+ c, p, ac = get_common(opts=self.opts)
+
+ assert c.__class__ == common_class
+ assert p.get_arg_or_none("name") == self.opts.name
+ assert ac.has_section("emimpls")
103 src/python/tests/test_epumgmt_default_common.py
@@ -0,0 +1,103 @@
+import os
+import shutil
+import logging
+import tempfile
+import ConfigParser
+
+import epumgmt.defaults.common
+from epumgmt.defaults import DefaultCommon
+from epumgmt.defaults import DefaultParameters
+from epumgmt.api.exceptions import InvalidConfig, UnexpectedError
+
+class TestDefaultCommon:
+
+ def setup(self):
+
+ self.logdir = tempfile.mkdtemp()
+
+ self.ac = ConfigParser.ConfigParser()
+ self.ac.add_section("ecdirs")
+ self.ac.add_section("logging")
+ self.ac.set("logging", "stdoutloglevel", "4")
+ self.ac.add_section("emimpls")
+
+ self.p = DefaultParameters(self.ac, None)
+ self.common = DefaultCommon(self.p)
+
+ def teardown(self):
+ shutil.rmtree(self.logdir)
+
+ def test_init(self):
+
+ try:
+ epumgmt.defaults.common.DefaultCommon(None)
+ except InvalidConfig:
+ raised_invalidconfig = True
+
+ assert raised_invalidconfig
+
+ def test_resolve_var_dir(self):
+
+ var_file = "logs"
+
+ try:
+ self.common.resolve_var_dir(var_file)
+ except InvalidConfig:
+ invalid_config_raised = True
+
+ assert invalid_config_raised
+
+
+ self.ac.set("ecdirs", "var", "var/epumgmt")
+ got_vardir = self.common.resolve_var_dir(var_file)
+
+ vardir = os.path.join(os.path.dirname(__file__), "../../..", "var/epumgmt", var_file)
+
+ assert os.path.samefile(got_vardir, vardir)
+
+
+ def test_get_class_by_keyword(self):
+ from mocks.common import FakeCommon
+
+ test_keyword = "TheClass"
+ test_class = "mocks.common.FakeCommon"
+
+ try:
+ self.common.get_class_by_keyword(test_keyword)
+ except UnexpectedError:
+ raised_unexpected_error = True
+
+ assert raised_unexpected_error
+
+ self.ac.set("emimpls", test_keyword, test_class)
+
+ kls = self.common.get_class_by_keyword(test_keyword)
+
+ assert kls == FakeCommon
+
+def test_close_logfile():
+
+ # This test runs outside the test object to avoid closing
+ # the log file before the other tests can use it
+
+ logdir = tempfile.mkdtemp()
+
+ ac = ConfigParser.ConfigParser()
+ ac.add_section("ecdirs")
+ ac.add_section("logging")
+ ac.set("logging", "stdoutloglevel", "4")
+ ac.add_section("emimpls")
+
+ ac.set("logging", "logfiledir", logdir)
+ ac.set("logging", "fileloglevel", "0")
+
+ params = DefaultParameters(ac, None)
+ logging_common = DefaultCommon(params)
+
+ assert os.path.isfile(logging_common.logfilepath)
+ assert logging_common.logfilehandler
+
+ logging_common.close_logfile()
+ assert not logging_common.logfilehandler
+
+ shutil.rmtree(logdir)
84 src/python/tests/test_epumgmt_defaults_cloudinitd_load.py
@@ -0,0 +1,84 @@
+import os
+import types
+import tempfile
+import ConfigParser
+
+from cloudinitd.user_api import CloudInitD
+
+import epumgmt.defaults.cloudinitd_load
+import epumgmt.main.em_args as em_args
+from epumgmt.api.exceptions import ProgrammingError, IncompatibleEnvironment
+from epumgmt.defaults.runlogs import DefaultRunlogs
+from epumgmt.defaults.parameters import DefaultParameters
+
+from mocks.common import FakeCommon
+from mocks.modules import FakeModules
+from mocks.modules import build_fake_scp_command_str
+from mocks.remote_svc_adapter import FakeRemoteSvcAdapter
+
+class TestCloudinitdLoad:
+ def setup(self):
+ """
+ Build a fake test environment, with the sleepers cloudinit.d plan.
+
+ We can grab all logged messages from c.log.transcript.
+ """
+
+ self.test_run_name = "TESTRUN"
+
+ self.config = ConfigParser.RawConfigParser()
+ self.config.add_section("events")
+ self.runlogdir = tempfile.mkdtemp()
+ self.config.set("events", "runlogdir", self.runlogdir)
+ self.vmlogdir = tempfile.mkdtemp()
+ self.config.set("events", "vmlogdir", self.vmlogdir)
+ self.optdict = {}
+ self.optdict[em_args.NAME.name] = self.test_run_name
+
+ self.params = DefaultParameters(self.config, None)
+ self.params.optdict = self.optdict
+ remote_svc_adapter = FakeRemoteSvcAdapter()
+ self.common = FakeCommon()
+ self.modules = FakeModules(remote_svc_adapter=remote_svc_adapter)
+
+ # Note that we monkey-patch the get_scp_command_str function
+ # to prepend "echo" to it. That way the command can still be
+ # run, but we can see how it actually gets constructed.
+ runlogs = DefaultRunlogs(self.params, self.common)
+ runlogs.validate()
+ self.modules.runlogs = runlogs
+ new_get_scp = build_fake_scp_command_str(runlogs, runlogs.get_scp_command_str)
+ self.modules.runlogs.get_scp_command_str = types.MethodType(new_get_scp, self.modules.runlogs)
+
+ self.test_dir = os.path.dirname(__file__)
+ self.test_db_dir = tempfile.mkdtemp()
+ self.test_cd_config = os.path.join(self.test_dir, "configs/main.conf")
+ self.cloudinitd = CloudInitD(self.test_db_dir, self.test_cd_config, self.test_run_name)
+
+ def test_get_cloudinitd_service(self):
+ from epumgmt.defaults.cloudinitd_load import get_cloudinitd_service
+
+ try:
+ get_cloudinitd_service(None, None)
+ except ProgrammingError:
+ no_cloudinitd_programming_error = True
+
+ assert no_cloudinitd_programming_error
+
+ try:
+ get_cloudinitd_service(self.cloudinitd, None)
+ except ProgrammingError:
+ no_service_name_programming_error = True
+
+ assert no_service_name_programming_error
+
+ nonexistant_svc = "notreal"
+
+ try:
+ service = get_cloudinitd_service(self.cloudinitd, nonexistant_svc)
+ except IncompatibleEnvironment:
+ no_service_incompatible_env = True
+
+ assert no_service_incompatible_env
+
135 src/python/tests/test_epumgmt_defaults_log_events.py
@@ -0,0 +1,135 @@
+import os
+import shutil
+import tempfile
+import ConfigParser
+
+import epumgmt.defaults.log_events
+from epumgmt.defaults import DefaultParameters
+from mocks.common import FakeCommon
+
+class TestAmqpEvents:
+
+ def setup(self):
+
+ self.runlogdir = tempfile.mkdtemp()
+ self.vmlogdir = tempfile.mkdtemp()
+
+ producer_dir = os.path.join(self.runlogdir, "producer1-container")
+ os.mkdir(producer_dir)
+ self.producer_ioncontainer_log = os.path.join(producer_dir, "ioncontainer.log")
+ with open(self.producer_ioncontainer_log, "w") as container_file:
+ container_file.write("contents!")
+
+ consumer_dir = os.path.join(self.runlogdir, "epuworker_container")
+ os.mkdir(consumer_dir)
+ self.consumer_ioncontainer_log = os.path.join(consumer_dir, "ioncontainer.log")
+ with open(self.consumer_ioncontainer_log, "w") as container_file:
+ container_file.write("contents!")
+
+ self.config = ConfigParser.RawConfigParser()
+ self.config.add_section("events")
+ self.config.set("events", "runlogdir", self.runlogdir)
+ self.config.set("events", "vmlogdir", self.vmlogdir)
+
+
+ self.c = FakeCommon()
+ self.p = DefaultParameters(self.config, None)
+ self.amqp_events = epumgmt.defaults.log_events.AmqpEvents(self.p, self.c, None, "")
+
+ def teardown(self):
+
+ shutil.rmtree(self.runlogdir)
+ shutil.rmtree(self.vmlogdir)
+
+ def test_create_datetime(self):
+
+ year = 2011
+ month = 4
+ day = 5
+ hour = 4
+ minute = 3
+ second = 7
+ microsecond = 6
+ timestamp = { "year": year, "month": month, "day": day,
+ "hour": hour, "minute": minute, "second": second,
+ "microsecond": microsecond }
+
+ got_datetime = self.amqp_events._create_datetime(timestamp)
+
+ print dir(got_datetime)
+
+ assert got_datetime.year == year
+ assert got_datetime.minute == minute
+ assert got_datetime.day == day
+
+
+ def test_set_workproducerlog_filenames(self):
+
+ self.amqp_events._set_workproducerlog_filenames()
+ assert self.producer_ioncontainer_log in self.amqp_events.workproducerlog_filenames
+
+
+ def test_set_workconsumerlog_filenames(self):
+
+ self.amqp_events._set_workconsumerlog_filenames()
+ assert self.consumer_ioncontainer_log in self.amqp_events.workconsumerlog_filenames
+
+
+ def test_update_log_filenames(self):
+
+ self.amqp_events._update_log_filenames()
+ assert self.consumer_ioncontainer_log in self.amqp_events.workconsumerlog_filenames
+ assert self.producer_ioncontainer_log in self.amqp_events.workproducerlog_filenames
+
+
+ def test_get_event_datetimes_dict(self):
+
+ got_datetimes = self.amqp_events.get_event_datetimes_dict("fake event")
+ assert got_datetimes == {}
+
+
+ job_begin_id = 545454
+ job_begin_event = '2011-07-07 11:03:07,532 [cei_events : 32] WARNING:CLOUDYVENT_JSON: {"eventname": "job_begin", "timestamp": {"hour": 18, "month": 7, "second": 7, "microsecond": 532627, "year": 2011, "day": 7, "minute": 4}, "uniquekey": "2c5a9f30-a1b8-4621-ac68-d66ca1cd99f5", "eventsource": "worker", "extra": {"batchid": "xchg1310061055-jobs", "work_amount": 0, "jobid": %s}}\n' % job_begin_id
+ job_end_id = 424242
+ job_end_event = '2011-07-07 11:04:07,532 [cei_events : 32] WARNING:CLOUDYVENT_JSON: {"eventname": "job_end", "timestamp": {"hour": 18, "month": 7, "second": 7, "microsecond": 532627, "year": 2011, "day": 7, "minute": 4}, "uniquekey": "2c5a9f30-a1b8-4621-ac68-d66ca1cd99f5", "eventsource": "worker", "extra": {"batchid": "xchg1310061055-jobs", "work_amount": 0, "jobid": %s}}\n' % job_end_id
+
+ with open(self.consumer_ioncontainer_log, "w") as container:
+ container.write(job_begin_event + job_end_event)
+
+ job_sent_id = 424244
+ job_sent_event = '2011-07-07 11:04:07,532 [cei_events : 32] WARNING:CLOUDYVENT_JSON: {"eventname": "job_sent", "timestamp": {"hour": 18, "month": 7, "second": 7, "microsecond": 532627, "year": 2011, "day": 7, "minute": 4}, "uniquekey": "2c5a9f30-a1b8-4621-ac68-d66ca1cd99f5", "eventsource": "worker", "extra": {"batchid": "xchg1310061055-jobs", "work_amount": 0, "jobid": %s}}\n' % job_sent_id
+
+ with open(self.producer_ioncontainer_log, "w") as container:
+ container.write(job_sent_event)
+
+
+ got_datetimes = self.amqp_events.get_event_datetimes_dict("job_end")
+ assert got_datetimes.has_key(job_end_id)
+
+
+ got_datetimes = self.amqp_events.get_event_datetimes_dict("job_begin")
+ assert got_datetimes.has_key(job_begin_id)
+
+
+ got_datetimes = self.amqp_events.get_event_datetimes_dict("job_sent")
+ assert got_datetimes.has_key(job_sent_id)
+
+ def test_get_event_datetimes_dict_badfile(self):
+
+ job_sent_id = 424244
+ job_sent_event = '2011-07-07 11:04:07,532 [cei_events : 32] WARNING:CLOUDYVENT_JSON: {"eventname": "job_sent", "timestamp": {"hour": 18, "month": 7, "second": 7, "microsecond": 532627, "year": 2011, "day": 7, "minute": 4}, "uniquekey": "2c5a9f30-a1b8-4621-ac68-d66ca1cd99f5", "eventsource": "worker", "extra": {"batchid": "xchg1310061055-jobs", "work_amount": 0, "jobid": %s}}\n' % job_sent_id
+
+ with open(self.producer_ioncontainer_log, "w") as container:
+ container.write(job_sent_event)
+
+ old_mode = os.stat(self.producer_ioncontainer_log).st_mode
+ os.chmod(self.producer_ioncontainer_log, 0)
+ got_datetimes = self.amqp_events.get_event_datetimes_dict("job_sent")
+
+ failed_to_open = [message for (level, message)
+ in self.c.log.transcript
+ if level == "ERROR"
+ and "Failed to open and read" in message]
+
+ assert len(failed_to_open) == 1
+ os.chmod(self.producer_ioncontainer_log, old_mode)
81 src/python/tests/test_epumgmt_defaults_svc_adapter.py
@@ -1,7 +1,18 @@
+import nose.tools
+import os
+import tempfile
+import ConfigParser
+
import epumgmt.defaults.svc_adapter
+import epumgmt.main.em_args as em_args
+from cloudinitd.user_api import CloudInitD
+
from epumgmt.api.exceptions import *
+from epumgmt.api import RunVM
+from epumgmt.defaults.parameters import DefaultParameters
+from mocks.common import FakeCommon
+from mocks.modules import FakeModules
-import nose.tools
@nose.tools.raises(ProgrammingError)
def test_initialize_no_modules():
@@ -23,4 +34,72 @@ def test_check_init():
adapter = epumgmt.defaults.svc_adapter.DefaultRemoteSvcAdapter(None, None)
adapter._check_init()
+class TestDefaultRemoteSvcAdapter:
+
+ def setup(self):
+ from epumgmt.defaults.svc_adapter import DefaultRemoteSvcAdapter
+
+ self.run_name = "TESTRUN"
+
+ self.config = ConfigParser.RawConfigParser()
+ self.config.add_section("svcadapter")
+ self.config.set("svcadapter", "controller_prefix", "controller")
+ self.config.set("svcadapter", "homedir", "app")
+ self.config.set("svcadapter", "envfile", "app-venv/bin/activate")
+ self.optdict = {}
+ self.optdict[em_args.NAME.name] = self.run_name
+
+ self.p = DefaultParameters(self.config, None)
+ self.p.optdict = self.optdict
+
+
+ self.m = FakeModules()
+ self.c = FakeCommon()
+ self.svc_adapter = DefaultRemoteSvcAdapter(self.p, self.c)
+
+ self.test_dir = os.path.dirname(__file__)
+ self.test_db_dir = tempfile.mkdtemp()
+ self.test_cd_config = os.path.join(self.test_dir, "configs/main.conf")
+ self.cloudinitd = CloudInitD(self.test_db_dir, self.test_cd_config, self.run_name)
+
+
+ def test_worker_state(self):
+
+ # Check when nothing's passed to worker_state
+ try:
+ self.svc_adapter.worker_state(None, None)
+ raised_incompatible_env = False
+ except IncompatibleEnvironment:
+ raised_incompatible_env = True
+ assert raised_incompatible_env
+
+ provisioner = RunVM()
+
+ # Check when not initialized yet
+ try:
+ self.svc_adapter.worker_state(None, provisioner)
+ raised_programming_error = False
+ except ProgrammingError:
+ raised_programming_error = True
+ assert raised_programming_error
+
+
+ self.svc_adapter.initialize(self.m, self.run_name, self.cloudinitd)
+ # Check when no controllers provided
+ try:
+ self.svc_adapter.worker_state(None, provisioner)
+ raised_invalid_input = False
+ except InvalidInput:
+ raised_invalid_input = True
+ assert raised_invalid_input
+
+
+ controllers = ["one", "two"]
+ try:
+ self.svc_adapter.worker_state(controllers, provisioner)
+ raised_incompatible_env = False
+ except IncompatibleEnvironment:
+ raised_incompatible_env = True
+ assert raised_incompatible_env
+
126 src/python/tests/test_epumgmt_main_em_core_findworkers.py
@@ -0,0 +1,126 @@
+import epumgmt.main.em_core_findworkers
+import epumgmt.api
+
+from epumgmt.api.exceptions import IncompatibleEnvironment
+from epumgmt.defaults.runlogs import DefaultRunlogs
+import epumgmt.main.em_args as em_args
+from epumgmt.defaults.parameters import DefaultParameters
+
+from mocks.common import FakeCommon
+from mocks.modules import FakeModules
+from mocks.event import Event
+from mocks.runlogs import FakeRunlogs
+
+def test_get_provisioner():
+ from epumgmt.main.em_core_findworkers import _get_provisioner
+
+ m = FakeModules()
+ run_name = "TESTRUN"
+
+ m.persistence.store_run_vms(run_name, [])
+
+ try:
+ _get_provisioner(m, run_name)
+ raised_incompatible_env = False
+ except IncompatibleEnvironment:
+ raised_incompatible_env = True
+
+ assert raised_incompatible_env
+
+
+ non_provisioner_vm = epumgmt.api.RunVM()
+ non_provisioner_vm.service_type = "something"
+
+ m.persistence.store_run_vms(run_name, [non_provisioner_vm])
+
+ try:
+ _get_provisioner(m, run_name)
+ raised_incompatible_env = False
+ except IncompatibleEnvironment:
+ raised_incompatible_env = True
+
+ assert raised_incompatible_env
+
+
+ test_service_name = "provisioner"
+ test_provisioner = epumgmt.api.RunVM()
+ test_provisioner.service_type = test_service_name
+ test_provisioner_instanceid = "i-TEST"
+ test_provisioner.instanceid = test_provisioner_instanceid
+
+ m.persistence.store_run_vms(run_name, [non_provisioner_vm, test_provisioner])
+
+ got_provisioner = _get_provisioner(m, run_name)
+
+ assert got_provisioner == test_provisioner
+
+
+def test_vms_launched():
+ from epumgmt.main.em_core_findworkers import vms_launched
+
+ run_name = "TESTRUN"
+
+ c = FakeCommon()
+ m = FakeModules()
+
+ optdict = {}
+ optdict[em_args.NAME.name] = run_name
+
+ p = DefaultParameters(None, None)
+ p.optdict = optdict
+
+ m.runlogs = FakeRunlogs()
+
+ test_service_name = "provisioner"
+ test_provisioner = epumgmt.api.RunVM()
+ test_provisioner.service_type = test_service_name
+ test_provisioner_instanceid = "i-TEST"
+ test_provisioner.instanceid = test_provisioner_instanceid
+
+ m.persistence.store_run_vms(run_name, [test_provisioner])
+
+ got_vms = vms_launched(m, run_name, "new_node")
+ assert got_vms == []
+
+
+ vm_0_id = "i-apple"
+ vm_0_nodeid = "applenodeid"
+ vm_0_publicip = "8.8.8.8"
+ vm_0_new_node_event = Event(name="new_node", iaas_id=vm_0_id,
+ node_id=vm_0_nodeid, public_ip=vm_0_publicip)
+
+ test_provisioner.events.append(vm_0_new_node_event)
+
+ got_vms = vms_launched(m, run_name, "new_node")
+ assert got_vms[0].instanceid == vm_0_id
+
+
+ test_provisioner.events = []
+ got_vms = vms_launched(m, run_name, "new_node")
+ assert got_vms == []
+
+
+ vm_0_node_started_event = Event(name="node_started", iaas_id=vm_0_id,
+ node_id=vm_0_nodeid, public_ip=vm_0_publicip)
+ test_provisioner.events.append(vm_0_node_started_event)
+
+ got_vms = vms_launched(m, run_name, "node_started")
+ assert got_vms[0].instanceid == vm_0_id
+
+
+ test_provisioner.events = []
+ got_vms = vms_launched(m, run_name, "new_node")
+ assert got_vms == []
+
+
+ vm_0_bad_event = Event(name="bad", iaas_id=vm_0_id,
+ node_id=vm_0_nodeid, public_ip=vm_0_publicip)
+ test_provisioner.events.append(vm_0_bad_event)
+
+ try:
+ got_vms = vms_launched(m, run_name, "bad")
+ raised_incompatible_env = False
+ except IncompatibleEnvironment:
+ raised_incompatible_env = True
+ assert raised_incompatible_env
+
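Taken together, these assertions pin down the contract of vms_launched(): find the provisioner VM for the run (raising IncompatibleEnvironment when there is none), collect its events matching the requested name, and reject unrecognized event names. A rough sketch of that contract, inferred from the tests rather than from the real implementation:

    def vms_launched_sketch(m, run_name, event_name):
        # Only provisioner launch events make sense here (assumed whitelist).
        if event_name not in ("new_node", "node_started"):
            raise IncompatibleEnvironment("unexpected event name: %s" % event_name)
        provisioner = _get_provisioner(m, run_name)
        vms = []
        for event in provisioner.events:
            if event.name == event_name:
                vm = epumgmt.api.RunVM()
                vm.instanceid = event.iaas_id
                vms.append(vm)
        return vms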
View
198 src/python/tests/test_epumgmt_main_em_core_logfetch.py
@@ -0,0 +1,198 @@
+import re
+import os
+import types
+import shutil
+import tempfile
+import ConfigParser
+
+import epumgmt.api
+import epumgmt.main.em_core_logfetch
+from epumgmt.defaults.parameters import DefaultParameters
+from cloudinitd.user_api import CloudInitD
+from epumgmt.defaults.runlogs import DefaultRunlogs
+import epumgmt.main.em_args as em_args
+import epumgmt.defaults.epustates as epustates
+
+from mocks.common import FakeCommon
+from mocks.modules import FakeModules
+from mocks.modules import build_fake_scp_command_str
+from mocks.remote_svc_adapter import FakeRemoteSvcAdapter
+from mocks.event import Event
+
+class TestLogfetch:
+
+ def setup(self):
+ """
+ Build a fake test environment, with the sleepers cloudinit.d plan.
+
+        We can grab all logged messages from self.common.log.transcript.
+ """
+
+ self.test_run_name = "TESTRUN"
+
+ self.config = ConfigParser.RawConfigParser()
+ self.config.add_section("events")
+ self.runlogdir = tempfile.mkdtemp()
+ self.config.set("events", "runlogdir", self.runlogdir)
+ self.vmlogdir = tempfile.mkdtemp()
+ self.config.set("events", "vmlogdir", self.vmlogdir)
+ self.optdict = {}
+ self.optdict[em_args.NAME.name] = self.test_run_name
+
+ self.params = DefaultParameters(self.config, None)
+ self.params.optdict = self.optdict
+ remote_svc_adapter = FakeRemoteSvcAdapter()
+ self.common = FakeCommon()
+ self.modules = FakeModules(remote_svc_adapter=remote_svc_adapter)
+
+        # Note that we monkey-patch the get_scp_command_str function
+        # to prepend "echo" to it. That way the command can still be
+        # run (harmlessly), and we can see exactly how it gets
+        # constructed.
+ runlogs = DefaultRunlogs(self.params, self.common)
+ runlogs.validate()
+ self.modules.runlogs = runlogs
+ new_get_scp = build_fake_scp_command_str(runlogs, runlogs.get_scp_command_str)
+ self.modules.runlogs.get_scp_command_str = types.MethodType(new_get_scp, self.modules.runlogs)
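+        # (A rough sketch of what build_fake_scp_command_str presumably
+        # returns -- the real helper lives in mocks.modules -- is a wrapper
+        # along the lines of:
+        #     def new_get_scp(self, *args, **kwargs):
+        #         return "echo " + original_get_scp(*args, **kwargs)
+        # so the command prints itself when run instead of copying anything.)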
+
+ self.test_dir = os.path.dirname(__file__)
+ self.test_db_dir = tempfile.mkdtemp()
+ self.test_cd_config = os.path.join(self.test_dir, "configs/main.conf")
+ self.cloudinitd = CloudInitD(self.test_db_dir, self.test_cd_config, self.test_run_name)
+
+ def teardown(self):
+ shutil.rmtree(self.test_db_dir)
+
+ def test_fetch_one_vm(self):
+ from epumgmt.main.em_core_logfetch import _fetch_one_vm
+
+ test_vm = epumgmt.api.RunVM()
+
+ _fetch_one_vm(self.params, self.common, self.modules,
+ self.test_run_name, test_vm, cloudinitd=self.cloudinitd)
+
+ def test_fetch_by_service_name(self):
+ """
+        This test constructs a RunVM instance, and then asks
+        logfetch to grab its logs. We confirm indirectly that the
+        correct scp call was made by examining the logging
+        transcript.
+
+ We also neuter the scp call by prefixing it with echo, since
+ we're not trying to scp from a real host.
+ """
+
+ from epumgmt.main.em_core_logfetch import fetch_by_service_name
+
+ test_service_name = "provisioner"
+
+ test_provisioner = epumgmt.api.RunVM()
+ test_provisioner.service_type = test_service_name
+ test_provisioner_hostname = "test.hostname.example.com"
+ test_provisioner.hostname = test_provisioner_hostname
+ test_provisioner_vmlogdir = "/some/fake/logdir"
+ test_provisioner.vmlogdir = test_provisioner_vmlogdir
+ test_provisioner_runlogdir = "/some/fake/local/runlogdir"
+ test_provisioner.runlogdir = test_provisioner_runlogdir
+ test_provisioner_instanceid = "i-TEST"
+ test_provisioner.instanceid = test_provisioner_instanceid
+
+ test_run_vms = []
+ test_run_vms.append(test_provisioner)
+ self.modules.persistence.store_run_vms(self.test_run_name, test_run_vms)
+
+ # Be tricky and patch in our hostname
+ self.cloudinitd.get_service("provisioner")._svc._s.hostname = test_provisioner_hostname
+
+ fetch_by_service_name(self.params, self.common, self.modules,
+ self.test_run_name, test_service_name, self.cloudinitd)
+
+ run_commands = [message for (level, message)
+ in self.common.log.transcript
+ if level == "DEBUG"
+ and "command =" in message]
+
+ # confirm that scp command gets called for our service
+ expected_scp_pattern = ".*@%s:%s %s" % (test_provisioner_hostname,
+ test_provisioner_vmlogdir,
+ test_provisioner_runlogdir)
+ # only expect one command to be run
+ assert len(run_commands) == 1
+ assert re.search(expected_scp_pattern, run_commands[0])
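+        # (For reference, run_commands[0] should end with something
+        # roughly like "<user>@test.hostname.example.com:/some/fake/logdir
+        # /some/fake/local/runlogdir"; the user and scp options depend
+        # on the runlogs configuration.)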
+
+
+ def test_fetch_all(self):
+
+ from epumgmt.main.em_core_logfetch import fetch_all
+
+ test_service_name = "provisioner"
+
+ test_provisioner = epumgmt.api.RunVM()
+ test_provisioner.service_type = test_service_name
+ test_provisioner_hostname = "test.hostname.example.com"
+ test_provisioner.hostname = test_provisioner_hostname
+ test_provisioner_vmlogdir = "/some/fake/logdir"
+ test_provisioner.vmlogdir = test_provisioner_vmlogdir
+ test_provisioner_runlogdir = "/some/fake/local/runlogdir"
+ test_provisioner.runlogdir = test_provisioner_runlogdir
+ test_provisioner_instanceid = "i-TEST"
+ test_provisioner.instanceid = test_provisioner_instanceid
+
+ # Be tricky and patch in our hostname
+ self.cloudinitd.get_service("provisioner")._svc._s.hostname = test_provisioner_hostname
+
+        # Two workers. Note that they share a hostname, to simulate
+        # the case where the first worker was terminated and the
+        # second was booted with the same hostname.
+ test_worker_0 = epumgmt.api.RunVM()
+ test_worker_0_service_type = "iamaworker"
+ test_worker_0.service_type = test_worker_0_service_type
+ test_worker_0_hostname = "worker0.example.com"
+ test_worker_0.hostname = test_worker_0_hostname
+ test_worker_0_instanceid = "i-TESTWORKER0"
+ test_worker_0.instanceid = test_worker_0_instanceid
+ test_worker_0_vmlogdir = "/some/fake/logdir"
+ test_worker_0.vmlogdir = test_worker_0_vmlogdir
+ test_worker_0_runlogdir = "/some/fake/%s/runlogdir" % test_worker_0_instanceid
+ test_worker_0.runlogdir = test_worker_0_runlogdir
+ test_worker_0_iaas_state = epustates.TERMINATED
+ test_worker_0_events = [Event(name="iaas_state", timestamp=1000, state=test_worker_0_iaas_state)]
+ test_worker_0.events = test_worker_0_events
+
+
+ test_worker_1 = epumgmt.api.RunVM()
+ test_worker_1_service_type = "iamaworker"
+        test_worker_1.service_type = test_worker_1_service_type
+ test_worker_1.hostname = test_worker_0_hostname
+ test_worker_1_instanceid = "i-TESTWORKER1"
+ test_worker_1.instanceid = test_worker_1_instanceid
+ test_worker_1_vmlogdir = "/some/fake/logdir"
+ test_worker_1.vmlogdir = test_worker_1_vmlogdir
+        test_worker_1_runlogdir = "/some/fake/%s/runlogdir" % test_worker_1_instanceid
+ test_worker_1.runlogdir = test_worker_1_runlogdir
+ test_worker_1_iaas_state = epustates.RUNNING
+ test_worker_1_events = [Event(name="iaas_state", timestamp=1000, state=test_worker_1_iaas_state)]
+ test_worker_1.events = test_worker_1_events
+
+ test_run_vms = []
+ test_run_vms.append(test_provisioner)
+ test_run_vms.append(test_worker_0)
+ test_run_vms.append(test_worker_1)
+ self.modules.persistence.store_run_vms(self.test_run_name, test_run_vms)
+
+ fetch_all(self.params, self.common, self.modules, self.test_run_name, self.cloudinitd)
+ run_commands = [message for (level, message)
+ in self.common.log.transcript
+ if level == "DEBUG"
+ and "command =" in message]
+
+ # We have two VMs we should fetch from
+ assert len(run_commands) == 2
+
+
+ # Confirm that we never scp log files from the TERMINATED VM
+ for scp_command in run_commands:
+ assert scp_command.find(test_worker_0_instanceid) == -1
+
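The TERMINATED filtering is the interesting behavior pinned down here: when two VMs share a hostname, logfetch must not try to fetch from the dead one. A minimal sketch of that filtering, inferred from this test rather than copied from em_core_logfetch:

    def fetchable_vms(vms):
        # Skip any VM whose iaas_state events show it was terminated;
        # its hostname may since have been reused by a newer VM.
        keep = []
        for vm in vms:
            states = [e.state for e in vm.events if e.name == "iaas_state"]
            if epustates.TERMINATED not in states:
                keep.append(vm)
        return keep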
View
11 src/python/tests/test_epumgmt_main_em_core_status.py
@@ -373,8 +373,9 @@ def test_find_latest_worker_status(self):
# make sure at least one vm added to persistence
assert len(modules.persistence.vm_store) > 0
- for persisted_vm in modules.persistence.vm_store:
- assert persisted_vm.parent == controller_name
+ for run_name in modules.persistence.vm_store.keys():
+ for persisted_vm in modules.persistence.vm_store[run_name]:
+ assert persisted_vm.parent == controller_name
def test_find_state_from_events_not_yet_started(self):
"""Test for problems when status is called before epu is booted
@@ -451,7 +452,7 @@ def test_update_worker_parents(self):
_update_worker_parents(common, modules, run_name, controllers, controller_state_map, allvms)
assert worker_0_vm.parent == controller_0
- assert modules.persistence.vm_store[0] == worker_0_vm
+ assert modules.persistence.vm_store[run_name][0] == worker_0_vm
# Test when the VM's parent isn't the controller that owns it
@@ -519,7 +520,7 @@ def test_update_worker_states(self):
# Test standard case where the worker has one new event.
_update_worker_states(common, modules, run_name, controllers, controller_state_map, allvms)
- assert modules.persistence.vm_store[0] == worker_0_vm
+ assert modules.persistence.vm_store[run_name][0] == worker_0_vm
assert len(worker_0_vm.events) == 1
# Test for controllers being present in the controller_map, but not the
@@ -583,7 +584,7 @@ def test_update_controller_states(self):
_update_controller_states(common, modules, run_name, controller_map, controller_state_map, allvms)
- assert modules.persistence.vm_store[0] == controller_0_vm
+ assert modules.persistence.vm_store[run_name][0] == controller_0_vm
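These assertion changes track a change in the mock persistence layer: vm_store is now keyed by run name instead of being a flat list. A sketch of the assumed shape:

    vm_store = {}  # run_name -> [RunVM, ...]
    vm_store.setdefault(run_name, []).append(worker_0_vm)
    assert vm_store[run_name][0] == worker_0_vm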