Permalink
Browse files

ctx-agent/ from 2009-01-10

  • Loading branch information...
1 parent 80433cc commit 67dffb673896f68b914857dfd4742f59997c252f @timf timf committed Sep 30, 2009
Showing with 7,224 additions and 0 deletions.
  1. +17 −0 ctx-agent/ctx-scripts/0-etchosts.sh
  2. +37 −0 ctx-agent/ctx-scripts/1-ipandhost/nfsclient
  3. +35 −0 ctx-agent/ctx-scripts/1-ipandhost/nfsserver
  4. +23 −0 ctx-agent/ctx-scripts/1-ipandhost/testrole1
  5. +23 −0 ctx-agent/ctx-scripts/1-ipandhost/testrole2
  6. +23 −0 ctx-agent/ctx-scripts/1-ipandhost/testrole3
  7. +33 −0 ctx-agent/ctx-scripts/1-ipandhost/torquemaster
  8. +37 −0 ctx-agent/ctx-scripts/1-ipandhost/torqueslave
  9. +89 −0 ctx-agent/ctx-scripts/2-thishost/publicnic
  10. +24 −0 ctx-agent/ctx-scripts/3-data/gridmap
  11. +25 −0 ctx-agent/ctx-scripts/3-data/startcontainer
  12. +27 −0 ctx-agent/ctx-scripts/3-data/testdata
  13. +31 −0 ctx-agent/ctx-scripts/4-restarts/nfsclient
  14. +49 −0 ctx-agent/ctx-scripts/4-restarts/nfsserver
  15. +21 −0 ctx-agent/ctx-scripts/4-restarts/testrole2
  16. +20 −0 ctx-agent/ctx-scripts/4-restarts/torquemaster
  17. +21 −0 ctx-agent/ctx-scripts/4-restarts/torqueslave
  18. +94 −0 ctx-agent/ctx-scripts/5-thishost-finalize/publicnic
  19. +21 −0 ctx-agent/ctx-scripts/clean.sh
  20. +9 −0 ctx-agent/ctx-scripts/problem.sh
  21. +178 −0 ctx-agent/ctx/ctx.conf
  22. +10 −0 ctx-agent/ctx/launch.sh
  23. +37 −0 ctx-agent/ctx/lib/err-template-001.xml
  24. +45 −0 ctx-agent/ctx/lib/err-template-002.xml
  25. +40 −0 ctx-agent/ctx/lib/ok-template-001.xml
  26. +40 −0 ctx-agent/ctx/lib/ok-template-002.xml
  27. +141 −0 ctx-agent/ctx/lib/pylib/embeddedET/ElementInclude.py
  28. +196 −0 ctx-agent/ctx/lib/pylib/embeddedET/ElementPath.py
  29. +1,254 −0 ctx-agent/ctx/lib/pylib/embeddedET/ElementTree.py
  30. +230 −0 ctx-agent/ctx/lib/pylib/embeddedET/HTMLTreeBuilder.py
  31. +103 −0 ctx-agent/ctx/lib/pylib/embeddedET/SgmlopXMLTreeBuilder.py
  32. +144 −0 ctx-agent/ctx/lib/pylib/embeddedET/SimpleXMLTreeBuilder.py
  33. +279 −0 ctx-agent/ctx/lib/pylib/embeddedET/SimpleXMLWriter.py
  34. +6 −0 ctx-agent/ctx/lib/pylib/embeddedET/TidyHTMLTreeBuilder.py
  35. +128 −0 ctx-agent/ctx/lib/pylib/embeddedET/TidyTools.py
  36. +113 −0 ctx-agent/ctx/lib/pylib/embeddedET/XMLTreeBuilder.py
  37. +30 −0 ctx-agent/ctx/lib/pylib/embeddedET/__init__.py
  38. +34 −0 ctx-agent/ctx/lib/retr-template-001.xml
  39. +42 −0 ctx-agent/ctx/lib/retr-template-002.xml
  40. +3,494 −0 ctx-agent/ctx/lib/workspace_ctx_retrieve.py
  41. +21 −0 ctx-agent/ctx/sample-rc-local-entry.txt
@@ -0,0 +1,17 @@
#!/bin/sh

# This configures the local /etc/hosts file with all members of the context.
# Don't change or delete this script unless you know what you are doing.
#
# Called once per context member with:
#   $1 = IP address, $2 = short hostname, $3 = fully qualified hostname

printf '\n%s\n' "etchosts script"
printf '%s\n' "IP: $1" "Short hostname: $2" "Hostname: $3"

# One hosts-file entry per member: "<ip> <fqdn> <short-name>"
printf '%s\n' "$1 $3 $2" >> /etc/hosts

exit 0
@@ -0,0 +1,37 @@
#!/bin/sh

# ************************************************************************* #
# 1-ipandhost scripts are called when the context broker tells this node it #
# *requires* to know about nodes playing the role. If this node is told it #
# requires to know about nodes playing the "xyz" role, then if a script    #
# called "xyz" lives in this directory, it will be called with IP, short   #
# hostname and hostname (args $1, $2, and $3 respectively) of the node that#
# *provides* the required "xyz" role.                                      #
# ************************************************************************* #

scriptdir=$(cd "$(dirname "$0")"; pwd)
echo "Hello from \"$scriptdir/$0\""

printf '\n%s\n%s\n\n' \
  "NFS client required: we are being told this node requires an NFS client," \
  "therefore it will be playing the role of NFS server."

# Grant the client node we just learned about access to the volumes this
# NFS server exports.

echo "NFS CLIENT IP: $1"
echo "NFS CLIENT Short hostname: $2"
echo "NFS CLIENT Hostname: $3"

EXPORTS="/etc/exports"

# Append "<ip>(rw,no_root_squash)" to each line of EXPORTS that ends with a
# space. The trailing space acts as a sentinel: blank and comment lines are
# left untouched, and the replacement itself ends with a space so the next
# client announced can be appended the same way.
updated=$(sed -e "s/ \$/ $1(rw,no_root_squash) /" "$EXPORTS")

echo "$updated" > "$EXPORTS"

exit 0
@@ -0,0 +1,35 @@
#!/bin/sh

# ************************************************************************* #
# 1-ipandhost scripts are called when the context broker tells this node it #
# *requires* to know about nodes playing the role. If this node is told it #
# requires to know about nodes playing the "xyz" role, then if a script    #
# called "xyz" lives in this directory, it will be called with IP, short   #
# hostname and hostname (args $1, $2, and $3 respectively) of the node that#
# *provides* the required "xyz" role.                                      #
# ************************************************************************* #

scriptdir=$(cd "$(dirname "$0")"; pwd)
echo "Hello from \"$scriptdir/$0\""

printf '\n%s\n%s\n\n' \
  "NFS server required: we are being told this node requires an NFS server," \
  "therefore it will be playing the role of an NFS client."

# Add fstab entries importing from the server node we are hearing about.
# Repeat the printf below for each volume to import. If you have multiple
# NFS servers, each exporting something different, make sure you have
# DIFFERENT role names for each (like "nfsserver1" "nfsserver2" etc).

echo "NFS SERVER IP: $1"
echo "NFS SERVER Short hostname: $2"
echo "NFS SERVER Hostname: $3"

NFSOPTS="tcp,rsize=32768,wsize=32768,intr,soft,user,exec"

printf '%s:%s %s nfs %s 0 0\n' "$1" /home /home "$NFSOPTS" >> /etc/fstab
printf '%s:%s %s nfs %s 0 0\n' "$1" /etc/grid-security/certificates \
  /etc/grid-security/certificates "$NFSOPTS" >> /etc/fstab

exit 0
@@ -0,0 +1,23 @@
#!/bin/sh

# ************************************************************************* #
# 1-ipandhost scripts are called when the context broker tells this node it #
# *requires* to know about nodes playing the role. If this node is told it #
# requires to know about nodes playing the "xyz" role, then if a script    #
# called "xyz" lives in this directory, it will be called with IP, short   #
# hostname and hostname (args $1, $2, and $3 respectively) of the node that#
# *provides* the required "xyz" role.                                      #
# ************************************************************************* #

RELDIR=$(dirname "$0")
ABSDIR=$(cd "$RELDIR"; pwd)
echo "Hello from \"$ABSDIR/$0\""

echo ""
echo "TESTROLE1 IP: $1"
echo "TESTROLE1 Short hostname: $2"
echo "TESTROLE1 Hostname: $3"

# 'echo -e' is a bashism: under a POSIX /bin/sh (e.g. dash) it prints a
# literal "-e ...". printf is portable and emits the same bytes bash's
# 'echo -e' did (the "\n" escape plus echo's own trailing newline).
printf 'Testing: testrole1 is provided by host %s \n\n' "$3" >> /tmp/testrole1

exit 0
@@ -0,0 +1,23 @@
#!/bin/sh

# ************************************************************************* #
# 1-ipandhost scripts are called when the context broker tells this node it #
# *requires* to know about nodes playing the role. If this node is told it #
# requires to know about nodes playing the "xyz" role, then if a script    #
# called "xyz" lives in this directory, it will be called with IP, short   #
# hostname and hostname (args $1, $2, and $3 respectively) of the node that#
# *provides* the required "xyz" role.                                      #
# ************************************************************************* #

RELDIR=$(dirname "$0")
ABSDIR=$(cd "$RELDIR"; pwd)
echo "Hello from \"$ABSDIR/$0\""

echo ""
echo "TESTROLE2 IP: $1"
echo "TESTROLE2 Short hostname: $2"
echo "TESTROLE2 Hostname: $3"

# 'echo -e' is a bashism: under a POSIX /bin/sh (e.g. dash) it prints a
# literal "-e ...". printf is portable and emits the same bytes bash's
# 'echo -e' did (the "\n" escape plus echo's own trailing newline).
printf 'Testing: testrole2 is provided by host %s \n\n' "$3" >> /tmp/testrole2

exit 0
@@ -0,0 +1,23 @@
#!/bin/sh

# ************************************************************************* #
# 1-ipandhost scripts are called when the context broker tells this node it #
# *requires* to know about nodes playing the role. If this node is told it #
# requires to know about nodes playing the "xyz" role, then if a script    #
# called "xyz" lives in this directory, it will be called with IP, short   #
# hostname and hostname (args $1, $2, and $3 respectively) of the node that#
# *provides* the required "xyz" role.                                      #
# ************************************************************************* #

RELDIR=$(dirname "$0")
ABSDIR=$(cd "$RELDIR"; pwd)
echo "Hello from \"$ABSDIR/$0\""

echo ""
echo "TESTROLE3 IP: $1"
echo "TESTROLE3 Short hostname: $2"
echo "TESTROLE3 Hostname: $3"

# 'echo -e' is a bashism: under a POSIX /bin/sh (e.g. dash) it prints a
# literal "-e ...". printf is portable and emits the same bytes bash's
# 'echo -e' did (the "\n" escape plus echo's own trailing newline).
printf 'Testing: testrole3 is provided by host %s \n\n' "$3" >> /tmp/testrole3

exit 0
@@ -0,0 +1,33 @@
#!/bin/sh

# ************************************************************************* #
# 1-ipandhost scripts are called when the context broker tells this node it #
# *requires* to know about nodes playing the role. If this node is told it #
# requires to know about nodes playing the "xyz" role, then if a script    #
# called "xyz" lives in this directory, it will be called with IP, short   #
# hostname and hostname (args $1, $2, and $3 respectively) of the node that#
# *provides* the required "xyz" role.                                      #
# ************************************************************************* #

scriptdir=$(cd "$(dirname "$0")"; pwd)
echo "Hello from \"$scriptdir/$0\""

printf '\n%s\n%s\n\n' \
  "Torque master required: we are being told this node requires a Torque" \
  "master, therefore it will be playing the role of Torque slave."

echo "TORQUEMASTER IP: $1"
echo "TORQUEMASTER Short hostname: $2"
echo "TORQUEMASTER Hostname: $3"

# Point this slave at its Torque server. Replace the file contents rather
# than appending, since only one server can be handled — more than one node
# playing the torquemaster role in a context would be a misconfiguration.
printf '$pbsserver %s\n' "$3" > /var/spool/torque/mom_priv/config
printf '%s\n' "$3" > /var/spool/torque/server_name

exit 0
@@ -0,0 +1,37 @@
#!/bin/sh

# ************************************************************************* #
# 1-ipandhost scripts are called when the context broker tells this node it #
# *requires* to know about nodes playing the role. If this node is told it #
# requires to know about nodes playing the "xyz" role, then if a script    #
# called "xyz" lives in this directory, it will be called with IP, short   #
# hostname and hostname (args $1, $2, and $3 respectively) of the node that#
# *provides* the required "xyz" role.                                      #
# ************************************************************************* #

scriptdir=$(cd "$(dirname "$0")"; pwd)
echo "Hello from \"$scriptdir/$0\""

printf '\n%s\n%s\n\n' \
  "Torque slave required: we are being told this node requires a Torque" \
  "slave, therefore it will be playing the role of Torque master."

echo "TORQUESLAVE IP: $1"
echo "TORQUESLAVE Short hostname: $2"
echo "TORQUESLAVE Hostname: $3"

# Register this slave in the master's nodes list. This script runs once per
# node providing the torqueslave role, so APPEND (never replace) here:
printf '%s np=2\n' "$2" >> /var/spool/torque/server_priv/nodes

# Flag file: signals to other scripts that this node is the Torque master.
touch /root/this_node_is_torque_master

exit 0
@@ -0,0 +1,89 @@
#!/bin/sh

# ************************************************************************ #
# 2-thishost scripts are called with *this* node's IP, short hostname and  #
# fully qualified hostname. It can be useful in multi NIC situations       #
# especially.                                                              #
# ************************************************************************ #

RELDIR=$(dirname "$0")
ABSDIR=$(cd "$RELDIR"; pwd)
echo "Hello from \"$ABSDIR/$0\""

# NOTE: The name of this script must correspond to the interface name that
#       the context broker knows, not the local interface name which may or
#       may not match.
#
#       The context agent can only handle two NICs at most. By convention, if
#       there is more than one NIC, the nics need to be labelled "publicnic" or
#       "localnic" as defined by the metadata server. On EC2, the publicnic
#       is the public IP address that NATs the VM (it does not correspond to
#       an actual NIC in the VM). Note again that the labelling is NOT the
#       interface name in the VM but rather the labels in the contextualization
#       document where different roles may be played by different IP addresses
#       (and they are labelled with NIC names).

echo "publicnic thishost script: configuring local programs before restarts"

echo "This IP: $1"
echo "This short local hostname: $2"
echo "This FQDN: $3"


# SSH host based authentication was configured already, restart SSH to load
# new config.
/etc/init.d/sshd restart

# We're overloading torque master role to imply other head node setups.
# Could do something explicit too. (The flag file is created by the
# 1-ipandhost/torqueslave script.)
if [ ! -e "/root/this_node_is_torque_master" ]; then
    exit 0
fi


# For torque, replace entire contents of these files
echo "$3" > /var/spool/torque/server_name
echo "root@$3" > /var/spool/torque/server_priv/acl_svr/operators


# Create a self-signed cert for onboard CA operations
/root/bin/ca.sh "$3" > /root/safe/host.0
if [ $? -ne 0 ]; then
    echo "failed to make onboard trust root"
    exit 1
fi


# node should trust itself. We need to get the cert hash for the
# filename first:
HASH=$(openssl x509 -in /root/safe/host.0 -hash -noout)
CAFILE="/etc/grid-security/certificates/$HASH.0"
cp /root/safe/host.0 "$CAFILE"
echo "New 'CA' cert: $CAFILE"

# for user pickup
cp "$CAFILE" /root/certs/

# makefile OWNER MODE PATH
# Create PATH (if needed) and set its owner and permissions before the
# secret material is copied in, so the key files never exist with loose
# modes. NB: the original used the bash-only "function name (){" form,
# which is a syntax error under a POSIX /bin/sh such as dash; this is the
# portable function syntax.
makefile() {
    touch "$3"
    chown "$1" "$3"
    chmod "$2" "$3"
}

makefile root 444 /etc/grid-security/hostcert.pem
cp /root/safe/host.0 /etc/grid-security/hostcert.pem
makefile root 400 /etc/grid-security/hostkey.pem
cp /root/safe/host.key /etc/grid-security/hostkey.pem

makefile globus 444 /etc/grid-security/containercert.pem
cp /root/safe/host.0 /etc/grid-security/containercert.pem
makefile globus 400 /etc/grid-security/containerkey.pem
cp /root/safe/host.key /etc/grid-security/containerkey.pem
@@ -0,0 +1,24 @@
#!/bin/sh

# ******************************************************************* #
# 3-data scripts have filenames that correspond to data names in the  #
# context. If this node is told it requires data 'xyz' and a script   #
# in the 3-data directory bears the name 'xyz' then it is called.     #
# The data value has already been written out to a temporary file.    #
# The absolute path of that file is given as argument $1 to this      #
# script for you to do as you please.                                 #
# ******************************************************************* #

RELDIR=$(dirname "$0")
ABSDIR=$(cd "$RELDIR"; pwd)
echo "Hello from \"$ABSDIR/$0\""

echo ""
echo "'gridmap' data"
echo "Filename with data value: $1"

echo "Copying that to /etc/grid-security/grid-mapfile"

# Quote $1: it is a temp-file path supplied by the context agent and the
# unquoted form would word-split on whitespace in the path.
cp "$1" /etc/grid-security/grid-mapfile

exit 0
@@ -0,0 +1,25 @@
#!/bin/sh

# ******************************************************************* #
# 3-data scripts have filenames that correspond to data names in the  #
# context. If this node is told it requires data 'xyz' and a script   #
# in the 3-data directory bears the name 'xyz' then it is called.     #
# The data value has already been written out to a temporary file.    #
# The absolute path of that file is given as argument $1 to this      #
# script for you to do as you please.                                 #
# ******************************************************************* #

scriptdir=$(cd "$(dirname "$0")"; pwd)
echo "Hello from \"$scriptdir/$0\""

printf '\n%s\n' "Filename with data value: $1"

printf '%s\n%s\n%s\n' \
  "This is being used as an example of how one could do a trigger. The " \
  "presence of this data in the context tells us to indeed start the " \
  "container."

# Flag file "sends a message" to another script, which starts the container.
touch /root/do_startcontainer
[Diff rendering truncated here: the remaining files listed above (4-restarts/*, clean.sh, problem.sh, ctx.conf, launch.sh, and the ctx/lib/* sources) were not shown.]

0 comments on commit 67dffb6

Please sign in to comment.