
Merge remote-tracking branch 'jfishman/master'

Builds a bundled mstress jar and tar file with Ant, excluded from the ALL target of the main build.
2 parents 3148dd7 + 5e08401 commit eef3dc4dece787a7eecd03d560074e40685e20e2 Jeremy R. Fishman committed Feb 14, 2013
73 benchmarks/mstress/CMakeLists.txt
@@ -19,7 +19,8 @@
# Build the C++ mstress benchmark
#
-add_executable (mstress_client mstress_client.cc)
+add_executable (mstress_client EXCLUDE_FROM_ALL mstress_client.cc)
+
IF (USE_STATIC_LIB_LINKAGE)
add_dependencies (mstress_client kfsClient)
target_link_libraries (mstress_client kfsClient qcdio pthread)
@@ -32,5 +33,71 @@ IF (NOT APPLE)
target_link_libraries (rt)
ENDIF (NOT APPLE)
-install (TARGETS mstress_client
- RUNTIME DESTINATION bin/benchmarks)
+add_custom_command (
+ OUTPUT mstress.jar
+ COMMAND ant -f ${CMAKE_CURRENT_SOURCE_DIR}/build.xml
+ DEPENDS MStress_Client.java
+ COMMENT The HDFS mstress client bundled as a jar.
+ VERBATIM
+)
+
+add_custom_target (mstress DEPENDS mstress_client mstress.jar)
+
+set (mstress_scripts
+ mstress_plan.py
+ mstress_run.py
+ mstress.py
+ mstress_cleanup.py
+ mstress_install.sh
+)
+
+foreach (script ${mstress_scripts})
+ add_custom_command (
+ OUTPUT ${script}
+ COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/${script} ${CMAKE_CURRENT_BINARY_DIR}/
+ )
+endforeach(script)
+
+add_custom_target (
+ mstress-scripts
+ DEPENDS ${mstress_scripts}
+ COMMENT Copy mstress scripts to build directory.
+)
+
+add_custom_target (
+ mstress-tarball
+ COMMAND cd .. && ${CMAKE_COMMAND} -E tar czvf mstress.tgz
+ mstress/mstress_client
+ mstress/mstress.jar
+ mstress/*.py
+ mstress/*.sh
+ DEPENDS mstress_client mstress.jar
+ COMMENT Bundle mstress files in a tar archive.
+)
+add_dependencies(mstress-tarball mstress-scripts)
+
+get_property (metaserver_location TARGET metaserver PROPERTY LOCATION)
+get_property (chunkserver_location TARGET chunkserver PROPERTY LOCATION)
+add_custom_target (
+ mstress-bootstrap
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/bin/
+ COMMAND ${CMAKE_COMMAND} -E copy ${metaserver_location} ${CMAKE_CURRENT_BINARY_DIR}/bin/
+ COMMAND ${CMAKE_COMMAND} -E copy ${chunkserver_location} ${CMAKE_CURRENT_BINARY_DIR}/bin/
+ COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/webui ${CMAKE_CURRENT_BINARY_DIR}/webui
+ COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/examples/sampleservers/sample_setup.py ${CMAKE_CURRENT_BINARY_DIR}/setup.py
+ COMMAND cd .. && ${CMAKE_COMMAND} -E tar czvf mstress-bootstrap.tgz
+ mstress/mstress_client
+ mstress/mstress.jar
+ mstress/*.py
+ mstress/*.sh
+ mstress/bin/metaserver
+ mstress/bin/chunkserver
+ mstress/webui
+ DEPENDS mstress_client mstress.jar metaserver chunkserver
+ COMMENT Bundle mstress files along with QFS binaries.
+)
+add_dependencies(mstress-bootstrap mstress-scripts)
+
+set_directory_properties (PROPERTIES
+ ADDITIONAL_MAKE_CLEAN_FILES "../mstress.tgz;mstress.jar;${mstress_scripts}"
+)
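Because these targets are excluded from ALL, they have to be requested by name. A minimal sketch, assuming an out-of-source build directory laid out as in the QFS developer documentation (paths are illustrative):

    cd build/release && cmake ../..
    make mstress              # builds mstress_client and mstress.jar
    make mstress-tarball      # produces benchmarks/mstress.tgz in the build tree
    make mstress-bootstrap    # also packs metaserver, chunkserver, webui, and setup.py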
2 benchmarks/mstress/MStress_Client.java
@@ -17,7 +17,7 @@
* License for the specific language governing permissions and limitations
* under the License.
*
- * This Java client performs filesystem meta opetarions on the Hadoop namenode
+ * This Java client performs filesystem meta operations on the Hadoop namenode
* using HDFS DFSClient.
*/
93 benchmarks/mstress/README
@@ -74,28 +74,27 @@ specified by the benchmark plan.
[2] Files
=========
- - mstress_initialize.sh
- Helper script to be used before compiling the source and deploying the
- mstress bundle.
- Do ./mstress_initialize.sh --h to see options.
+ - CMakeLists.txt
+ Builds the QFS stress client (C++) and HDFS stress client (Java) along with
+ the main release build.
- - Makefile
- Used to build the QFS stress client (C++) and HDFS stress client (Java).
- Ensure that $JAVA_HOME is set correctly.
+ - build.xml, ivy.xml, ivysettings.xml
+ Ant build file and Ivy dependency and settings files, used by CMakeLists.txt
+ to build the HDFS stress client (Java). Ensure that $JAVA_HOME is set.
- mstress_client.cc
Produces the mstress_client binary that actually drives the QFS metaserver.
- Build using the provided Makefile ('make ccclient')
+ Builds to $GIT_DIR/build/release/bin/benchmarks/mstress_client
See 'Benchmarking Procedure' below for details.
- MStress_Client.java
- Produces the java MStress_Client for HDFS namenode.
- Build using the provided Makefile ('make javaclient')
+ Produces the java MStress_Client for HDFS namenode. Built using ant to
+ $GIT_DIR/build/release/bin/benchmarks/mstress.jar
See 'Benchmarking Procedure' below for details.
- - mstress_prepare_master_clients.sh
- Helper script used to copy the mstress directory to a list of hosts. To be
- used after running make.
+ - mstress_install.sh
+ Helper script used to deploy mstress to a list of hosts. Will invoke cmake
+ and make under ./build/.
- mstress_plan.py
Used to generate a plan file for benchmarking.
@@ -141,65 +140,20 @@ on the same box, "localhost".
(1) Setup the QFS metaserver and HDFS namenode with the help of
section [4] "Setting up DFS metaserver/namenode" below.
-
(2) You should have SSH key authentication set up on the hosts involved so
that the scripts can do password/passphrase-less login.
+(3) On the build host, compile and install QFS using the steps described in
+ https://github.com/quantcast/qfs/wiki/Developer-Documentation.
-(3) On the build machine, ensure that you have the Cloudera HDFS client jars.
- This is typically at /usr/lib/hadoop/client/*.jars.
- If you don't have them, install them by,
- 1. Add the following to /etc/yum.repos.d/thirdparty.repo (sudo needed)
- -----------------------------------
- [cloudera-cdh4]
- name=Cloudera's Distribution for Hadoop, Version 4
- baseurl=http://archive.cloudera.com/cdh4/redhat/6/x86_64/cdh/4/
- gpgkey = http://archive.cloudera.com/cdh4/redhat/6/x86_64/cdh/RPM-GPG-KEY-cloudera
- gpgcheck = 1
- -----------------------------------
- 2. sudo yum install hadoop-client
-
-
-(4) On the build host, execute 'mstress_initialize.sh' to set up jar paths.
- ./mstress_initialize.sh /usr/lib/hadoop/client/
-
-
-(5) On the build host, compile and install QFS using the steps described in
- https://github.com/quantcast/qfs/wiki/Developer-Documentation. Then change
- directory to benchmarks/mstress, and just issuing 'make' should build the
- Java/C++ clients.
-
- To manually build C++ client:
- - Assuming the QFS code is in ~/code/qfs, compile and install QFS using
- the steps described in
- https://github.com/quantcast/qfs/wiki/Developer-Documentation
- - cd ~/code/qfs/benchmarks/mstress
- - QFS_BUILD_INCLUDE=~/code/qfs/build/include \
- QFS_BUILD_STATLIB=~/code/qfs/build/lib/static \
- BOOST_LIBRARY_DIR=/opt/local/lib \
- make
- If you encounter any build problem, ensure that your QFS_BUILD_INCLUDE etc.
- refer to valid paths.
-
-
- To manually build MStress_Client.java
- - Compile MStress_client.java with hadoop-client jars in the class path.
- theCP=$(echo mstress_hdfs_client_jars/*.jar | sed 's/ /:/g')
- javac -cp $theCP MStress_Client.java
-
-
-(6) Determine the master and load generating client hosts that you want to use
+(4) Determine the master and load generating client hosts that you want to use
to connect to the DFS server. This could just be "localhost" if you want to
run the benchmark locally.
+(5) From the build host in this directory, run `./mstress_install.sh hosts..`
+ to deploy mstress files to the participating hosts under ~/mstress.
-(7) From the build host, use "mstress_prepare_master_clients.sh" to copy your
- mstress directory to the participating hosts.
- Note: Do './mstress_prepare_master_clients.sh localhost' localhost-only run.
- The mstress directory paths should be the same on master and client hosts.
-
-
-(8) On the master host change directory to ~/mstress
+(6) On the master host change directory to ~/mstress
Create a plan file using mstress_plan.py.
Do ./mstress_plan.py --help to see example usage.
Eg:
@@ -216,19 +170,18 @@ on the same box, "localhost".
The plan file gets copied to the /tmp directory where you run it. It will
also get copied to the participating client hosts in the '-c' option.
-
-(9) Checklist: check the presence of,
- - the plan file on master host and client hosts (step 8 does this for you)
+(7) Checklist: check the presence of,
+ - the plan file on master host and client hosts (step 6 does this for you)
- the mstress_client binaries (QFS and HDFS clients) on master and all
- client hosts (step 7).
+ client hosts (step 5).
-(10) Run the benchmark from the master with mstress.py.
+(8) Run the benchmark from the master with mstress.py.
Do ./mstress.py --help to see options.
Eg:
./mstress.py -f qfs -s <metahost> -p <metaport> -a </tmp/something.plan>
./mstress.py -f hdfs -s <namehost> -p <nameport> -a </tmp/something.plan>
-(11) The benchmark name, progress, and time taken will be printed out.
+(9) The benchmark name, progress, and time taken will be printed out.
[4] DFS Server Setup
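For a quick localhost sanity run, the procedure above condenses roughly to the following sketch; the plan parameters and the metaserver port are illustrative placeholders, not defaults:

    ./mstress_install.sh localhost
    cd ~/mstress
    ./mstress_plan.py -c localhost -n 5 -t file -l 3 -i 10 -s 1000 -o mstress.plan
    ./mstress.py -f qfs -s localhost -p 40000 -a mstress.plan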
68 benchmarks/mstress/build.xml
@@ -1,15 +1,42 @@
-<project name="qfs_mstress" default="compile" xmlns:ivy="antlib:org.apache.ivy.ant">
+<!--
+# $Id$
+#
+# Copyright 2012 Quantcast Corp.
+#
+# This file is part of Quantcast File System (QFS).
+#
+# Licensed under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+#
+# Build the Java mstress benchmark against HDFS.
+#
+-->
+<project name="mstress" default="build" xmlns:ivy="antlib:org.apache.ivy.ant">
- <property name="ivy.install.version" value="2.3.0" />
- <property name="ivy.jar.dir" value="${basedir}/.ivy" />
+ <property name="src.dir" value="${basedir}" />
+ <property name="dist.dir" value="${user.dir}" />
+ <property name="build.dir" value="${user.dir}/build" />
+
+ <property name="ivy.jar.dir" value="${user.dir}/.ivy" />
<property name="ivy.jar.file" value="${ivy.jar.dir}/ivy.jar" />
- <property name="build.dir" value="build/output" />
- <property name="src.dir" value="." />
+ <property name="ivy.install.version" value="2.3.0" />
+
+ <property name="hadoop.build.version" value="2.0.2-alpha" />
<!--=======================
target: get-ivy
- ===========================-->
- <target name="get-ivy" unless="skip.download">
+ ========================-->
+ <available property="ivy.jar.present" file="${ivy.jar.file}"/>
+ <target name="get-ivy" unless="ivy.jar.present">
<mkdir dir="${ivy.jar.dir}"/>
<echo message="installing ivy..."/>
<get src="http://repo1.maven.org/maven2/org/apache/ivy/ivy/${ivy.install.version}/ivy-${ivy.install.version}.jar"
@@ -18,7 +45,7 @@
<!--=======================
target: setup-ivy
- ===========================-->
+ ========================-->
<target name="setup-ivy" depends="get-ivy" description="==> install ivy">
<path id="ivy.lib.path">
<fileset dir="${ivy.jar.dir}" includes="*.jar"/>
@@ -28,22 +55,35 @@
<!--=======================
target: resolve
- ===========================-->
+ ========================-->
<target name="resolve" depends="setup-ivy" description="==> resolve ivy dependencies">
<ivy:cachepath pathid="ivy.lib.path" conf="compile" />
+ <ivy:cachefileset setid="runtime.classpath" conf="runtime" type="jar,bundle" />
</target>
<!--=======================
target: compile
- ===========================-->
+ ========================-->
<target name="compile" depends="resolve" description="==> compile sources">
<mkdir dir="${build.dir}" />
<javac srcdir="${src.dir}" destdir="${build.dir}" classpathref="ivy.lib.path" />
</target>
<!--=======================
+ target: build
+ ========================-->
+ <target name="build" depends="compile" description="==> package files into a jar">
+ <jar jarfile="${dist.dir}/mstress.jar" basedir="${build.dir}" compress="true">
+ <zipgroupfileset refid="runtime.classpath" />
+ <manifest>
+ <attribute name="Main-Class" value="MStress_Client" />
+ </manifest>
+ </jar>
+ </target>
+
+ <!--=======================
target: run
- ===========================-->
+ ========================-->
<target name="run" depends="compile, input-runargs" description="==> run the client">
<java classname="MStress_Client">
<arg line="${runArgs}" />
@@ -59,7 +99,7 @@
<!--=======================
target: clean
- ===========================-->
+ ========================-->
<target name="clean" description="==> clean the project">
<delete includeemptydirs="true" quiet="true">
<fileset dir="${build.dir}" />
@@ -68,14 +108,14 @@
<!--=======================
target: clean-ivy
- ===========================-->
+ ========================-->
<target name="clean-ivy" description="==> clean the ivy installation">
<delete dir="${ivy.jar.dir}"/>
</target>
<!--=======================
target: clean-cache
- ===========================-->
+ ========================-->
<target name="clean-cache" depends="setup-ivy" description="==> clean the ivy cache">
<ivy:cleancache />
</target>
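The jar can also be produced without CMake by calling Ant directly. A hedged sketch: mstress.jar and build/ land in the working directory (${user.dir}), mirroring the CMake custom command, and runArgs is the property the run target passes to the client; the argument string is a placeholder:

    ant -f benchmarks/mstress/build.xml build
    ant -f benchmarks/mstress/build.xml run -DrunArgs="<client arguments>"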
106 benchmarks/mstress/curl_run_mstress.sh
@@ -0,0 +1,106 @@
+#!/bin/bash
+PORT=40000 # metaserver client port
+CLIENTS=5
+LEVELS=3
+INODES=10
+STATS=1000
+
+url=$1
+tarfile=${2:-"mstress.tgz"}
+if [ -z "$url" ]
+then
+ echo >&2 "Usage: $0 url [tgz]"
+ exit 64
+fi
+
+date=`date +%Y%m%d%H%M%S`
+exec > >(tee mstress.log.$date)
+exec 2>&1
+
+echo "Downloading: $url"
+tmpfile=`mktemp $tarfile.XXXXXX` || exit $?
+if [ -f "$tarfile" ]
+then
+ code=`curl -sf "$url" -z "$tarfile" -o "$tmpfile" -w '%{http_code}'`
+else
+ code=`curl -sf "$url" -o "$tmpfile" -w '%{http_code}'`
+fi
+
+if [ $? -ne 0 ]
+then
+ echo >&2 "Failed to curl $url"
+ rm "$tmpfile"
+elif [ "$code" = 200 ]
+then
+ mv "$tmpfile" "$tarfile"
+elif [ "$code" = 304 ]
+then
+ echo " not modified"
+ rm "$tmpfile"
+fi
+
+if [ ! -f "$tarfile" ]
+then
+ exit 1
+fi
+
+echo "Unpacking $tarfile"
+contents=`tar xzfv "$tarfile"` || exit $?
+echo -n "$contents" | sed 's/^/ /'
+
+echo "Installing client files"
+./mstress/mstress_install.sh localhost || exit $?
+
+echo "Configuring meta and chunk servers"
+cd mstress
+cat >setup.cfg <<EOF
+[metaserver]
+hostname = localhost
+rundir = ~/qfsbase/meta
+clientport = $PORT
+chunkport = `expr $PORT + 100`
+clusterkey = myTestCluster
+
+[chunkserver1]
+hostname = localhost
+rundir = ~/qfsbase/chunk1
+chunkport = `expr $PORT + 1000`
+# in practice, have a chunkdir per disk.
+chunkdirs = ~/qfsbase/chunk1/chunkdir11 ~/qfsbase/chunk1/chunkdir12
+
+[chunkserver2]
+hostname = localhost
+rundir = ~/qfsbase/chunk2
+chunkport = `expr $PORT + 1001`
+# in practice, have a chunkdir per disk.
+chunkdirs = ~/qfsbase/chunk2/chunkdir21
+
+[webui]
+hostname = localhost
+rundir = ~/qfsbase/web
+webport = 42000
+EOF
+./setup.py -c setup.cfg -r . -s . -a install || exit $?
+
+echo "Running benchmark"
+(
+ ./mstress_plan.py -c localhost -n $CLIENTS -t file -l $LEVELS -i $INODES -s $STATS -o mstress.plan &&
+ ./mstress.py -f qfs -s localhost -p $PORT -a mstress.plan
+ # disabled - unknown hanging error on create
+ #./mstress.py -f hdfs -s localhost -p $PORT -a mstress.plan
+ )
+ret=$?
+
+uid=`id -u`
+pid=`pgrep -u $uid metaserver`
+ctime=`awk '{print $14,$15}' /proc/$pid/stat`
+rev=`strings bin/metaserver | grep -B1 KFS_BUILD_INFO_END | head -1 | cut -d@ -f2 | cut -c-16`
+
+./setup.py -c setup.cfg -r . -s . -a uninstall || exit $?
+
+if [ $ret -eq 0 ]
+then
+ echo "Metaserver cpu usage (date gitrev utime stime):"
+ echo "$date $rev $ctime"
+fi
+exit $ret
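A hedged usage sketch for the new script: it expects a URL serving one of the tarballs built above (the bootstrap bundle, since the script relies on bin/metaserver and webui), re-downloads only when the remote copy is newer than the local one, and logs to mstress.log.<timestamp>; the URL and host are placeholders:

    ./curl_run_mstress.sh http://buildhost.example/mstress-bootstrap.tgz mstress-bootstrap.tgz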
13 benchmarks/mstress/ivy.xml
@@ -1,22 +1,21 @@
<ivy-module version="1.0" xmlns:e="http://ant.apache.org/ivy/extra"
xmlns:m="http://ant.apache.org/ivy/maven">
- <info organisation="com.quantcast" module="qfs_mstress"/>
+ <info organisation="com.quantcast" module="mstress"/>
<configurations>
<conf name="compile" description="Dependencies needed to compile" />
<conf name="master" visibility="public" description="The artifacts published by this project" />
- <conf name="source" visibility="public" />
- <conf name="javadoc" visibility="public" />
+ <conf name="runtime" description="All dependencies needed to run the client" />
</configurations>
<publications>
<artifact type="jar" conf="master" ext="jar"/>
- <artifact m:classifier="sources" type="source" conf="source" ext="zip"/>
- <artifact m:classifier="javadoc" type="javadoc" conf="javadoc" ext="zip"/>
</publications>
<dependencies defaultconfmapping="*->default">
- <dependency org="org.apache.hadoop" name="hadoop-common" rev="latest.integration" conf="compile->master"/>
- <dependency org="org.apache.hadoop" name="hadoop-hdfs" rev="latest.integration" conf="compile->master"/>
+ <dependency org="org.apache.hadoop" name="hadoop-common" rev="${hadoop.build.version}"
+ conf="compile->master; runtime->runtime,master" />
+ <dependency org="org.apache.hadoop" name="hadoop-hdfs" rev="${hadoop.build.version}"
+ conf="compile->master; runtime->runtime,master" />
</dependencies>
</ivy-module>
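Since the Hadoop revision is now driven by the hadoop.build.version property (defaulted to 2.0.2-alpha in build.xml) rather than latest.integration, it can presumably be pinned from the Ant command line in the usual way; a sketch with a placeholder version:

    ant -f benchmarks/mstress/build.xml -Dhadoop.build.version=<hadoop version> build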
12 benchmarks/mstress/ivysettings.xml
@@ -0,0 +1,12 @@
+<ivysettings>
+ <settings defaultResolver="main"/>
+ <include url="${ivy.default.settings.dir}/ivysettings-local.xml"/>
+ <resolvers>
+ <!-- set checkconsistency="false" to bypass POM error in commons-daemon -->
+ <ibiblio name="public" m2compatible="true" checkconsistency="false" />
+ <chain name="main" dual="true">
+ <resolver ref="local"/>
+ <resolver ref="public"/>
+ </chain>
+ </resolvers>
+</ivysettings>
3 benchmarks/mstress/mstress.py
@@ -407,8 +407,7 @@ def SetGlobalPaths(opts):
Globals.SERVER_CMD = Globals.KFS_SERVER_CMD
Globals.SERVER_KEYWORD = Globals.KFS_SERVER_KEYWORD
elif opts.filesystem == 'hdfs':
- hdfsjars = commands.getoutput("echo %s/mstress_hdfs_client_jars/*.jar | sed 's/ /:/g'" % mydir)
- Globals.CLIENT_PATH = 'java -Xmx256m -cp %s:%s MStress_Client' % (mydir,hdfsjars)
+ Globals.CLIENT_PATH = 'java -Xmx256m -jar %s/mstress.jar' % mydir
Globals.SERVER_CMD = Globals.HDFS_SERVER_CMD
Globals.SERVER_KEYWORD = Globals.HDFS_SERVER_KEYWORD
else:
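With the bundled jar carrying a Main-Class manifest entry, the HDFS client is launched via java -jar instead of a hand-assembled classpath. Roughly what mstress.py now executes, assuming the ~/mstress deployment described in the README; the client arguments are appended by mstress.py and elided here:

    java -Xmx256m -jar ~/mstress/mstress.jar <client arguments...>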
77 benchmarks/mstress/mstress_initialize.sh
@@ -1,77 +0,0 @@
-#!/bin/sh
-
-# $Id$
-#
-# Author: Thilee Subramaniam
-#
-# Copyright 2012 Quantcast Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-#
-# The mstress runs stress tests on QFS metaserver and HDFS namenode. To be able
-# to work with the namenode, the java HDFS client needs to be compiled and run
-# with hadoop hdfs jars.
-#
-# To make this process uniform across mstress master and slaves, we copy the
-# jars to a local directory and copy it around to all participating hosts at
-# a fixed location.
-# (Without this, each client would need to add entries to /etc/yum.repos.d/,
-# and require root access to install hadoop client rpms).
-#
-# This script packages the jars locally on the build node, so that the
-# 'mstress_prepare_clients.sh' script can copy it over to master and clients.
-#
-# Run this program with the path of hadoop client jars as argument (default
-# /usr/lib/hadoop/client/), and it will create a "mstress_hdfs_client_jars"
-# directory containing the jars.
-#
-
-if [[ "$1" = -* ]]
-then
- echo "Usage: $0 [ path/to/hadoop/client/jars ]"
- echo " This prepares the build environment on mstress build host"
- echo " Default path is '/usr/lib/hadoop/client/'"
- exit
-fi
-
-JARS_SOURCE=${1:-"/usr/lib/hadoop/client/"}
-
-DIR="$( cd "$( dirname "$0" )" && pwd )"
-JARS_TARGET=${DIR}/mstress_hdfs_client_jars
-
-if [ ! -d "$JARS_SOURCE" ] || [ -z "$(ls -A "$JARS_SOURCE"/*.jar)" ]; then
- echo ""$JARS_SOURCE" is not a directory or does not have the jars."
- exit 1
-fi
-
-if [ -d "$JARS_TARGET" ]; then
- if [ "$(ls -A "$JARS_TARGET"/*.jar 2> /dev/null)" ]; then
- echo ""$JARS_TARGET" already has the jars. Nothing to do."
- exit 0
- fi
-fi
-
-mkdir -p "$JARS_TARGET"
-cp $JARS_SOURCE/*.jar "$JARS_TARGET"
-
-if [ $? -ne 0 ]
-then
- echo "Failed to copy jars."
- exit 1
-fi
-
-echo "Hadoop client jars from $JARS_SOURCE copied to $JARS_TARGET."
-
-exit 0
-
43 ...mstress/mstress_prepare_master_clients.sh → benchmarks/mstress/mstress_install.sh
@@ -1,10 +1,7 @@
#!/bin/sh
-
#
# $Id$
#
-# Author: Thilee Subramaniam
-#
# Copyright 2012 Quantcast Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
@@ -28,35 +25,33 @@
# and untar + unzip them for usage.
#
-if [ -z $1 ] || [[ "$1" = -* ]]
+TAR=${TAR:-"tar"}
+
+if [ -z "$BOOTSTRAP" ]
then
- echo "Usage: $0 <comma-separated hosts>"
- echo " This copies the mstress bundle to master and client hosts."
- exit
+ tarfile="mstress.tgz"
+ target="mstress-tarball"
+else
+ tarfile="mstress-bootstrap.tgz"
+ target="mstress-bootstrap"
fi
-which tar &>/dev/null
-if [ $? -ne 0 ]
+if [ $# -lt 1 ]
then
- echo "tar command not found."
- exit 1
+ echo "Usage: $0 <comma-separated hosts>"
+ echo " This copies the mstress bundle to master and client hosts."
+ exit
fi
-script_dir=$(dirname "$0")
-
-cd $script_dir/.. && tar cvfz mstress.tgz mstress
-if [ $? -ne 0 ]
+if [ ! -f "$tarfile" ]
then
- echo "failed to create archive."
- cd -
- exit 1
+ [ -d build ] || mkdir build
+ (cd build && cmake ../../.. && make "$target" && cp "benchmarks/$tarfile" ..) || exit 1
fi
-cd -
-for v in `echo "$@"|sed 's/,/ /g'`
+while [ $# -ne 0 ]
do
- ssh $v "rm -rf ~/mstress*"
- scp $script_dir/../mstress.tgz $v:~
- ssh $v "tar xvfz mstress.tgz"
+ echo "Deploying mstress tarball to $1"
+ ssh $1 "$TAR xzv || echo >&2 failed to untar on '$1'" < "$tarfile"
+ shift
done
-
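With this rewrite, hosts are consumed as separate positional arguments via the while/shift loop (the usage string still says comma-separated), the tarball is built on demand through CMake if it is missing, and two environment variables are honored. A usage sketch with illustrative hostnames:

    ./mstress_install.sh master1 client1 client2        # deploy mstress.tgz
    BOOTSTRAP=1 ./mstress_install.sh master1 client1    # deploy mstress-bootstrap.tgz instead
    TAR=gtar ./mstress_install.sh localhost             # override the tar invoked on the remote host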
8 examples/sampleservers/sample_setup.py
@@ -128,8 +128,8 @@ def kill_running_program(binaryPath):
checkPath = os.path.split(binaryPath)[1]
if not checkPath:
return
- cmd = ('ps -ef | grep %s | grep -v grep | awk \'{print $2}\''
- % checkPath)
+ cmd = ('ps -ef | grep %s | grep %s | grep -v grep | awk \'{print $2}\''
+ % (os.getlogin(), checkPath))
res = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE).communicate()
pids = res[0].split('\n')
@@ -138,8 +138,8 @@ def kill_running_program(binaryPath):
os.kill(int(pid.strip()), signal.SIGTERM)
else:
if binaryPath.find('qfsstatus') >= 0:
- cmd = ('ps -ef | grep /qfsbase/ | grep %s | grep -v grep | awk \'{print $2}\''
- % binaryPath)
+ cmd = ('ps -ef | grep %s | grep /qfsbase/ | grep %s | grep -v grep | awk \'{print $2}\''
+ % (os.getlogin(), binaryPath))
res = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE).communicate()
pids = res[0].split('\n')
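The kill helper now also greps the ps output for the invoking user, so only that user's QFS processes are signaled. Roughly the pipeline sample_setup.py builds; the username and binary name are illustrative:

    ps -ef | grep alice | grep metaserver | grep -v grep | awk '{print $2}'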
