
rebase

Summary:
rebase so that the tree compiles
Task ID: #

Blame Rev:

Reviewers:

CC:

Test Plan:

Revert Plan:

Tags:
1 parent 4327fba commit 68f6b21f6c57648935158c698401cc2cb3cdd38e heyongqiang committed Dec 23, 2011
Showing with 2,914 additions and 983 deletions.
  1. +4 −1 FB-CHANGES.txt
  2. +44 −23 bin/hadoop
  3. +16 −3 bin/hadoop-daemon.sh
  4. +10 −3 bin/slaves.sh
  5. +2 −2 bin/start-corona.sh
  6. +36 −0 bin/start-mapred-single.sh
  7. +1 −1 bin/start-proxyjt.sh
  8. +3 −3 bin/stop-corona.sh
  9. +28 −0 bin/stop-mapred-single.sh
  10. +51 −0 bin/stop-tasktracker.sh
  11. +4 −2 build.xml
  12. +1 −0 conf/hadoop-env.sh.template
  13. +1 −1 conf/log4j.properties
  14. +0 −9 ivy.xml
  15. +2 −2 ivy/hadoop-core.pom
  16. +2 −2 ivy/libraries.properties
  17. BIN lib/slf4j-api-1.6.1-hadoop.jar
  18. BIN lib/slf4j-log4j12-1.6.1-hadoop.jar
  19. +13 −6 src/contrib/corona/src/java/org/apache/hadoop/mapred/CoronaJobHistory.java
  20. BIN src/contrib/dynamicclouds/dynamicclouds.jar
  21. +23 −15 src/contrib/highavailability/src/java/org/apache/hadoop/hdfs/AvatarZKShell.java
  22. +69 −30 src/contrib/highavailability/src/java/org/apache/hadoop/hdfs/server/datanode/AvatarDataNode.java
  23. +7 −22 src/contrib/highavailability/src/java/org/apache/hadoop/hdfs/server/datanode/OfferService.java
  24. +79 −92 src/contrib/highavailability/src/java/org/apache/hadoop/hdfs/server/namenode/AvatarNode.java
  25. +48 −9 src/contrib/highavailability/src/test/org/apache/hadoop/hdfs/MiniAvatarCluster.java
  26. +4 −0 src/contrib/raid/ivy.xml
  27. +1 −0 src/contrib/raid/ivy/libraries.properties
  28. +38 −6 src/contrib/raid/src/java/org/apache/hadoop/hdfs/DistributedRaidFileSystem.java
  29. +7 −4 src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyRaid.java
  30. +13 −7 src/contrib/raid/src/java/org/apache/hadoop/raid/BlockReconstructor.java
  31. +10 −4 src/contrib/raid/src/java/org/apache/hadoop/raid/ConfigManager.java
  32. +9 −3 src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaid.java
  33. +2 −2 src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaidNode.java
  34. +7 −0 src/contrib/raid/src/java/org/apache/hadoop/raid/ErasureCode.java
  35. +74 −7 src/contrib/raid/src/java/org/apache/hadoop/raid/JobMonitor.java
  36. +1 −1 src/contrib/raid/src/java/org/apache/hadoop/raid/LocalRaidNode.java
  37. +179 −79 src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java
  38. +8 −3 src/contrib/raid/src/java/org/apache/hadoop/raid/RaidShell.java
  39. +28 −8 src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonCode.java
  40. +8 −2 src/contrib/raid/src/java/org/apache/hadoop/raid/StatisticsCollector.java
  41. +53 −2 src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/PolicyInfo.java
  42. +76 −0 src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRaid.java
  43. +2 −0 src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixer.java
  44. +1 −0 src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidHar.java
  45. +69 −2 src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidNode.java
  46. +2 −1 src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java
  47. +1 −0 src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidShellFsck.java
  48. +1 −1 src/contrib/raid/src/test/org/apache/hadoop/raid/TestStatisticsCollector.java
  49. +5 −4 src/contrib/raid/webapps/raid/jobmonitor.jsp
  50. +120 −73 src/core/org/apache/hadoop/fs/DU.java
  51. +61 −12 src/core/org/apache/hadoop/fs/FileSystem.java
  52. +1 −1 src/core/org/apache/hadoop/http/HttpServer.java
  53. +44 −1 src/core/org/apache/hadoop/net/NetworkTopology.java
  54. +161 −3 src/core/org/apache/hadoop/util/DataChecksum.java
  55. +9 −9 src/core/org/apache/hadoop/util/LinuxResourceCalculatorPlugin.java
  56. +27 −3 src/core/org/apache/hadoop/util/StringUtils.java
  57. +3 −1 src/hdfs/org/apache/hadoop/hdfs/BlockReaderLocal.java
  58. +105 −4 src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
  59. +44 −0 src/hdfs/org/apache/hadoop/hdfs/DFSUtil.java
  60. +8 −5 src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java
  61. +45 −42 src/hdfs/org/apache/hadoop/hdfs/server/common/Storage.java
  62. +68 −0 src/hdfs/org/apache/hadoop/hdfs/server/common/Util.java
  63. +23 −0 src/hdfs/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
  64. +223 −47 src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  65. +158 −79 src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
  66. +7 −0 src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
  67. +8 −0 src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
  68. +119 −56 src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
  69. +7 −1 src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java
  70. +17 −16 src/hdfs/org/apache/hadoop/hdfs/server/datanode/NamespaceService.java
  71. +9 −0 src/hdfs/org/apache/hadoop/hdfs/server/datanode/VolumeMap.java
  72. +26 −48 src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java
  73. +1 −1 src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
  74. +10 −1 src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  75. +8 −6 src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
  76. +16 −17 src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
  77. +70 −6 src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  78. +26 −4 src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
  79. +9 −0 src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  80. +13 −5 src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  81. +3 −0 src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java
  82. +58 −39 src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
  83. +21 −18 src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java
  84. +1 −1 src/mapred/org/apache/hadoop/mapred/MemoryBlockAllocator.java
  85. +8 −3 src/mapred/org/apache/hadoop/mapred/ReduceTask.java
  86. +43 −4 src/mapred/org/apache/hadoop/mapred/SequenceFileRecordReader.java
  87. +25 −1 src/mapred/org/apache/hadoop/mapred/Task.java
  88. +1 −0 src/mapred/org/apache/hadoop/mapred/TaskLogServlet.java
  89. +13 −2 src/mapred/org/apache/hadoop/mapred/TaskMemoryManagerThread.java
  90. +40 −2 src/mapred/org/apache/hadoop/mapred/TaskTracker.java
  91. +5 −0 src/mapred/org/apache/hadoop/mapred/lib/CombineFileInputFormat.java
  92. +46 −9 src/test/org/apache/hadoop/fs/TestDU.java
  93. +5 −2 src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
  94. +2 −1 src/test/org/apache/hadoop/hdfs/TestDFSFinalize.java
  95. +2 −1 src/test/org/apache/hadoop/hdfs/TestDFSRollback.java
  96. +2 −1 src/test/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
  97. +3 −2 src/test/org/apache/hadoop/hdfs/TestDFSUpgrade.java
  98. +39 −39 src/test/org/apache/hadoop/hdfs/TestFileAppend4.java
  99. +4 −5 src/test/org/apache/hadoop/hdfs/TestFileCorruption.java
  100. +2 −1 src/test/org/apache/hadoop/hdfs/UpgradeUtilities.java
  101. +10 −8 src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
  102. +16 −16 src/test/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodes.java
  103. +19 −15 src/test/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
  104. +3 −6 src/test/org/apache/hadoop/mapred/lib/TestCombineFileInputFormat.java
  105. +44 −0 src/test/org/apache/hadoop/net/TestNetworkTopology.java
  106. +10 −4 src/test/org/apache/hadoop/util/TestPureJavaCrc32.java
FB-CHANGES.txt
@@ -3,7 +3,8 @@ to this release in the order indicated. This is in addition to
the patches applied from issues referenced in CHANGES.txt.
Release 0.20.3 + FB - Unreleased.
-
+ MAPREDUCE-2524 Backport trunk heuristics for failing maps when we get fetch
+ failures retrieving map output during shuffle
MAPREDUCE-2349 speedup getSplits methods in inputformats.
MAPREDUCE-2218 schedule additional tasks when killactions are dispatched
MAPREDUCE-2162 handle stddev > mean
@@ -229,3 +230,5 @@ Release 0.20.3 + FB - Unreleased.
HDFS-955 Fix Edits log/Save FSImage bugs
HADOOP-6683 the first optimization: ZlibCompressor does not fully utilize the buffer
HADOOP-7111 Several TFile tests failing when native libraries are present
+ HADOOP-7444 Add Checksum API to verify and calculate checksums "in bulk" (todd)
+ HADOOP-7443 Add CRC32C as another DataChecksum implementation (todd)
bin/hadoop
@@ -24,17 +24,17 @@
#
# HADOOP_CLASSPATH Extra Java CLASSPATH entries.
#
-# HADOOP_HEAPSIZE The maximum amount of heap to use, in MB.
+# HADOOP_HEAPSIZE The maximum amount of heap to use, in MB.
# Default is 1000.
#
# HADOOP_OPTS Extra Java runtime options.
-#
-# HADOOP_NAMENODE_OPTS These options are added to HADOOP_OPTS
+#
+# HADOOP_NAMENODE_OPTS These options are added to HADOOP_OPTS
# HADOOP_CLIENT_OPTS when the respective command is run.
-# HADOOP_{COMMAND}_OPTS etc HADOOP_JT_OPTS applies to JobTracker
-# for e.g. HADOOP_CLIENT_OPTS applies to
-# more than one command (fs, dfs, fsck,
-# dfsadmin etc)
+# HADOOP_{COMMAND}_OPTS etc HADOOP_JT_OPTS applies to JobTracker
+# for e.g. HADOOP_CLIENT_OPTS applies to
+# more than one command (fs, dfs, fsck,
+# dfsadmin etc)
#
# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
#
@@ -62,6 +62,7 @@ if [ $# = 0 ]; then
echo " dfsadmin run a DFS admin client"
echo " mradmin run a Map-Reduce admin client"
echo " fsck run a DFS filesystem checking utility"
+ echo " avatarfsck run a avatar DFS filesystem checking utility"
echo " raidfsck [path] run RAID-aware filesystem checking utility"
echo " raidshell [options] run RAID-shell utility"
echo " fs run a generic filesystem user client"
@@ -70,11 +71,11 @@ if [ $# = 0 ]; then
echo " oiv apply the offline fsimage viewer to an fsimage"
echo " oev apply the offline edits viewer to an edits file"
echo " Use -help to see options"
- echo " jobtracker run the MapReduce job Tracker node"
+ echo " jobtracker run the MapReduce job Tracker node"
echo " pipes run a Pipes job"
- echo " tasktracker run a MapReduce task Tracker node"
+ echo " tasktracker run a MapReduce task Tracker node"
echo " job manipulate MapReduce jobs"
- echo " queue get information regarding JobQueues"
+ echo " queue get information regarding JobQueues"
echo " version print the version"
echo " jar <jar> run a jar file"
echo " distcp <srcurl> <desturl> copy file or directories recursively"
@@ -83,6 +84,7 @@ if [ $# = 0 ]; then
echo " daemonlog get/set the log level for each daemon"
echo " or"
echo " CLASSNAME run the class named CLASSNAME"
+ echo " getconf get config values from configuration"
echo "Most commands print help when invoked w/o parameters."
exit 1
fi
@@ -100,14 +102,14 @@ if [ "$JAVA_HOME" != "" ]; then
#echo "run java in $JAVA_HOME"
JAVA_HOME=$JAVA_HOME
fi
-
+
if [ "$JAVA_HOME" = "" ]; then
echo "Error: JAVA_HOME is not set."
exit 1
fi
JAVA=$JAVA_HOME/bin/java
-JAVA_HEAP_MAX=-Xmx1000m
+JAVA_HEAP_MAX=-Xmx1000m
# check envvars which might override default args
if [ "$HADOOP_HEAPSIZE" != "" ]; then
@@ -133,6 +135,9 @@ fi
if [ -d "$HADOOP_HOME/build/tools" ]; then
CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/tools
fi
+if [ -d "$HADOOP_HOME/build/contrib/highavailability/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/contrib/highavailability/classes
+fi
# so that filenames w/ spaces are handled correctly in loops below
IFS=
@@ -167,6 +172,12 @@ for f in $HADOOP_HOME/build/hadoop-*-tools.jar; do
TOOL_PATH=${TOOL_PATH}:$f;
done
+# add user-specified CLASSPATH before corona so that a newer
+# corona jar can be specified to override the deployed one
+if [ "$HADOOP_CLASSPATH" != "" ]; then
+ CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH}
+fi
+
# CORONA_PATH for corona daemons
if [ -d "$HADOOP_HOME/build/contrib/corona/classes" ]; then
CORONA_PATH=${CORONA_PATH}:$HADOOP_HOME/build/contrib/corona/classes
@@ -184,12 +195,6 @@ for f in $HADOOP_HOME/contrib/corona/lib/*.jar; do
CORONA_LIB_PATH=${CORONA_LIB_PATH}:$f;
done
-
-# add user-specified CLASSPATH last
-if [ "$HADOOP_CLASSPATH" != "" ]; then
- CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH}
-fi
-
# default log directory & file
if [ "$HADOOP_LOG_DIR" = "" ]; then
HADOOP_LOG_DIR="$HADOOP_HOME/logs"
@@ -210,6 +215,9 @@ fi
# restore ordinary behaviour
unset IFS
+# Enable assertions for mapred for corona testing
+HADOOP_OPTS="$HADOOP_OPTS -ea:org.apache.hadoop.mapred..."
+
# figure out which class to run
if [ "$COMMAND" = "namenode" ] ; then
CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
@@ -229,9 +237,11 @@ elif [ "$COMMAND" = "secondarynamenode" ] ; then
elif [ "$COMMAND" = "avatardatanode" ] ; then
CLASS='org.apache.hadoop.hdfs.server.datanode.AvatarDataNode'
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_DATANODE_OPTS"
+ HADOOP_ROOT_LOGGER=${HADOOP_DATANODE_LOGGER:-$HADOOP_ROOT_LOGGER}
elif [ "$COMMAND" = "datanode" ] ; then
CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_DATANODE_OPTS"
+ HADOOP_ROOT_LOGGER=${HADOOP_DATANODE_LOGGER:-$HADOOP_ROOT_LOGGER}
elif [ "$COMMAND" = "fs" ] ; then
CLASS=org.apache.hadoop.fs.FsShell
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
@@ -250,6 +260,9 @@ elif [ "$COMMAND" = "version" ] ; then
elif [ "$COMMAND" = "fsck" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.DFSck
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "avatarfsck" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.AvatarDFSck
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "raidfsck" ] ; then
CLASS=org.apache.hadoop.raid.RaidShell
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
@@ -279,17 +292,20 @@ elif [ "$COMMAND" = "jobtracker" ] ; then
elif [ "$COMMAND" = "coronaclustermanager" ] ; then
CLASS=org.apache.hadoop.corona.ClusterManagerServer
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_CORONACLUSTERMANAGER_OPTS"
- CLASSPATH=${CLASSPATH}:${CORONA_LIB_PATH}
+ # Corona lib path should be first to ensure that it uses the right thrift JAR
+ CLASSPATH=${CORONA_LIB_PATH}:${CLASSPATH}
elif [ "$COMMAND" = "coronatasktracker" ] ; then
CLASS=org.apache.hadoop.mapred.CoronaTaskTracker
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_TASKTRACKER_OPTS"
- CLASSPATH=${CLASSPATH}:${CORONA_LIB_PATH}
+ # See coronaclustermanager comment
+ CLASSPATH=${CORONA_LIB_PATH}:${CLASSPATH}
elif [ "$COMMAND" = "coronaproxyjobtracker" ] ; then
CLASS=org.apache.hadoop.mapred.ProxyJobTracker
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_CORONAPROXYJOBTRACKER_OPTS"
elif [ "$COMMAND" = "tasktracker" ] ; then
CLASS=org.apache.hadoop.mapred.TaskTracker
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_TASKTRACKER_OPTS"
+ HADOOP_ROOT_LOGGER=${HADOOP_TASKTRACKER_LOGGER:-$HADOOP_ROOT_LOGGER}
if [ -n "$HADOOP_INSTANCE" ] ; then
CMDLINE_OPTS="-instance $HADOOP_INSTANCE $CMDLINE_OPTS"
fi
@@ -329,6 +345,9 @@ elif [ "$COMMAND" = "hourglass" ] ; then
elif [ "$COMMAND" = "fairscheduler" ] ; then
CLASS=org.apache.hadoop.mapred.FairSchedulerShell
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "getconf" ] ; then
+ CLASS=org.apache.hadoop.tools.GetConf
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
else
CLASS=$COMMAND
fi
@@ -351,7 +370,7 @@ if [ -d "${HADOOP_HOME}/build/native" -o -d "${HADOOP_HOME}/lib/native" ]; then
if [ -d "$HADOOP_HOME/build/native" ]; then
JAVA_LIBRARY_PATH=${HADOOP_HOME}/build/native/${JAVA_PLATFORM}/lib
fi
-
+
if [ -d "${HADOOP_HOME}/lib/native" ]; then
if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_HOME}/lib/native/${JAVA_PLATFORM}
@@ -373,15 +392,17 @@ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.home.dir=$HADOOP_HOME"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.application=${HADOOP_APPLICATION:-default}"
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.installationid=${CLUSTER_NAME:-default}"
if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-fi
+fi
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.policy.file=$HADOOP_POLICYFILE"
if [ "$HADOOP_DEPLOYMENT" == "server" ]; then
# Look for another instance of the class running
- ps -ef | grep $COMMAND | grep 'bin/hadoop'
+ ps -ef | grep $COMMAND | grep 'bin/hadoop'
INSTANCES=`ps -ef | grep $COMMAND | grep 'bin/hadoop' | wc -l`
if [ $INSTANCES -gt 2 ]; then
echo "Another instance of this command is running"
bin/hadoop-daemon.sh
@@ -112,11 +112,20 @@ case $startStop in
if [ -f $pid ]; then
if kill -0 `cat $pid` > /dev/null 2>&1; then
- echo $command running as process `cat $pid`. Stop it first.
- exit 1
+ # On Linux, process pids and thread pids are indistinguishable to
+ # signals. It's possible that the pid in our pidfile is now a thread
+ # owned by another process. Let's check to make sure our pid is
+ # actually a running process.
+ ps -e -o pid | egrep "^`cat $pid`$" >/dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ echo $command running as process `cat $pid`. Stop it first.
+ exit 1
+ else
+ rm $pid
+ fi
fi
fi
-
+
if [ "$HADOOP_MASTER" != "" ]; then
echo rsync from $HADOOP_MASTER
rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $HADOOP_MASTER/ "$HADOOP_HOME"
@@ -129,6 +138,10 @@ case $startStop in
nohup nice -n $HADOOP_NICENESS "$HADOOP_HOME"/bin/hadoop --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
echo $! > $pid
sleep 1; head "$log"
+ if ! kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo start failed. Check log file.
+ exit 1
+ fi
;;
(stop)
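
The new guard in the start case exists because kill -0 succeeds for any id a signal can reach, including a thread id recycled from a long-dead daemon, so the script additionally checks that the pidfile's id appears in the process table before refusing to start. A standalone sketch of the same test (pidfile path hypothetical; ps left-pads pids, hence the tolerant pattern):

#!/usr/bin/env bash
# Assumes /tmp/demo.pid holds a candidate pid.
pidfile=/tmp/demo.pid
p=`cat "$pidfile"`
if kill -0 "$p" > /dev/null 2>&1; then
  # kill -0 alone is not conclusive: the id may be a thread of another process.
  if ps -e -o pid= | egrep "^ *${p}\$" > /dev/null 2>&1; then
    echo "daemon still running as process $p"
  else
    echo "stale pidfile: $p is not a process; removing it"
    rm "$pidfile"
  fi
fi

The other new block, after the daemon is launched, turns a silent crash-on-startup into an immediate nonzero exit instead of leaving a pidfile for a process that died within the first second.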
bin/slaves.sh
@@ -41,7 +41,7 @@ bin=`cd "$bin"; pwd`
. "$bin"/hadoop-config.sh
# If the slaves file is specified in the command line,
-# then it takes precedence over the definition in
+# then it takes precedence over the definition in
# hadoop-env.sh. Save it here.
HOSTLIST=$HADOOP_SLAVES
@@ -58,8 +58,15 @@ if [ "$HOSTLIST" = "" ]; then
fi
for slave in `cat "$HOSTLIST"|sed "s/#.*$//;/^$/d"`; do
- ssh $HADOOP_SSH_OPTS $slave $"${@// /\\ }" \
- 2>&1 | sed "s/^/$slave: /" &
+
+ # For localhost, don't ssh to avoid key/cert issues
+ if [ $slave == "localhost" ]; then
+ cmd="${@// /\\ }"
+ bash -c "$cmd"
+ else
+ ssh $HADOOP_SSH_OPTS $slave $"${@// /\\ }" \
+ 2>&1 | sed "s/^/$slave: /" &
+ fi
if [ "$HADOOP_SLAVE_SLEEP" != "" ]; then
sleep $HADOOP_SLAVE_SLEEP
fi
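
Besides avoiding ssh key/cert prompts on single-node setups, the localhost branch reuses the same space-escaping expansion as the ssh path: ${@// /\\ } rewrites every space inside each positional parameter as "\ " so that the receiving shell re-splits words correctly. A quick demonstration with made-up arguments:

#!/usr/bin/env bash
# Shows what the expansion hands to the remote (or local) shell.
set -- start tasktracker "two words"
cmd="${@// /\\ }"
echo "$cmd"                        # start tasktracker two\ words
# Re-parsing keeps "two words" as one argument; prints
# [start] [tasktracker] [two words] on separate lines:
bash -c 'printf "[%s]\n" '"$cmd"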
bin/start-corona.sh
@@ -25,6 +25,6 @@ bin=`cd "$bin"; pwd`
# start corona daemons
# start clustermanager first to minimize connection errors at startup
-"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start corona_clustermanager
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start coronaclustermanager
"$bin"/start-proxyjt-remote.sh --config $HADOOP_CONF_DIR
-"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR start corona_tasktracker
+"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR start coronatasktracker
bin/start-mapred-single.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Start hadoop map reduce daemons. Run this on the local machine. By default
+# logs are written to /tmp/hadoop/
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+# Add contrib jars to classpath. Needed for FairScheduler
+for f in "$bin"/../build/contrib/*/*.jar; do
+ echo "Adding $f to classpath"
+ export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:$f;
+done
+
+# start mapred daemons
+# start jobtracker first to minimize connection errors at startup
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start jobtracker
+"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR start tasktracker
bin/start-proxyjt.sh
@@ -24,4 +24,4 @@ bin=`cd "$bin"; pwd`
. "$bin"/hadoop-config.sh
# start corona proxy job tracker
-"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start corona_proxyjobtracker
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start coronaproxyjobtracker
bin/stop-corona.sh
@@ -23,6 +23,6 @@ bin=`cd "$bin"; pwd`
. "$bin"/hadoop-config.sh
-"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop corona_clustermanager
-"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop corona_proxyjobtracker
-"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR stop corona_tasktracker
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop coronaclustermanager
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop coronaproxyjobtracker
+"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR stop coronatasktracker
bin/stop-mapred-single.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Stop hadoop map reduce daemons. Run this on master node.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop jobtracker
+"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR stop tasktracker
+