Update master to latest trunk

Commit 73c2ecb3b2b1ea1d935d381079e362c01c3f4c52 (1 parent: b4a9af6), committed by Alex Feinberg on Feb 2, 2013.
Showing 704 changed files with 172,642 additions and 4,458 deletions.
@@ -81,6 +81,7 @@ if [ $# = 0 ]; then
echo " queue get information regarding JobQueues"
echo " version print the version"
echo " jar <jar> run a jar file"
+ echo " onejar <jar> run a jar file packaged using one-jar, do not specify name of main class"
echo " distcp <srcurl> <desturl> copy file or directories recursively"
echo " fastcopy <src file> <dest file> copy files by maintaining optimal locality"
echo " archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
@@ -395,6 +396,9 @@ elif [ "$COMMAND" = "pipes" ] ; then
elif [ "$COMMAND" = "version" ] ; then
CLASS=org.apache.hadoop.util.VersionInfo
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "onejar" ] ; then
+ CLASS=org.apache.hadoop.util.RunJar
+ HADOOP_OPTS="$HADOOP_OPTS -Done-jar.jar.path=$1"
elif [ "$COMMAND" = "jar" ] ; then
CLASS=org.apache.hadoop.util.RunJar
elif [ "$COMMAND" = "fastcopy" ] ; then
@@ -493,4 +497,5 @@ fi
# run it
export CLASSPATH
+export JVM_PID=$$
exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS $JMX_OPTS "-Dfb_hadoop_version=0.20" $CLASS $CMDLINE_OPTS "$@"
@@ -31,4 +31,7 @@ fi
export NOTIFIER_JMX_OPTS=" -Dcom.sun.management.jmxremote.port=$NOTIFIER_JMX_PORT -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+# use the thrift 0.7.0 jar in the class path
+export HADOOP_CLASSPATH=${HADOOP_HOME}/contrib/namespace-notifier/lib/libthrift-0.7.0.jar:${HADOOP_CLASSPATH}
+
"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start notifier
@@ -1343,6 +1343,8 @@
<zipgroupfileset dir="lib" includes="*jar"/>
<zipgroupfileset dir="build" includes="*jar"/>
<zipgroupfileset dir="build/contrib/corona" includes="*jar"/>
+ <zipgroupfileset dir="build/contrib/highavailability" includes="*jar"/>
+ <zipgroupfileset dir="build/contrib/raid" includes="*jar"/>
<zipgroupfileset dir="build/contrib/corona/lib" includes="*jar"/>
<zipgroupfileset dir="build/ivy/lib/Hadoop/common/" includes="*jar"/>
<zipgroupfileset dir="build/ivy/lib/corona/common/" includes="*jar"/>
@@ -17,4 +17,11 @@
<value>1</value>
</property>
+<property>
+ <name>dfs.datanode.flush_kb</name>
+ <value>1024</value>
+ <description>Enables datanode to call fsync to write to disk every 'value' number
+ of KB. To disable the feature, set the value to zero. The default is 1024 KB
+ </description>
+</property>
</configuration>
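
Per the description just added, setting the value to zero disables the periodic fsync entirely; a hedged sketch of what that override would look like in the same file:

<property>
  <name>dfs.datanode.flush_kb</name>
  <value>0</value>
  <description>Disable periodic fsync on the datanode.</description>
</property>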
@@ -34,7 +34,7 @@ log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c{1}: %m%n
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
# Debugging Pattern format
#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
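
The change from %c{1} to %c makes the daily rolling file appender log the full logger name rather than only its last component. An illustrative before/after (timestamp and logger are made up):

  with %c{1}:  2013-02-02 12:00:00,000 INFO NameNode: ...
  with %c:     2013-02-02 12:00:00,000 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: ...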
@@ -5,7 +5,7 @@
<property>
<name>dfs.http.address0</name>
- <value>localhost:50070</value>
+ <value>0.0.0.0:50070</value>
<description>
The address and the base port where the dfs namenode web ui will listen on.
If the port is 0 then the server will start on a free port.
@@ -14,7 +14,7 @@
<property>
<name>dfs.http.address1</name>
- <value>localhost:50080</value>
+ <value>0.0.0.0:50080</value>
<description>
The address and the base port where the dfs namenode web ui will listen on.
If the port is 0 then the server will start on a free port.
@@ -95,7 +95,7 @@
<property>
<name>dfs.namenode.dn-address0</name>
- <value>localhost:9005</value>
+ <value>0.0.0.0:9005</value>
<description>
The address and port to run the RPC server which will be processing
requests from datanodes in the cluster.
@@ -104,7 +104,7 @@
<property>
<name>dfs.namenode.dn-address1</name>
- <value>localhost:9006</value>
+ <value>0.0.0.0:9006</value>
<description>
The address and port to run the RPC server which will be processing
requests from datanodes in the cluster.
@@ -7,7 +7,7 @@
<property>
<name>fs.default.name</name>
- <value>hdfs://localhost:9000</value>
+ <value>hdfs://0.0.0.0:9000</value>
<description>The name of the default file system. A URI whose
scheme and authority determine the FileSystem implementation. The
uri's scheme determines the config property (fs.SCHEME.impl) naming
@@ -17,7 +17,7 @@
<property>
<name>fs.default.name0</name>
- <value>hdfs://localhost:9000</value>
+ <value>hdfs://0.0.0.0:9000</value>
<description>The name of the default file system. A URI whose
scheme and authority determine the FileSystem implementation. The
uri's scheme determines the config property (fs.SCHEME.impl) naming
@@ -27,7 +27,7 @@
<property>
<name>fs.default.name1</name>
- <value>hdfs://localhost:9010</value>
+ <value>hdfs://0.0.0.0:9010</value>
<description>The name of the default file system. A URI whose
scheme and authority determine the FileSystem implementation. The
uri's scheme determines the config property (fs.SCHEME.impl) naming
@@ -19,6 +19,8 @@ if [[ "$IS_HADOOP_ENV_ALREADY_SOURCED" != "true" ]]; then
# Extra Java runtime options. Empty by default.
# export HADOOP_OPTS=-server
+ export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
+
# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
@@ -13,6 +13,9 @@ if [[ "$IS_HADOOP_ENV_ALREADY_SOURCED" != "true" ]]; then
# Extra Java CLASSPATH elements. Optional.
#export HADOOP_CLASSPATH=${HADOOP_TRUNK_MAIN}/VENDOR/hadoop-0.20/lib/
+ # use ipv4 if we can:
+ export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
+
# The maximum amount of heap to use, in MB. Default is 1000.
export HADOOP_HEAPSIZE=2000
@@ -18,6 +18,8 @@ if [[ "$IS_HADOOP_ENV_ALREADY_SOURCED" != "true" ]]; then
# Extra Java runtime options. Empty by default.
# export HADOOP_OPTS=-server
+
+ export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
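
The three env-script hunks above all add -Djava.net.preferIPv4Stack=true to HADOOP_OPTS. Since -D options end up verbatim on the daemon's java command line, a hedged way to confirm a running daemon picked the flag up:

  # the [p] keeps grep from matching itself
  ps -ef | grep '[p]referIPv4Stack'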
@@ -108,7 +108,7 @@
<property>
<name>dfs.namenode.dn-address</name>
- <value>localhost:9015</value>
+ <value>0.0.0.0:9015</value>
<description>
The address and port to run the RPC server which will be processing
requests from datanodes in the cluster.
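
All of the localhost-to-0.0.0.0 edits above follow the same pattern: the test configs now bind their HTTP and RPC endpoints on every interface instead of loopback only, so they are reachable from other hosts. A quick, hedged check against the namenode web UI port from the first of those hunks (the hostname is a placeholder):

  curl -s -o /dev/null -w '%{http_code}\n' http://namenode-host.example:50070/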
@@ -132,15 +132,17 @@
<!-- <dependency org="javax.servlet"
name="servlet-api"
rev="${servlet-api.version}"
- conf="jetty->master"/> -->
+ conf="jetty->master"/> -->
+
+<!-- These two will be resolved from lib/
<dependency org="org.mortbay.jetty"
name="jetty"
rev="${jetty.version}"
- conf="jetty->master"/>
+ conf="jetty->master"/>
<dependency org="org.mortbay.jetty"
name="jetty-util"
rev="${jetty-util.version}"
- conf="jetty->master"/>
+ conf="jetty->master"/> -->
<dependency org="tomcat"
name="jasper-runtime"
3 binary files not shown.
@@ -8,5 +8,6 @@
<starvingTimeForShare>60000</starvingTimeForShare>
<starvingTimeForMinimum>30000</starvingTimeForMinimum>
<redirect source="test.source" destination="group_a.pool_sla" />
+ <redirect source="group_a.source2" destination="group_a.pool_nonsla"/>
<redirect source="group_b.pool_redirectsource" destination="group_b.pool_sla" />
</configuration>
@@ -5,6 +5,7 @@
<minREDUCE>100</minREDUCE>
<maxMAP>200</maxMAP>
<maxREDUCE>200</maxREDUCE>
+ <redirectJobWithLimit source="pool_nonsla" destination="pool_sla" inputSizeLimit="1"/>
<pool name="pool_sla">
<minMAP>100</minMAP>
<minREDUCE>100</minREDUCE>
@@ -16,6 +17,15 @@
</pool>
<pool name="pool_nonsla">
</pool>
+ <pool name="pool_request_max">
+ <minMAP>1</minMAP>
+ <minREDUCE>1</minREDUCE>
+ <maxMAP>2</maxMAP>
+ <maxREDUCE>2</maxREDUCE>
+ <schedulingMode>FIFO</schedulingMode>
+ <preemptable>false</preemptable>
+ <requestMax>true</requestMax>
+ </pool>
</group>
<group name="group_b">
<maxMAP>200</maxMAP>