Updating demo to grep my log files

commit fb8eed29cdcec35e138f3f8faef91e156db16981 (1 parent: 6dec564)
Matthew McCullough authored February 27, 2010
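
In short: the Hadoop configuration files that previously served as the demo's grep input are deleted below, a week of web-server access logs (ex100220.log through ex100226.log) is checked in to replace them, and runme.sh (at the bottom of this diff) switches the search pattern from 'dfs[a-z.]+' to '[a-z.]+html'. As a quick illustration of what the new pattern picks out — the sample log line here is invented for illustration, since the checked-in logs are not shown in this diff:

    # Hypothetical access-log line; grep -oE prints each substring the
    # pattern matches, the same strings Hadoop's grep example counts.
    echo '66.249.65.107 - - [20/Feb/2010] "GET /training/git.html HTTP/1.1" 200' |
      grep -oE '[a-z.]+html'
    # prints: git.html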
98  mapreduce-example/input/capacity-scheduler.xml
@@ -1,98 +0,0 @@
-<?xml version="1.0"?>
-
-<!-- This is the configuration file for the resource manager in Hadoop. -->
-<!-- You can configure various scheduling parameters related to queues. -->
-<!-- The properties for a queue follow a naming convention,such as, -->
-<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
-
-<configuration>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.capacity</name>
-    <value>100</value>
-    <description>Percentage of the number of slots in the cluster that are
-      to be available for jobs in this queue.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into
-      account in scheduling decisions.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description> Each queue enforces a limit on the percentage of resources
-    allocated to a user at any given time, if there is competition for them.
-    This user limit can vary between a minimum and maximum value. The former
-    depends on the number of users who have submitted jobs, and the latter is
-    set to this property value. For example, suppose the value of this
-    property is 25. If two users have submitted jobs to a queue, no single
-    user can use more than 50% of the queue resources. If a third user submits
-    a job, no single user can use more than 33% of the queue resources. With 4
-    or more users, no user can use more than 25% of the queue's resources. A
-    value of 100 implies no user limits are imposed.
-    </description>
-  </property>
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-jobs-per-user</name>
-    <value>2</value>
-    <description>The maximum number of jobs to be pre-initialized for a user
-    of the job queue.
-    </description>
-  </property>
-
-  <!-- The default configuration settings for the capacity task scheduler -->
-  <!-- The default values would be applied to all the queues which don't have -->
-  <!-- the appropriate property for the particular queue -->
-  <property>
-    <name>mapred.capacity-scheduler.default-supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into
-      account in scheduling decisions by default in a job queue.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>The percentage of the resources limited to a particular user
-      for the job queue at any given point of time by default.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-initialized-jobs-per-user</name>
-    <value>2</value>
-    <description>The maximum number of jobs to be pre-initialized for a user
-    of the job queue.
-    </description>
-  </property>
-
-
-  <!-- Capacity scheduler Job Initialization configuration parameters -->
-  <property>
-    <name>mapred.capacity-scheduler.init-poll-interval</name>
-    <value>5000</value>
-    <description>The amount of time in miliseconds which is used to poll
-    the job queues for jobs to initialize.
-    </description>
-  </property>
-  <property>
-    <name>mapred.capacity-scheduler.init-worker-threads</name>
-    <value>5</value>
-    <description>Number of worker threads which would be used by
-    Initialization poller to initialize jobs in a set of queue.
-    If number mentioned in property is equal to number of job queues
-    then a single thread would initialize jobs in a queue. If lesser
-    then a thread would get a set of queues assigned. If the number
-    is greater then number of threads would be equal to number of
-    job queues.
-    </description>
-  </property>
-
-</configuration>
24  mapreduce-example/input/configuration.xsl
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:output method="html"/>
-<xsl:template match="configuration">
-<html>
-<body>
-<table border="1">
-<tr>
- <td>name</td>
- <td>value</td>
- <td>description</td>
-</tr>
-<xsl:for-each select="property">
-<tr>
-  <td><a name="{name}"><xsl:value-of select="name"/></a></td>
-  <td><xsl:value-of select="value"/></td>
-  <td><xsl:value-of select="description"/></td>
-</tr>
-</xsl:for-each>
-</table>
-</body>
-</html>
-</xsl:template>
-</xsl:stylesheet>
8  mapreduce-example/input/core-site.xml
@@ -1,8 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-</configuration>
956  mapreduce-example/input/ex100220.log
956 additions, 0 deletions not shown
1,226  mapreduce-example/input/ex100221.log
1226 additions, 0 deletions not shown
1,554  mapreduce-example/input/ex100222.log
1554 additions, 0 deletions not shown
1,234  mapreduce-example/input/ex100223.log
1234 additions, 0 deletions not shown
1,387  mapreduce-example/input/ex100224.log
1387 additions, 0 deletions not shown
1,134  mapreduce-example/input/ex100225.log
1134 additions, 0 deletions not shown
1,137  mapreduce-example/input/ex100226.log
1137 additions, 0 deletions not shown
54  mapreduce-example/input/hadoop-env.sh
@@ -1,54 +0,0 @@
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-# export JAVA_HOME=/usr/lib/j2sdk1.5-sun
-
-# Extra Java CLASSPATH elements.  Optional.
-# export HADOOP_CLASSPATH=
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HADOOP_HEAPSIZE=2000
-
-# Extra Java runtime options.  Empty by default.
-# export HADOOP_OPTS=-server
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
-export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
-export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
-export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
-# export HADOOP_TASKTRACKER_OPTS=
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-# export HADOOP_CLIENT_OPTS
-
-# Extra ssh options.  Empty by default.
-# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
-
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-# export HADOOP_PID_DIR=/var/hadoop/pids
-
-# A string representing this instance of hadoop. $USER by default.
-# export HADOOP_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-# export HADOOP_NICENESS=10
40  mapreduce-example/input/hadoop-metrics.properties
@@ -1,40 +0,0 @@
-# Configuration of the "dfs" context for null
-dfs.class=org.apache.hadoop.metrics.spi.NullContext
-
-# Configuration of the "dfs" context for file
-#dfs.class=org.apache.hadoop.metrics.file.FileContext
-#dfs.period=10
-#dfs.fileName=/tmp/dfsmetrics.log
-
-# Configuration of the "dfs" context for ganglia
-# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-# dfs.period=10
-# dfs.servers=localhost:8649
-
-
-# Configuration of the "mapred" context for null
-mapred.class=org.apache.hadoop.metrics.spi.NullContext
-
-# Configuration of the "mapred" context for file
-#mapred.class=org.apache.hadoop.metrics.file.FileContext
-#mapred.period=10
-#mapred.fileName=/tmp/mrmetrics.log
-
-# Configuration of the "mapred" context for ganglia
-# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-# mapred.period=10
-# mapred.servers=localhost:8649
-
-
-# Configuration of the "jvm" context for null
-jvm.class=org.apache.hadoop.metrics.spi.NullContext
-
-# Configuration of the "jvm" context for file
-#jvm.class=org.apache.hadoop.metrics.file.FileContext
-#jvm.period=10
-#jvm.fileName=/tmp/jvmmetrics.log
-
-# Configuration of the "jvm" context for ganglia
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-# jvm.period=10
-# jvm.servers=localhost:8649
97  mapreduce-example/input/hadoop-policy.xml
@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.submission.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communciate with the jobtracker for job submission, querying job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.task.umbilical.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-</configuration>
20  mapreduce-example/input/hdfs-site.xml
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <!-- Matthew 2010-01-13 -->
-  <property>
-         <name>fs.default.name</name>
-         <value>localhost:9000</value>
-  </property>
-  <property>
-         <name>mapred.job.tracker</name>
-         <value>localhost:9001</value>
-  </property>
-  <property>
-        <name>dfs.replication</name>
-        <value>1</value>
-  </property>
-</configuration>
94  mapreduce-example/input/log4j.properties
@@ -1,94 +0,0 @@
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-#
-# FSNamesystem Audit logging
-# All audit events are logged at INFO level
-#
-log4j.logger.org.apache.hadoop.fs.FSNamesystem.audit=WARN
-
-# Custom Logging levels
-
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.metrics.jvm.EventCounter
8  mapreduce-example/input/mapred-site.xml
@@ -1,8 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-</configuration>
1  mapreduce-example/input/masters
@@ -1 +0,0 @@
-localhost
1  mapreduce-example/input/slaves
@@ -1 +0,0 @@
-localhost
57  mapreduce-example/input/ssl-client.xml.example
@@ -1,57 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<configuration>
-
-<property>
-  <name>ssl.client.truststore.location</name>
-  <value></value>
-  <description>Truststore to be used by clients like distcp. Must be
-  specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.truststore.password</name>
-  <value></value>
-  <description>Optional. Default value is "".
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.truststore.type</name>
-  <value>jks</value>
-  <description>Optional. Default value is "jks".
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.keystore.location</name>
-  <value></value>
-  <description>Keystore to be used by clients like distcp. Must be
-  specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.keystore.password</name>
-  <value></value>
-  <description>Optional. Default value is "".
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.keystore.keypassword</name>
-  <value></value>
-  <description>Optional. Default value is "".
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.keystore.type</name>
-  <value>jks</value>
-  <description>Optional. Default value is "jks".
-  </description>
-</property>
-
-</configuration>
55  mapreduce-example/input/ssl-server.xml.example
@@ -1,55 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<configuration>
-
-<property>
-  <name>ssl.server.truststore.location</name>
-  <value></value>
-  <description>Truststore to be used by NN and DN. Must be specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.truststore.password</name>
-  <value></value>
-  <description>Optional. Default value is "".
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.truststore.type</name>
-  <value>jks</value>
-  <description>Optional. Default value is "jks".
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.keystore.location</name>
-  <value></value>
-  <description>Keystore to be used by NN and DN. Must be specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.keystore.password</name>
-  <value></value>
-  <description>Must be specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.keystore.keypassword</name>
-  <value></value>
-  <description>Must be specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.keystore.type</name>
-  <value>jks</value>
-  <description>Optional. Default value is "jks".
-  </description>
-</property>
-
-</configuration>
3  mapreduce-example/runme.sh
@@ -1 +1,2 @@
-hadoop jar /Applications/Dev/hadoop-family/hadoop-0.20.1/hadoop-*-examples.jar grep input output 'dfs[a-z.]+'
+rm -rf output
+hadoop jar /Applications/Dev/hadoop-family/hadoop-0.20.1/hadoop-*-examples.jar grep input output '[a-z.]+html'
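
The added rm -rf output line matters because Hadoop refuses to start a job whose output directory already exists from a previous run. A minimal run-through, assuming the hard-coded Hadoop 0.20.1 install path from runme.sh and a standalone (local) configuration; the part-00000 file name is the usual convention for the example's final output, not something shown in this diff:

    # Run the updated demo and inspect the match counts.
    cd mapreduce-example
    sh runme.sh               # clears ./output, greps ./input for '[a-z.]+html'
    cat output/part-00000     # one line per matched string: count<TAB>match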
