#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Environment Variables
#
#   JSVC_HOME      home directory of jsvc binary. Required for starting
#                  secure datanode.
#
#   JSVC_OUTFILE   path to jsvc output file. Defaults to
#                  $HADOOP_LOG_DIR/jsvc.out.
#
#   JSVC_ERRFILE   path to jsvc error file. Defaults to $HADOOP_LOG_DIR/jsvc.err.
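#
# Example setup for a secure datanode (paths and user name are illustrative;
# adjust them for your installation):
#
#   export JSVC_HOME=/usr/local/jsvc
#   export HADOOP_SECURE_DN_USER=hdfs
#   hdfs datanode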
bin=$(which "$0")
bin=$(dirname "${bin}")
bin=$(cd "$bin" > /dev/null; pwd)
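
# An exported HADOOP_LIBEXEC_DIR takes precedence over the default ../libexec
# next to this script; hdfs-config.sh then pulls in the shared Hadoop shell
# environment (HADOOP_CONF_DIR, JAVA, HADOOP_OPTS, and friends).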
DEFAULT_LIBEXEC_DIR="$bin"/../libexec
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
. "$HADOOP_LIBEXEC_DIR"/hdfs-config.sh
function print_usage(){
  echo "Usage: hdfs [--config confdir] [--loglevel loglevel] COMMAND"
  echo "       where COMMAND is one of:"
  echo "  dfs                  run a filesystem command on the file systems supported in Hadoop."
  echo "  classpath            prints the classpath"
  echo "  namenode -format     format the DFS filesystem"
  echo "  secondarynamenode    run the DFS secondary namenode"
  echo "  namenode             run the DFS namenode"
  echo "  journalnode          run the DFS journalnode"
  echo "  zkfc                 run the ZK Failover Controller daemon"
  echo "  datanode             run a DFS datanode"
  echo "  debug                run a Debug Admin to execute HDFS debug commands"
  echo "  dfsadmin             run a DFS admin client"
  echo "  haadmin              run a DFS HA admin client"
  echo "  fsck                 run a DFS filesystem checking utility"
  echo "  balancer             run a cluster balancing utility"
  echo "  jmxget               get JMX exported values from NameNode or DataNode."
  echo "  mover                run a utility to move block replicas across"
  echo "                       storage types"
  echo "  oiv                  apply the offline fsimage viewer to an fsimage"
  echo "  oiv_legacy           apply the offline fsimage viewer to a legacy fsimage"
  echo "  oev                  apply the offline edits viewer to an edits file"
  echo "  fetchdt              fetch a delegation token from the NameNode"
  echo "  getconf              get config values from configuration"
  echo "  groups               get the groups which users belong to"
  echo "  snapshotDiff         diff two snapshots of a directory or diff the"
  echo "                       current directory contents with a snapshot"
  echo "  lsSnapshottableDir   list all snapshottable dirs owned by the current user"
  echo "                       Use -help to see options"
  echo "  portmap              run a portmap service"
  echo "  nfs3                 run an NFS version 3 gateway"
  echo "  cacheadmin           configure the HDFS cache"
  echo "  crypto               configure HDFS encryption zones"
  echo "  storagepolicies      list/get/set block storage policies"
  echo "  version              print the version"
  echo ""
  echo "Most commands print help when invoked w/o parameters."
  # There are also debug commands, but they don't show up in this listing.
}
if [ $# = 0 ]; then
  print_usage
  exit
fi

COMMAND=$1
shift

case $COMMAND in
  # usage flags
  --help|-help|-h)
    print_usage
    exit
    ;;
esac
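
# A few representative invocations (all commands from the usage listing
# above; the paths are illustrative):
#   hdfs dfs -ls /
#   hdfs namenode -format
#   hdfs dfsadmin -report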
# Determine if we're starting a secure datanode, and if so, redefine appropriate variables
if [ "$COMMAND" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
if [ -n "$JSVC_HOME" ]; then
if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then
HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
fi
if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then
HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
fi
HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
starting_secure_dn="true"
else
echo "It looks like you're trying to start a secure DN, but \$JSVC_HOME"\
"isn't set. Falling back to starting insecure DN."
fi
fi
# Determine if we're starting a privileged NFS daemon, and if so, redefine appropriate variables
if [ "$COMMAND" == "nfs3" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_PRIVILEGED_NFS_USER" ]; then
if [ -n "$JSVC_HOME" ]; then
if [ -n "$HADOOP_PRIVILEGED_NFS_PID_DIR" ]; then
HADOOP_PID_DIR=$HADOOP_PRIVILEGED_NFS_PID_DIR
fi
if [ -n "$HADOOP_PRIVILEGED_NFS_LOG_DIR" ]; then
HADOOP_LOG_DIR=$HADOOP_PRIVILEGED_NFS_LOG_DIR
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
fi
HADOOP_IDENT_STRING=$HADOOP_PRIVILEGED_NFS_USER
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
starting_privileged_nfs="true"
else
echo "It looks like you're trying to start a privileged NFS server, but"\
"\$JSVC_HOME isn't set. Falling back to starting unprivileged NFS server."
fi
fi
if [ "$COMMAND" = "namenode" ] ; then
CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS"
elif [ "$COMMAND" = "zkfc" ] ; then
CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_ZKFC_OPTS"
elif [ "$COMMAND" = "secondarynamenode" ] ; then
CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS"
elif [ "$COMMAND" = "datanode" ] ; then
CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
if [ "$starting_secure_dn" = "true" ]; then
HADOOP_OPTS="$HADOOP_OPTS -jvm server $HADOOP_DATANODE_OPTS"
else
HADOOP_OPTS="$HADOOP_OPTS -server $HADOOP_DATANODE_OPTS"
fi
elif [ "$COMMAND" = "journalnode" ] ; then
CLASS='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOURNALNODE_OPTS"
elif [ "$COMMAND" = "dfs" ] ; then
CLASS=org.apache.hadoop.fs.FsShell
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "dfsadmin" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "haadmin" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
CLASSPATH=${CLASSPATH}:${TOOL_PATH}
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "fsck" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.DFSck
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "balancer" ] ; then
CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS"
elif [ "$COMMAND" = "mover" ] ; then
CLASS=org.apache.hadoop.hdfs.server.mover.Mover
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_MOVER_OPTS}"
elif [ "$COMMAND" = "storagepolicies" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
elif [ "$COMMAND" = "jmxget" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.JMXGet
elif [ "$COMMAND" = "oiv" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
elif [ "$COMMAND" = "oiv_legacy" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
elif [ "$COMMAND" = "oev" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
elif [ "$COMMAND" = "fetchdt" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
elif [ "$COMMAND" = "getconf" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.GetConf
elif [ "$COMMAND" = "groups" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.GetGroups
elif [ "$COMMAND" = "snapshotDiff" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
elif [ "$COMMAND" = "lsSnapshottableDir" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
elif [ "$COMMAND" = "portmap" ] ; then
CLASS=org.apache.hadoop.portmap.Portmap
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_PORTMAP_OPTS"
elif [ "$COMMAND" = "nfs3" ] ; then
CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NFS3_OPTS"
elif [ "$COMMAND" = "cacheadmin" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
elif [ "$COMMAND" = "crypto" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
elif [ "$COMMAND" = "version" ] ; then
CLASS=org.apache.hadoop.util.VersionInfo
elif [ "$COMMAND" = "debug" ]; then
CLASS=org.apache.hadoop.hdfs.tools.DebugAdmin
elif [ "$COMMAND" = "classpath" ]; then
if [ "$#" -gt 0 ]; then
CLASS=org.apache.hadoop.util.Classpath
else
# No need to bother starting up a JVM for this simple case.
if [ "$cygwin" = true ]; then
CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
fi
echo $CLASSPATH
exit 0
fi
else
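  # Anything not matched above is treated as a fully qualified Java class
  # name on the Hadoop classpath, e.g. "hdfs org.apache.hadoop.util.VersionInfo".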
CLASS="$COMMAND"
fi
# cygwin path translation
if [ "$cygwin" = true ]; then
CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
HADOOP_LOG_DIR=$(cygpath -w "$HADOOP_LOG_DIR" 2>/dev/null)
HADOOP_PREFIX=$(cygpath -w "$HADOOP_PREFIX" 2>/dev/null)
HADOOP_CONF_DIR=$(cygpath -w "$HADOOP_CONF_DIR" 2>/dev/null)
HADOOP_COMMON_HOME=$(cygpath -w "$HADOOP_COMMON_HOME" 2>/dev/null)
HADOOP_HDFS_HOME=$(cygpath -w "$HADOOP_HDFS_HOME" 2>/dev/null)
HADOOP_YARN_HOME=$(cygpath -w "$HADOOP_YARN_HOME" 2>/dev/null)
HADOOP_MAPRED_HOME=$(cygpath -w "$HADOOP_MAPRED_HOME" 2>/dev/null)
fi
export CLASSPATH=$CLASSPATH
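
# Default the security audit logger to a no-op appender; sites can export
# HADOOP_SECURITY_LOGGER (typically in hadoop-env.sh) to enable it.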
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
# Check to see if we should start a secure datanode
if [ "$starting_secure_dn" = "true" ]; then
if [ "$HADOOP_PID_DIR" = "" ]; then
HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid"
else
HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
fi
JSVC=$JSVC_HOME/jsvc
if [ ! -f $JSVC ]; then
echo "JSVC_HOME is not set correctly so jsvc cannot be found. jsvc is required to run secure datanodes. "
echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
"and set JSVC_HOME to the directory containing the jsvc binary."
exit
fi
if [[ ! $JSVC_OUTFILE ]]; then
JSVC_OUTFILE="$HADOOP_LOG_DIR/jsvc.out"
fi
if [[ ! $JSVC_ERRFILE ]]; then
JSVC_ERRFILE="$HADOOP_LOG_DIR/jsvc.err"
fi
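
  # jsvc starts as root so the datanode can bind its privileged ports, then
  # drops to $HADOOP_SECURE_DN_USER before SecureDataNodeStarter brings the
  # datanode up.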
exec "$JSVC" \
-Dproc_$COMMAND -outfile "$JSVC_OUTFILE" \
-errfile "$JSVC_ERRFILE" \
-pidfile "$HADOOP_SECURE_DN_PID" \
-nodetach \
-user "$HADOOP_SECURE_DN_USER" \
-cp "$CLASSPATH" \
$JAVA_HEAP_MAX $HADOOP_OPTS \
org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter "$@"
elif [ "$starting_privileged_nfs" = "true" ] ; then
if [ "$HADOOP_PID_DIR" = "" ]; then
HADOOP_PRIVILEGED_NFS_PID="/tmp/hadoop_privileged_nfs3.pid"
else
HADOOP_PRIVILEGED_NFS_PID="$HADOOP_PID_DIR/hadoop_privileged_nfs3.pid"
fi
JSVC=$JSVC_HOME/jsvc
if [ ! -f $JSVC ]; then
echo "JSVC_HOME is not set correctly so jsvc cannot be found. jsvc is required to run privileged NFS gateways. "
echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
"and set JSVC_HOME to the directory containing the jsvc binary."
exit
fi
if [[ ! $JSVC_OUTFILE ]]; then
JSVC_OUTFILE="$HADOOP_LOG_DIR/nfs3_jsvc.out"
fi
if [[ ! $JSVC_ERRFILE ]]; then
JSVC_ERRFILE="$HADOOP_LOG_DIR/nfs3_jsvc.err"
fi
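
  # Same jsvc pattern as the secure datanode: start as root, then drop to
  # $HADOOP_PRIVILEGED_NFS_USER before PrivilegedNfsGatewayStarter runs.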
exec "$JSVC" \
-Dproc_$COMMAND -outfile "$JSVC_OUTFILE" \
-errfile "$JSVC_ERRFILE" \
-pidfile "$HADOOP_PRIVILEGED_NFS_PID" \
-nodetach \
-user "$HADOOP_PRIVILEGED_NFS_USER" \
-cp "$CLASSPATH" \
$JAVA_HEAP_MAX $HADOOP_OPTS \
org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter "$@"
else
  # run it
  exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
fi