Permalink
Switch branches/tags
HADOOP-6659 HADOOP-6671-2 HADOOP-6671 HADOOP-6685 HADOOP-10388 HADOOP-11671 HADOOP-11890 HADOOP-12499 HADOOP-12756 HADOOP-12930 HADOOP-13037 HADOOP-13070 HADOOP-13128 HADOOP-13341 HADOOP-13345 HADOOP-15407 HADOOP-15461 HDDS-4 HDDS-48 HDDS-151 HDFS-265 HDFS-326 HDFS-347 HDFS-1052 HDFS-1073 HDFS-1312 HDFS-1623 HDFS-2006 HDFS-2802 HDFS-2832 HDFS-3042 HDFS-3077 HDFS-3092 HDFS-3140 HDFS-4685 HDFS-4949 HDFS-5442 HDFS-5535 HDFS-5698 HDFS-6581 HDFS-6584 HDFS-6994 HDFS-7240 HDFS-7285-REBASE HDFS-7285-backup HDFS-7285 HDFS-7836 HDFS-7966 HDFS-8707 HDFS-8966 HDFS-9806 HDFS-9924 HDFS-10285 HDFS-10467 HDFS-11118 HDFS-12090 HDFS-12943 HDFS-12996 HDFS-13532 HDFS-13572 HDFS-13891 HDFS-EC HEAD MAPREDUCE-6240-trunk MAPREDUCE-6608 MR-279-merge-to-trunk MR-279-merge MR-279 MR-2454 MR-2841 MR-3902 MR-4327 MR-6749 YARN-321-old YARN-321 YARN-1011 YARN-1051 YARN-1197 YARN-2139 YARN-2915 YARN-2928 YARN-3368_branch2 YARN-3368 YARN-3409 YARN-3866 YARN-3926 YARN-4752 YARN-4757 YARN-4902 YARN-5085 YARN-5355_branch2 YARN-5355 YARN-5501 YARN-5673 YARN-5734-branch-2 YARN-5734-branch-3.0 YARN-5734 YARN-5881 YARN-5972 YARN-6592 YARN-6828 YARN-7055 YARN-7402 YARN-8006 YARN-8200 YARN-8310.branch-3.1 branch-0.1 branch-0.2 branch-0.3 branch-0.4 branch-0.5 branch-0.6 branch-0.7 branch-0.8 branch-0.9 branch-0.10 branch-0.11 branch-0.12 branch-0.13 branch-0.14 branch-0.15 branch-0.16 branch-0.17 branch-0.18 branch-0.19 branch-0.20-append branch-0.20-security-202 branch-0.20-security-203 branch-0.20-security-204 branch-0.20-security-205 branch-0.20-security-patches branch-0.20-security branch-0.20 branch-0.20.203 branch-0.20.204 branch-0.20.205 branch-0.21-old branch-0.21 branch-0.22 branch-0.23-PB-merge branch-0.23-PB-merge2 branch-0.23-PB-merge3 branch-0.23-PB branch-0.23 branch-0.23.0-rc0 branch-0.23.0 branch-0.23.1 branch-0.23.2 branch-0.23.3 branch-0.23.4 branch-0.23.5 branch-0.23.6 branch-0.23.7 branch-0.23.8 branch-0.23.9 branch-0.23.10 branch-0.23.11 branch-1-win branch-1 branch-1.0 branch-1.0.2 
branch-1.1 branch-1.2 branch-2-jhung-test branch-2 branch-2.0-alpha branch-2.0.0-alpha branch-2.0.1-alpha branch-2.0.2-alpha branch-2.0.3-alpha branch-2.0.4-alpha branch-2.0.5-alpha branch-2.0.5 branch-2.0.6-alpha branch-2.1-beta branch-2.1.0-alpha branch-2.1.0-beta branch-2.1.1-beta branch-2.2 branch-2.2.0 branch-2.2.1 branch-2.3 branch-2.3.0 branch-2.4 branch-2.4.0 branch-2.4.1 branch-2.5 branch-2.5.0 branch-2.5.1 branch-2.5.2 branch-2.6 branch-2.6.0 branch-2.6.1 branch-2.6.2 branch-2.6.3 branch-2.6.4 branch-2.6.5 branch-2.7 branch-2.7.0 branch-2.7.1 branch-2.7.2 branch-2.7.3 branch-2.7.4 branch-2.7.5 branch-2.7.6 branch-2.7.7 branch-2.8 branch-2.8.0 branch-2.8.1 branch-2.8.2 branch-2.8.3 branch-2.8.4 branch-2.8.5 branch-2.9 branch-2.9.0 branch-2.9.1 branch-2.9.2 branch-3 branch-3.0 branch-3.0.0-alpha1 branch-3.0.0-alpha2 branch-3.0.0-alpha3 branch-3.0.0-alpha4 branch-3.0.0-beta1 branch-3.0.0 branch-3.0.1 branch-3.0.2 branch-3.0.3 branch-3.1 branch-3.1.0 branch-3.1.1 branch-3.2 branch-3.2.0 branch-X.Y branch-trunk-win branch-yarn-3926 docker-hadoop-2 docker-hadoop-3 docker-hadoop-runner feature-HDFS-8286 feature-YARN-2928 fs-encryption master new-branch-3.1-03.17.2018 ozone-0.2 ozone-0.3 pre-HADOOP-4687 resource-types saved-branch-3.1-03172018 squashed-YARN-4752 trunk yahoo-merge yarn-2877 yarn-4719 yarn-4726 yarn-native-services
Nothing to show
Find file Copy path
Fetching contributors…
Cannot retrieve contributors at this time
executable file 119 lines (95 sloc) 3.65 KB
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Start hadoop dfs daemons.
# Optionally upgrade or rollback dfs state.
# Run this on master node.
usage="Usage: start-dfs.sh [-upgrade|-rollback] [other options such as -clusterId]"

# Resolve the directory containing this script, following the usual
# Hadoop convention of deriving libexec relative to sbin.
bin=$(dirname "${BASH_SOURCE-$0}")
bin=$(cd "$bin"; pwd)

# Source the HDFS environment helper (sets HADOOP_PREFIX, HADOOP_CONF_DIR,
# etc.). HADOOP_LIBEXEC_DIR may be pre-set by the caller; otherwise fall
# back to the sibling libexec directory. Quote the path so installs under
# directories containing spaces still work.
DEFAULT_LIBEXEC_DIR="$bin"/../libexec
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
. "$HADOOP_LIBEXEC_DIR/hdfs-config.sh"
# Parse the optional first argument: -upgrade applies to the namenode,
# -rollback applies to the datanodes. Anything else is an error.
if [[ $# -ge 1 ]]; then
  startOpt="$1"
  shift
  case "$startOpt" in
    -upgrade)
      nameStartOpt="$startOpt"
      ;;
    -rollback)
      dataStartOpt="$startOpt"
      ;;
    *)
      # Usage errors go to stderr; quote so the message is one word-safe line.
      echo "$usage" >&2
      exit 1
      ;;
  esac
fi

# Pass any remaining options (e.g. -clusterId) through to the namenode.
# "$*" joins the remaining args with single spaces for the string append.
nameStartOpt="$nameStartOpt $*"
#---------------------------------------------------------
# namenodes

# Ask hdfs for the configured namenode hosts; quote the binary path so a
# HADOOP_PREFIX containing spaces does not break the invocation.
NAMENODES=$("$HADOOP_PREFIX/bin/hdfs" getconf -namenodes)

echo "Starting namenodes on [$NAMENODES]"

# $nameStartOpt is deliberately unquoted: it may hold several options
# (e.g. "-upgrade -clusterId foo") that must word-split into separate args.
"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
  --config "$HADOOP_CONF_DIR" \
  --hostnames "$NAMENODES" \
  --script "$bin/hdfs" start namenode $nameStartOpt
#---------------------------------------------------------
# datanodes (using default slaves file)

# In a secure (Kerberized) cluster the datanodes must be started as root
# via start-secure-dns.sh, so skip them here when a secure DN user is set.
if [ -n "$HADOOP_SECURE_DN_USER" ]; then
  echo "Attempting to start secure cluster, skipping datanodes. " \
       "Run start-secure-dns.sh as root to complete startup."
else
  # $dataStartOpt is deliberately unquoted so "-rollback" word-splits
  # into a separate argument (empty when no option was given).
  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
    --config "$HADOOP_CONF_DIR" \
    --script "$bin/hdfs" start datanode $dataStartOpt
fi
#---------------------------------------------------------
# secondary namenodes (if any)

# getconf exits non-zero and prints nothing useful when no secondary
# namenodes are configured; discard its stderr and test for empty output.
SECONDARY_NAMENODES=$("$HADOOP_PREFIX/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)

if [ -n "$SECONDARY_NAMENODES" ]; then
  echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"

  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
    --config "$HADOOP_CONF_DIR" \
    --hostnames "$SECONDARY_NAMENODES" \
    --script "$bin/hdfs" start secondarynamenode
fi
#---------------------------------------------------------
# quorumjournal nodes (if any)

# Only start journal nodes when shared edits use the qjournal:// scheme.
# Use 2>/dev/null (not 2>&-): closing stderr can make the child command
# fail when it tries to write to fd 2, and it is inconsistent with the
# secondary-namenode lookup above.
SHARED_EDITS_DIR=$("$HADOOP_PREFIX/bin/hdfs" getconf -confKey dfs.namenode.shared.edits.dir 2>/dev/null)

case "$SHARED_EDITS_DIR" in
qjournal://*)
  # Extract the host list from qjournal://host1:port;host2:port/journalId:
  # keep the authority part, split on ';', and strip the port numbers.
  JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
  echo "Starting journal nodes [$JOURNAL_NODES]"
  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
    --config "$HADOOP_CONF_DIR" \
    --hostnames "$JOURNAL_NODES" \
    --script "$bin/hdfs" start journalnode ;;
esac
#---------------------------------------------------------
# ZK Failover controllers, if auto-HA is enabled

AUTOHA_ENABLED=$("$HADOOP_PREFIX/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled)

# Compare case-insensitively; character classes are locale-safe where the
# literal range A-Z is not.
if [ "$(echo "$AUTOHA_ENABLED" | tr '[:upper:]' '[:lower:]')" = "true" ]; then
  echo "Starting ZK Failover Controllers on NN hosts [$NAMENODES]"
  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
    --config "$HADOOP_CONF_DIR" \
    --hostnames "$NAMENODES" \
    --script "$bin/hdfs" start zkfc
fi
# eof