#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# we need to declare this globally as an array, which can only
# be done outside of a function
declare -a HADOOP_SUBCMD_USAGE
declare -a HADOOP_OPTION_USAGE
## @description Print a message to stderr
## @audience public
## @stability stable
## @replaceable no
## @param string
function hadoop_error
{
echo "$*" 1>&2
}
## @description Print a message to stderr if --debug is turned on
## @audience public
## @stability stable
## @replaceable no
## @param string
function hadoop_debug
{
if [[ -n "${HADOOP_SHELL_SCRIPT_DEBUG}" ]]; then
echo "DEBUG: $*" 1>&2
fi
}
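#
# Illustrative example (not part of the library itself): hadoop_debug
# output appears only when HADOOP_SHELL_SCRIPT_DEBUG is set, e.g.:
#
#   HADOOP_SHELL_SCRIPT_DEBUG=true hdfs namenode
#   # DEBUG: ... lines now show up on stderr
#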
## @description Given a filename or dir, return the absolute version of it
## @description This works as an alternative to readlink, which isn't
## @description portable.
## @audience public
## @stability stable
## @param fsobj
## @replaceable no
## @return 0 success
## @return 1 failure
## @return stdout abspath
function hadoop_abs
{
declare obj=$1
declare dir
declare fn
declare dirret
if [[ ! -e ${obj} ]]; then
return 1
elif [[ -d ${obj} ]]; then
dir=${obj}
else
dir=$(dirname -- "${obj}")
fn=$(basename -- "${obj}")
fn="/${fn}"
fi
dir=$(cd -P -- "${dir}" >/dev/null 2>/dev/null && pwd -P)
dirret=$?
if [[ ${dirret} = 0 ]]; then
echo "${dir}${fn}"
return 0
fi
return 1
}
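#
# Illustrative example: hadoop_abs resolves a relative path without
# relying on the non-portable readlink -f (paths here are hypothetical):
#
#   confdir=$(hadoop_abs "../etc/hadoop") || hadoop_error "no such path"
#   # e.g., prints /opt/hadoop/etc/hadoop when run from /opt/hadoop/bin
#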
## @description Given variable $1 delete $2 from it
## @audience public
## @stability stable
## @replaceable no
function hadoop_delete_entry
{
if [[ ${!1} =~ \ ${2}\ ]] ; then
hadoop_debug "Removing ${2} from ${1}"
eval "${1}"=\""${!1// ${2} }"\"
fi
}
## @description Given variable $1 add $2 to it
## @audience public
## @stability stable
## @replaceable no
function hadoop_add_entry
{
if [[ ! ${!1} =~ \ ${2}\ ]] ; then
hadoop_debug "Adding ${2} to ${1}"
#shellcheck disable=SC2140
eval "${1}"=\""${!1} ${2} "\"
fi
}
## @description Given variable $1 determine if $2 is in it
## @audience public
## @stability stable
## @replaceable no
## @return 0 = yes, 1 = no
function hadoop_verify_entry
{
# this unfortunately can't really be tested by bats. :(
# so if this changes, be aware that the unit tests effectively
# reimplement this function themselves
[[ ${!1} =~ \ ${2}\ ]]
}
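#
# Illustrative sketch of the convention shared by hadoop_add_entry,
# hadoop_delete_entry, and hadoop_verify_entry: values are stored
# space-delimited (with surrounding spaces), and each function takes
# the *name* of the variable rather than its value:
#
#   hadoop_add_entry HADOOP_SHELL_PROFILES hdfs
#   hadoop_verify_entry HADOOP_SHELL_PROFILES hdfs && echo "registered"
#   hadoop_delete_entry HADOOP_SHELL_PROFILES hdfs
#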
## @description Check if we are running with priv
## @description by default, this implementation looks for
## @description EUID=0. For OSes that have true priv
## @description separation, this should be something more complex
## @audience private
## @stability evolving
## @replaceable yes
## @return 1 = no priv
## @return 0 = priv
function hadoop_privilege_check
{
[[ "${EUID}" = 0 ]]
}
## @description Execute a command via su when running as root
## @description if the given user is found or exit with
## @description failure if not; otherwise just run it.
## @description (This is intended to
## @description be used by the start-*/stop-* scripts.)
## @audience private
## @stability evolving
## @replaceable yes
## @param user
## @param commandstring
## @return exitstatus
function hadoop_su
{
declare user=$1
shift
if hadoop_privilege_check; then
if hadoop_verify_user_resolves user; then
su -l "${user}" -- "$@"
else
hadoop_error "ERROR: Refusing to run as root: ${user} account is not found. Aborting."
return 1
fi
else
"$@"
fi
}
## @description Execute a command via su when running as root
## @description with extra support for commands that might
## @description legitimately start as root (e.g., datanode)
## @description (This is intended to
## @description be used by the start-*/stop-* scripts.)
## @audience private
## @stability evolving
## @replaceable no
## @param user
## @param commandstring
## @return exitstatus
function hadoop_uservar_su
{
## startup matrix:
#
# if $EUID != 0, then exec
# if $EUID = 0 then
# if hdfs_subcmd_user is defined, call hadoop_su to exec
# if hdfs_subcmd_user is not defined, error
#
# For secure daemons, this means both the secure and insecure env vars need to be
# defined. e.g., HDFS_DATANODE_USER=root HDFS_DATANODE_SECURE_USER=hdfs
# This function will pick up the "normal" var, switch to that user, then
# execute the command which will then pick up the "secure" version.
#
declare program=$1
declare command=$2
shift 2
declare uprogram
declare ucommand
declare uvar
declare svar
if hadoop_privilege_check; then
uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER)
svar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_USER)
if [[ -n "${!uvar}" ]]; then
hadoop_su "${!uvar}" "$@"
elif [[ -n "${!svar}" ]]; then
## if we are here, then SECURE_USER with no USER defined
## we are already privileged, so just run the command and hope
## for the best
"$@"
else
hadoop_error "ERROR: Attempting to operate on ${program} ${command} as root"
hadoop_error "ERROR: but there is no ${uvar} defined. Aborting operation."
return 1
fi
else
"$@"
fi
}
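#
# Worked example of the startup matrix above (values are illustrative):
# as root, 'hdfs --daemon start datanode' with
#
#   HDFS_DATANODE_USER=root HDFS_DATANODE_SECURE_USER=hdfs
#
# picks up the "normal" var, keeps running as root, and lets the secure
# daemon machinery drop to hdfs later. With only HDFS_DATANODE_USER=hdfs
# set, the command would instead be re-run via 'su -l hdfs'.
#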
## @description Add a subcommand to the usage output
## @audience private
## @stability evolving
## @replaceable no
## @param subcommand
## @param subcommanddesc
function hadoop_add_subcommand
{
local subcmd=$1
local text=$2
HADOOP_SUBCMD_USAGE[${HADOOP_SUBCMD_USAGE_COUNTER}]="${subcmd}@${text}"
((HADOOP_SUBCMD_USAGE_COUNTER=HADOOP_SUBCMD_USAGE_COUNTER+1))
}
## @description Add an option to the usage output
## @audience private
## @stability evolving
## @replaceable no
## @param subcommand
## @param subcommanddesc
function hadoop_add_option
{
local option=$1
local text=$2
HADOOP_OPTION_USAGE[${HADOOP_OPTION_USAGE_COUNTER}]="${option}@${text}"
((HADOOP_OPTION_USAGE_COUNTER=HADOOP_OPTION_USAGE_COUNTER+1))
}
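#
# Sketch of how an entry script might register its usage (names are
# illustrative):
#
#   hadoop_add_option "--debug" "turn on shell script debug mode"
#   hadoop_add_subcommand "version" "print the version"
#   hadoop_generate_usage "${MYNAME}" true
#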
## @description Reset the usage information to blank
## @audience private
## @stability evolving
## @replaceable no
function hadoop_reset_usage
{
HADOOP_SUBCMD_USAGE=()
HADOOP_OPTION_USAGE=()
HADOOP_SUBCMD_USAGE_COUNTER=0
HADOOP_OPTION_USAGE_COUNTER=0
}
## @description Print a screen-size aware two-column output
## @audience private
## @stability evolving
## @replaceable no
## @param array
function hadoop_generic_columnprinter
{
declare -a input=("$@")
declare -i i=0
declare -i counter=0
declare line
declare text
declare option
declare giventext
declare -i maxoptsize
declare -i foldsize
declare -a tmpa
declare numcols
if [[ -n "${COLUMNS}" ]]; then
numcols=${COLUMNS}
else
numcols=$(tput cols 2>/dev/null)
fi
if [[ -z "${numcols}"
|| ! "${numcols}" =~ ^[0-9]+$ ]]; then
numcols=75
else
((numcols=numcols-5))
fi
while read -r line; do
tmpa[${counter}]=${line}
((counter=counter+1))
option=$(echo "${line}" | cut -f1 -d'@')
if [[ ${#option} -gt ${maxoptsize} ]]; then
maxoptsize=${#option}
fi
done < <(for text in "${input[@]}"; do
echo "${text}"
done | sort)
i=0
((foldsize=numcols-maxoptsize))
until [[ $i -eq ${#tmpa[@]} ]]; do
option=$(echo "${tmpa[$i]}" | cut -f1 -d'@')
giventext=$(echo "${tmpa[$i]}" | cut -f2 -d'@')
while read -r line; do
printf "%-${maxoptsize}s %-s\n" "${option}" "${line}"
option=" "
done < <(echo "${giventext}"| fold -s -w ${foldsize})
((i=i+1))
done
}
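#
# Illustrative input: each array element is "name@description", and the
# description is folded to the detected terminal width:
#
#   hadoop_generic_columnprinter "fs@run a generic filesystem user client" \
#     "version@print the version"
#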
## @description generate standard usage output
## @description and optionally takes a class
## @audience private
## @stability evolving
## @replaceable no
## @param execname
## @param true|false
## @param [text to use in place of SUBCOMMAND]
function hadoop_generate_usage
{
local cmd=$1
local takesclass=$2
local subcmdtext=${3:-"SUBCOMMAND"}
local haveoptions
local optstring
local havesubs
local subcmdstring
cmd=${cmd##*/}
if [[ -n "${HADOOP_OPTION_USAGE_COUNTER}"
&& "${HADOOP_OPTION_USAGE_COUNTER}" -gt 0 ]]; then
haveoptions=true
optstring=" [OPTIONS]"
fi
if [[ -n "${HADOOP_SUBCMD_USAGE_COUNTER}"
&& "${HADOOP_SUBCMD_USAGE_COUNTER}" -gt 0 ]]; then
havesubs=true
subcmdstring=" ${subcmdtext} [${subcmdtext} OPTIONS]"
fi
echo "Usage: ${cmd}${optstring}${subcmdstring}"
if [[ ${takesclass} = true ]]; then
echo " or ${cmd}${optstring} CLASSNAME [CLASSNAME OPTIONS]"
echo " where CLASSNAME is a user-provided Java class"
fi
if [[ "${haveoptions}" = true ]]; then
echo ""
echo " OPTIONS is none or any of:"
echo ""
hadoop_generic_columnprinter "${HADOOP_OPTION_USAGE[@]}"
fi
if [[ "${havesubs}" = true ]]; then
echo ""
echo " ${subcmdtext} is one of:"
echo ""
hadoop_generic_columnprinter "${HADOOP_SUBCMD_USAGE[@]}"
echo ""
echo "${subcmdtext} may print help when invoked w/o parameters or with -h."
fi
}
## @description Replace `oldvar` with `newvar` if `oldvar` exists.
## @audience public
## @stability stable
## @replaceable yes
## @param oldvar
## @param newvar
function hadoop_deprecate_envvar
{
local oldvar=$1
local newvar=$2
local oldval=${!oldvar}
if [[ -n "${oldval}" ]]; then
hadoop_error "WARNING: ${oldvar} has been replaced by ${newvar}. Using value of ${oldvar}."
# shellcheck disable=SC2086
eval ${newvar}=\"${oldval}\"
fi
}
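#
# Illustrative example: carry a legacy variable forward under its
# replacement name:
#
#   hadoop_deprecate_envvar HADOOP_PREFIX HADOOP_HOME
#   # WARNING: HADOOP_PREFIX has been replaced by HADOOP_HOME. ...
#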
## @description Declare `var` being used and print its value.
## @audience public
## @stability stable
## @replaceable yes
## @param var
function hadoop_using_envvar
{
local var=$1
local val=${!var}
if [[ -n "${val}" ]]; then
hadoop_debug "${var} = ${val}"
fi
}
## @description Create the directory 'dir'.
## @audience public
## @stability stable
## @replaceable yes
## @param dir
function hadoop_mkdir
{
local dir=$1
if [[ ! -w "${dir}" ]] && [[ ! -d "${dir}" ]]; then
hadoop_error "WARNING: ${dir} does not exist. Creating."
if ! mkdir -p "${dir}"; then
hadoop_error "ERROR: Unable to create ${dir}. Aborting."
exit 1
fi
fi
}
## @description Bootstraps the Hadoop shell environment
## @audience private
## @stability evolving
## @replaceable no
function hadoop_bootstrap
{
# the root of the Hadoop installation
# See HADOOP-6255 for the expected directory structure layout
if [[ -n "${DEFAULT_LIBEXEC_DIR}" ]]; then
hadoop_error "WARNING: DEFAULT_LIBEXEC_DIR ignored. It has been replaced by HADOOP_DEFAULT_LIBEXEC_DIR."
fi
# By now, HADOOP_LIBEXEC_DIR should have been defined upstream
# We can piggyback off of that to figure out where the default
# HADOOP_HOME should be. This allows us to run without
# HADOOP_HOME ever being defined by a human! As a consequence
# HADOOP_LIBEXEC_DIR now becomes perhaps the single most powerful
# env var within Hadoop.
if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
hadoop_error "HADOOP_LIBEXEC_DIR is not defined. Exiting."
exit 1
fi
HADOOP_DEFAULT_PREFIX=$(cd -P -- "${HADOOP_LIBEXEC_DIR}/.." >/dev/null && pwd -P)
HADOOP_HOME=${HADOOP_HOME:-$HADOOP_DEFAULT_PREFIX}
export HADOOP_HOME
#
# short-cuts. vendors may redefine these as well, preferably
# in hadoop-layouts.sh
#
HADOOP_COMMON_DIR=${HADOOP_COMMON_DIR:-"share/hadoop/common"}
HADOOP_COMMON_LIB_JARS_DIR=${HADOOP_COMMON_LIB_JARS_DIR:-"share/hadoop/common/lib"}
HADOOP_COMMON_LIB_NATIVE_DIR=${HADOOP_COMMON_LIB_NATIVE_DIR:-"lib/native"}
HDFS_DIR=${HDFS_DIR:-"share/hadoop/hdfs"}
HDFS_LIB_JARS_DIR=${HDFS_LIB_JARS_DIR:-"share/hadoop/hdfs/lib"}
YARN_DIR=${YARN_DIR:-"share/hadoop/yarn"}
YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_HOME}}
HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"}
# by default, whatever we are about to run doesn't support
# daemonization
HADOOP_SUBCMD_SUPPORTDAEMONIZATION=false
# by default, we have not been self-re-execed
HADOOP_REEXECED_CMD=false
HADOOP_SUBCMD_SECURESERVICE=false
# This is the default we claim in hadoop-env.sh
JSVC_HOME=${JSVC_HOME:-"/usr/bin"}
# reset usage output
hadoop_reset_usage
export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
# defaults
export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
hadoop_debug "Initial HADOOP_OPTS=${HADOOP_OPTS}"
}
## @description Locate Hadoop's configuration directory
## @audience private
## @stability evolving
## @replaceable no
function hadoop_find_confdir
{
local conf_dir
# An attempt at compatibility with some Hadoop 1.x
# installs.
if [[ -e "${HADOOP_HOME}/conf/hadoop-env.sh" ]]; then
conf_dir="conf"
else
conf_dir="etc/hadoop"
fi
export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_HOME}/${conf_dir}}"
hadoop_debug "HADOOP_CONF_DIR=${HADOOP_CONF_DIR}"
}
## @description Validate ${HADOOP_CONF_DIR}
## @audience public
## @stability stable
## @replaceable yes
## @return will exit on failure conditions
function hadoop_verify_confdir
{
# Check only log4j.properties by default.
# --loglevel does not work without logger settings in log4j.properties.
if [[ ! -f "${HADOOP_CONF_DIR}/log4j.properties" ]]; then
hadoop_error "WARNING: log4j.properties is not found. HADOOP_CONF_DIR may be incomplete."
fi
}
## @description Import the hadoop-env.sh settings
## @audience private
## @stability evolving
## @replaceable no
function hadoop_exec_hadoopenv
{
if [[ -z "${HADOOP_ENV_PROCESSED}" ]]; then
if [[ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]]; then
export HADOOP_ENV_PROCESSED=true
# shellcheck source=./hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
. "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi
fi
}
## @description Import the replaced functions
## @audience private
## @stability evolving
## @replaceable no
function hadoop_exec_userfuncs
{
if [[ -e "${HADOOP_CONF_DIR}/hadoop-user-functions.sh" ]]; then
# shellcheck disable=SC1090
. "${HADOOP_CONF_DIR}/hadoop-user-functions.sh"
fi
}
## @description Read the user's settings. This provides for users to
## @description override and/or append hadoop-env.sh. It is not meant
## @description as a complete system override.
## @audience private
## @stability evolving
## @replaceable yes
function hadoop_exec_user_hadoopenv
{
if [[ -f "${HOME}/.hadoop-env" ]]; then
hadoop_debug "Applying the user's .hadoop-env"
# shellcheck disable=SC1090
. "${HOME}/.hadoop-env"
fi
}
## @description Read the user's settings. This provides for users to
## @description run Hadoop Shell API after system bootstrap
## @audience private
## @stability evolving
## @replaceable yes
function hadoop_exec_hadooprc
{
if [[ -f "${HOME}/.hadooprc" ]]; then
hadoop_debug "Applying the user's .hadooprc"
# shellcheck disable=SC1090
. "${HOME}/.hadooprc"
fi
}
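#
# A minimal ~/.hadooprc sketch (hypothetical content): since it is
# sourced after bootstrap, the shell API is available to it:
#
#   hadoop_add_classpath /opt/company/custom.jar
#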
## @description Import shellprofile.d content
## @audience private
## @stability evolving
## @replaceable yes
function hadoop_import_shellprofiles
{
local i
local files1
local files2
if [[ -d "${HADOOP_LIBEXEC_DIR}/shellprofile.d" ]]; then
files1=(${HADOOP_LIBEXEC_DIR}/shellprofile.d/*.sh)
hadoop_debug "shellprofiles: ${files1[*]}"
else
hadoop_error "WARNING: ${HADOOP_LIBEXEC_DIR}/shellprofile.d doesn't exist. Functionality may not work."
fi
if [[ -d "${HADOOP_CONF_DIR}/shellprofile.d" ]]; then
files2=(${HADOOP_CONF_DIR}/shellprofile.d/*.sh)
fi
# enable bundled shellprofiles that come
# from hadoop-tools. This converts the user-facing HADOOP_OPTIONAL_TOOLS
# to the HADOOP_TOOLS_OPTIONS that the shell profiles expect.
# See dist-tools-hooks-maker for how the example HADOOP_OPTIONAL_TOOLS
# gets populated into hadoop-env.sh
for i in ${HADOOP_OPTIONAL_TOOLS//,/ }; do
hadoop_add_entry HADOOP_TOOLS_OPTIONS "${i}"
done
for i in "${files1[@]}" "${files2[@]}"
do
if [[ -n "${i}"
&& -f "${i}" ]]; then
hadoop_debug "Profiles: importing ${i}"
# shellcheck disable=SC1090
. "${i}"
fi
done
}
## @description Initialize the registered shell profiles
## @audience private
## @stability evolving
## @replaceable yes
function hadoop_shellprofiles_init
{
local i
for i in ${HADOOP_SHELL_PROFILES}
do
if declare -F _${i}_hadoop_init >/dev/null ; then
hadoop_debug "Profiles: ${i} init"
# shellcheck disable=SC2086
_${i}_hadoop_init
fi
done
}
## @description Apply the shell profile classpath additions
## @audience private
## @stability evolving
## @replaceable yes
function hadoop_shellprofiles_classpath
{
local i
for i in ${HADOOP_SHELL_PROFILES}
do
if declare -F _${i}_hadoop_classpath >/dev/null ; then
hadoop_debug "Profiles: ${i} classpath"
# shellcheck disable=SC2086
_${i}_hadoop_classpath
fi
done
}
## @description Apply the shell profile native library additions
## @audience private
## @stability evolving
## @replaceable yes
function hadoop_shellprofiles_nativelib
{
local i
for i in ${HADOOP_SHELL_PROFILES}
do
if declare -F _${i}_hadoop_nativelib >/dev/null ; then
hadoop_debug "Profiles: ${i} nativelib"
# shellcheck disable=SC2086
_${i}_hadoop_nativelib
fi
done
}
## @description Apply the shell profile final configuration
## @audience private
## @stability evolving
## @replaceable yes
function hadoop_shellprofiles_finalize
{
local i
for i in ${HADOOP_SHELL_PROFILES}
do
if declare -F _${i}_hadoop_finalize >/dev/null ; then
hadoop_debug "Profiles: ${i} finalize"
# shellcheck disable=SC2086
_${i}_hadoop_finalize
fi
done
}
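#
# Sketch of a profile that these four dispatchers would pick up
# (hypothetical shellprofile.d/example.sh; hadoop_add_profile is
# defined elsewhere in this library):
#
#   hadoop_add_profile example
#   function _example_hadoop_init { hadoop_debug "example init"; }
#   function _example_hadoop_classpath {
#     hadoop_add_classpath "/opt/example/example.jar"
#   }
#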
## @description Initialize the Hadoop shell environment, now that
## @description user settings have been imported
## @audience private
## @stability evolving
## @replaceable no
function hadoop_basic_init
{
# Some of these are also set in hadoop-env.sh.
# we still set them here just in case hadoop-env.sh is
# broken in some way, set up defaults, etc.
#
# but it is important to note that if you update these
# you also need to update hadoop-env.sh as well!!!
CLASSPATH=""
hadoop_debug "Initialize CLASSPATH"
if [[ -z "${HADOOP_COMMON_HOME}" ]] &&
[[ -d "${HADOOP_HOME}/${HADOOP_COMMON_DIR}" ]]; then
export HADOOP_COMMON_HOME="${HADOOP_HOME}"
fi
# default policy file for service-level authorization
HADOOP_POLICYFILE=${HADOOP_POLICYFILE:-"hadoop-policy.xml"}
# define HADOOP_HDFS_HOME
if [[ -z "${HADOOP_HDFS_HOME}" ]] &&
[[ -d "${HADOOP_HOME}/${HDFS_DIR}" ]]; then
export HADOOP_HDFS_HOME="${HADOOP_HOME}"
fi
# define HADOOP_YARN_HOME
if [[ -z "${HADOOP_YARN_HOME}" ]] &&
[[ -d "${HADOOP_HOME}/${YARN_DIR}" ]]; then
export HADOOP_YARN_HOME="${HADOOP_HOME}"
fi
# define HADOOP_MAPRED_HOME
if [[ -z "${HADOOP_MAPRED_HOME}" ]] &&
[[ -d "${HADOOP_HOME}/${MAPRED_DIR}" ]]; then
export HADOOP_MAPRED_HOME="${HADOOP_HOME}"
fi
if [[ ! -d "${HADOOP_COMMON_HOME}" ]]; then
hadoop_error "ERROR: Invalid HADOOP_COMMON_HOME"
exit 1
fi
if [[ ! -d "${HADOOP_HDFS_HOME}" ]]; then
hadoop_error "ERROR: Invalid HADOOP_HDFS_HOME"
exit 1
fi
if [[ ! -d "${HADOOP_YARN_HOME}" ]]; then
hadoop_error "ERROR: Invalid HADOOP_YARN_HOME"
exit 1
fi
if [[ ! -d "${HADOOP_MAPRED_HOME}" ]]; then
hadoop_error "ERROR: Invalid HADOOP_MAPRED_HOME"
exit 1
fi
# if for some reason the shell doesn't have $USER defined
# (e.g., ssh'd in to execute a command)
# let's get the effective username and use that
USER=${USER:-$(id -nu)}
HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER}
HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_HOME}/logs"}
HADOOP_LOGFILE=${HADOOP_LOGFILE:-hadoop.log}
HADOOP_LOGLEVEL=${HADOOP_LOGLEVEL:-INFO}
HADOOP_NICENESS=${HADOOP_NICENESS:-0}
HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}
HADOOP_PID_DIR=${HADOOP_PID_DIR:-/tmp}
HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-${HADOOP_LOGLEVEL},console}
HADOOP_DAEMON_ROOT_LOGGER=${HADOOP_DAEMON_ROOT_LOGGER:-${HADOOP_LOGLEVEL},RFA}
HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}
HADOOP_SSH_OPTS=${HADOOP_SSH_OPTS-"-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"}
HADOOP_SECURE_LOG_DIR=${HADOOP_SECURE_LOG_DIR:-${HADOOP_LOG_DIR}}
HADOOP_SECURE_PID_DIR=${HADOOP_SECURE_PID_DIR:-${HADOOP_PID_DIR}}
HADOOP_SSH_PARALLEL=${HADOOP_SSH_PARALLEL:-10}
}
## @description Set the worker support information to the contents
## @description of `filename`
## @audience public
## @stability stable
## @replaceable no
## @param filename
## @return will exit if file does not exist
function hadoop_populate_workers_file
{
local workersfile=$1
shift
if [[ -f "${workersfile}" ]]; then
HADOOP_WORKERS="${workersfile}"
elif [[ -f "${HADOOP_CONF_DIR}/${workersfile}" ]]; then
HADOOP_WORKERS="${HADOOP_CONF_DIR}/${workersfile}"
else
hadoop_error "ERROR: Cannot find hosts file \"${workersfile}\""
hadoop_exit_with_usage 1
fi
}
## @description Rotates the given `file` until `number` of
## @description files exist.
## @audience public
## @stability stable
## @replaceable no
## @param filename
## @param [number]
## @return $? will contain last mv's return value
function hadoop_rotate_log
{
#
# Users are likely to replace this one for something
# that gzips or uses dates or who knows what.
#
# be aware that &1 and &2 might go through here
# so don't do anything too crazy...
#
local log=$1;
local num=${2:-5};
if [[ -f "${log}" ]]; then # rotate logs
while [[ ${num} -gt 1 ]]; do
#shellcheck disable=SC2086
let prev=${num}-1
if [[ -f "${log}.${prev}" ]]; then
mv "${log}.${prev}" "${log}.${num}"
fi
num=${prev}
done
mv "${log}" "${log}.${num}"
fi
}
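#
# Illustrative effect with the default num=5:
#
#   hadoop_rotate_log "${HADOOP_LOG_DIR}/nn.log"
#   # nn.log.4 -> nn.log.5, ..., nn.log.1 -> nn.log.2, nn.log -> nn.log.1
#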
## @description Via ssh, log into `hostname` and run `command`
## @audience private
## @stability evolving
## @replaceable yes
## @param hostname
## @param command
## @param [...]
function hadoop_actual_ssh
{
# we are passing this function to xargs
# should get hostname followed by rest of command line
local worker=$1
shift
# shellcheck disable=SC2086
ssh ${HADOOP_SSH_OPTS} ${worker} $"${@// /\\ }" 2>&1 | sed "s/^/$worker: /"
}
## @description Connect to ${HADOOP_WORKERS} or ${HADOOP_WORKER_NAMES}
## @description and execute command.
## @audience private
## @stability evolving
## @replaceable yes
## @param command
## @param [...]
function hadoop_connect_to_hosts
{
# shellcheck disable=SC2124
local params="$@"
local worker_file
local tmpslvnames
#
# ssh (or whatever) to a host
#
# User can specify hostnames or a file where the hostnames are (not both)
if [[ -n "${HADOOP_WORKERS}" && -n "${HADOOP_WORKER_NAMES}" ]] ; then
hadoop_error "ERROR: Both HADOOP_WORKERS and HADOOP_WORKER_NAMES were defined. Aborting."
exit 1
elif [[ -z "${HADOOP_WORKER_NAMES}" ]]; then
if [[ -n "${HADOOP_WORKERS}" ]]; then
worker_file=${HADOOP_WORKERS}
elif [[ -f "${HADOOP_CONF_DIR}/workers" ]]; then
worker_file=${HADOOP_CONF_DIR}/workers
elif [[ -f "${HADOOP_CONF_DIR}/slaves" ]]; then
hadoop_error "WARNING: 'slaves' file has been deprecated. Please use 'workers' file instead."
worker_file=${HADOOP_CONF_DIR}/slaves
fi
fi
# if pdsh is available, let's use it. otherwise default
# to a loop around ssh. (ugh)
if [[ -e '/usr/bin/pdsh' ]]; then
if [[ -z "${HADOOP_WORKER_NAMES}" ]] ; then
# if we were given a file, just let pdsh deal with it.
# shellcheck disable=SC2086
PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \
-f "${HADOOP_SSH_PARALLEL}" -w ^"${worker_file}" $"${@// /\\ }" 2>&1
else
# no spaces allowed in the pdsh arg host list
# shellcheck disable=SC2086
tmpslvnames=$(echo ${HADOOP_WORKER_NAMES} | tr -s ' ' ,)
PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \
-f "${HADOOP_SSH_PARALLEL}" \
-w "${tmpslvnames}" $"${@// /\\ }" 2>&1
fi
else
if [[ -z "${HADOOP_WORKER_NAMES}" ]]; then
HADOOP_WORKER_NAMES=$(sed 's/#.*$//;/^$/d' "${worker_file}")
fi
hadoop_connect_to_hosts_without_pdsh "${params}"
fi
}
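#
# Illustrative example: run a command on an ad-hoc host list instead of
# a workers file (hostnames here are hypothetical):
#
#   HADOOP_WORKER_NAMES="host1 host2" hadoop_connect_to_hosts hostname
#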
## @description Connect to ${HADOOP_WORKER_NAMES} and execute command
## @description under the environment which does not support pdsh.
## @audience private
## @stability evolving
## @replaceable yes
## @param command
## @param [...]
function hadoop_connect_to_hosts_without_pdsh
{
# shellcheck disable=SC2124
local params="$@"
local workers=(${HADOOP_WORKER_NAMES})
for (( i = 0; i < ${#workers[@]}; i++ ))
do
if (( i != 0 && i % HADOOP_SSH_PARALLEL == 0 )); then
wait
fi
# shellcheck disable=SC2086
hadoop_actual_ssh "${workers[$i]}" ${params} &
done
wait
}
## @description Utility routine to handle --workers mode
## @audience private
## @stability evolving
## @replaceable yes
## @param commandarray
function hadoop_common_worker_mode_execute
{
#
# input should be the command line as given by the user
# in the form of an array
#
local argv=("$@")
# if --workers is still on the command line, remove it
# to prevent loops
# Also remove --hostnames and --hosts along with arg values
local argsSize=${#argv[@]};
for (( i = 0; i < argsSize; i++ ))
do
if [[ "${argv[$i]}" =~ ^--workers$ ]]; then
unset argv[$i]
elif [[ "${argv[$i]}" =~ ^--hostnames$ ]] ||
[[ "${argv[$i]}" =~ ^--hosts$ ]]; then
unset argv[$i];
let i++;
unset argv[$i];
fi
done
if [[ ${QATESTMODE} = true ]]; then
echo "${argv[@]}"
return
fi
hadoop_connect_to_hosts -- "${argv[@]}"
}
## @description Verify that a shell command was passed a valid
## @description class name
## @audience public
## @stability stable
## @replaceable yes
## @param classname
## @return 0 = success
## @return 1 = failure w/user message
function hadoop_validate_classname
{
local class=$1
shift 1
if [[ ! ${class} =~ \. ]]; then
# assume the arg is a typo of a command if it does not contain ".";
# a class belonging to no package is not allowed as a result.
hadoop_error "ERROR: ${class} is not COMMAND nor fully qualified CLASSNAME."
return 1
fi
return 0
}
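#
# Illustrative example: only fully qualified class names pass the check:
#
#   hadoop_validate_classname org.apache.hadoop.fs.FsShell  # returns 0
#   hadoop_validate_classname FsShell                       # error, returns 1
#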
## @description Append the `appendstring` if `checkstring` is not
## @description present in the given `envvar`
## @audience public
## @stability stable
## @replaceable yes
## @param envvar