/
default.rb
171 lines (146 loc) · 5.09 KB
/
default.rb
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
#
# Cookbook Name:: hbase
# Recipe:: default
#
#
# Portions Copyright (c) 2012-2014 VMware, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
include_recipe "java::sun"
include_recipe "hadoop_common::pre_run"
include_recipe "hadoop_cluster::hadoop_conf_xml"
# alias home dir
# Pivotal HD lays HBase out under /usr/lib/gphd; override the default
# home/conf attributes so the rest of this recipe finds the right paths.
if is_pivotalhd_distro
  node.normal[:hbase][:home_dir] = '/usr/lib/gphd/hbase'
  node.normal[:hbase][:conf_dir] = '/etc/gphd/hbase/conf'
end
# Provide a distro-independent path to the HBase installation.
force_link("/usr/lib/hbase", node[:hbase][:home_dir])
# Service account. The group must be created before the user that
# references it via gid below.
group "hbase" do
end
user "hbase" do
  gid "hbase"
  shell "/bin/bash"
  password nil
end
# Raise OS limits for the hbase group ("@hbase"): HBase holds many open
# files (HFiles, WALs, sockets) and spawns many threads.
ulimit_nofile = 32768
ulimit_nproc = 32000
set_sys_limit "Increase maximum num of open files ulimit", "@hbase", "nofile", ulimit_nofile
set_sys_limit "Increase maximum num of processes ulimit", "@hbase", "nproc", ulimit_nproc
# Report install progress to the bootstrap tracker, then install the HBase
# bits from tarball or distro package depending on cluster configuration.
set_bootstrap_action(ACTION_INSTALL_PACKAGE, 'hbase', true)
if is_install_from_tarball then
  include_recipe "hbase::install_from_tarball"
else
  include_recipe "hbase::install_from_package"
end
# link HBase log dir to the mounted data disk to get larger disk space
disk_dir = disks_mount_points[0]
if disk_dir
  # src (original path) => des (path relative to the data disk mount point)
  dirs = { '/var/log/hbase' => 'hbase/log' }
  # Iterate with each, not map: this loop runs purely for its side effects
  # (creating directories and symlinks); the return value is discarded.
  dirs.each do |src, des|
    target = "#{disk_dir}/#{des}"
    directory target do
      owner "hbase"
      group "hbase"
      mode "0755"
      recursive true
    end
    force_link src, target
  end
end
# Runtime and config directories owned by the hbase service account.
# '/var/log/hbase' is created here only when no data disk is mounted;
# with a data disk it is instead a symlink onto that disk.
runtime_dirs = %w[/var/run/hbase /etc/hbase]
runtime_dirs << "/var/log/hbase" unless disk_dir
runtime_dirs.each do |path|
  directory path do
    owner "hbase"
    group "hbase"
    mode "0755"
  end
end
# Different Hadoop distro may have different conf dir. We will link '/etc/hbase/conf' to it.
make_link("#{node[:hbase][:home_dir]}/conf", node[:hbase][:conf_dir])
hbase_conf_dir = "/etc/hbase/conf"
# Only create the link when the path does not already exist: some distros
# (e.g. CDH4) ship /etc/hbase/conf themselves and must not be clobbered.
link hbase_conf_dir do
  to node[:hbase][:conf_dir]
  not_if {File.exist?(hbase_conf_dir)} # to be compatible with CDH4
end
# Build the set of HDFS namespaces HBase may point its rootdir at.
# valid_namespaces_map: namespace name => [address(es)..., facet name],
# used below to match a user-supplied hbase.rootdir against known namenodes.
valid_namespaces_map = {}
default_namespace = ''
nn_port = namenode_port
if node[:hadoop][:cluster_has_hdfs_ha_or_federation]
  # map valid namespace name to all its addresses and facet
  namenode_facet_addresses.each do |facet_addresses|
    facet_addresses.each do |facet, addresses|
      addresses_copy = addresses.dup
      if addresses_copy.length == 1
        # Single namenode in the facet: key by its host:port address.
        valid_namespaces_map["#{addresses_copy[0]}:#{nn_port}"] = addresses_copy << facet
      else
        # HA facet with multiple namenodes: key by the facet (nameservice) name.
        valid_namespaces_map[facet] = addresses_copy << facet
      end
    end
  end
  # the default namespace, just select the first namespace
  if node[:hadoop][:cluster_has_only_federation] or namenode_facet_addresses[0][namenode_facet_names[0]].length == 1
    default_namespace = namenode_facet_addresses[0][namenode_facet_names[0]][0] + ":#{nn_port}"
  else
    default_namespace = namenode_facet_names[0]
  end
else
  # Plain single-namenode cluster: one namespace, keyed by host:port.
  default_namespace = namenode_address + ":#{nn_port}"
  valid_namespaces_map[default_namespace] = [default_namespace]
end
# Resolve which namespace the HBase root dir lives under. If the user
# supplied an hbase.rootdir, choose the valid namespace whose address or
# nameservice pattern has the longest substring match inside that URI;
# otherwise fall back to the default namespace selected above.
matched_namespace = nil
matched_pattern = ''
# try to guess a valid namespace name if user defined hbase.rootdir attr
rootdir = rootdir_conf
if rootdir
  valid_namespaces_map.each do |candidate, patterns|
    patterns.each do |pattern|
      # Longest-match wins; use && (not 'and') to avoid Ruby's
      # low-precedence boolean operator pitfalls.
      if rootdir.include?(pattern) && pattern.length > matched_pattern.length
        matched_pattern = pattern
        matched_namespace = candidate
      end
    end
  end
end
namespace = matched_namespace || default_namespace
hbase_hdfs_home = get_hbase_root_dir(namespace)
# get zookeeper_session_timeout to be used in hbase-daemon.sh
# NOTE(review): the inline 'rescue nil' is a deliberate best-effort read —
# any missing level in the cluster_configuration hash falls through to the
# default attribute on the next line.
zookeeper_session_timeout = node['cluster_configuration']['hbase']['hbase-site.xml']['zookeeper.session.timeout'] rescue nil
zookeeper_session_timeout ||= node[:hbase][:zookeeper_session_timeout]
# The attribute is in milliseconds; hbase-daemon.sh wants seconds.
zookeeper_session_timeout = zookeeper_session_timeout.to_i / 1000 + 120 # convert to seconds, and plus extra 2 minutes
zk_quorum = zookeepers_quorum
# Variables shared by every template rendered below.
template_variables = {
  :hbase_hdfs_home => hbase_hdfs_home,
  :zookeeper_quorum => zk_quorum,
  :zookeeper_session_timeout => zookeeper_session_timeout,
  :http_address => fqdn_of_hdfs_network(node),
  :bind_interface => device_of_hdfs_network(node)
}
# Render the HBase configuration files into the conf dir. Shell scripts
# are installed executable (0755); plain config files are 0644.
%w[ hbase-site.xml hbase-env.sh log4j.properties hadoop-metrics.properties ].each do |conf_file|
  script = conf_file.end_with?('.sh')
  template File.join(hbase_conf_dir, conf_file) do
    owner "hbase"
    mode script ? "0755" : "0644"
    source "#{conf_file}.erb"
    variables(template_variables)
  end
end
# Deploy patched daemon scripts into HBase's bin dir; kept as a list so
# further bin scripts can be added without restructuring.
%w[ hbase-daemon.sh ].each do |bin_file|
  template File.join(node[:hbase][:home_dir], "bin", bin_file) do
    owner "hbase"
    mode bin_file.end_with?('.sh') ? "0755" : "0644"
    source "#{bin_file}.erb"
    variables(template_variables)
  end
end
# Block until the ZooKeeper quorum is reachable (HBase needs it running),
# then mark this node's bootstrap action complete.
wait_for_zookeepers_service
clear_bootstrap_action