Revert "Revert "Creating 2 jobs that contain various hadoop, hbase an…

…d opentsdb components.""

This reverts commit 38aab17.

Change-Id: Ic33beb1d6e6c975f1611be0897a8dddcdfe89f95
commit bc1e530d5780a30ed5c1ea33dbf480306d020fd7 1 parent a993798
lisbakke authored
Showing with 925 additions and 117 deletions.
  1. +8 −4 config/blobs.yml
  2. +19 −0 jobs/hbase_master/monit
  3. +19 −0 jobs/hbase_master/spec
  4. +12 −0 jobs/hbase_master/templates/hadoop.core-site.xml.erb
  5. +17 −0 jobs/hbase_master/templates/hadoop.hdfs-site.xml
  6. +2 −0  jobs/hbase_master/templates/hadoop.slaves.erb
  7. +70 −0 jobs/hbase_master/templates/hadoop_namenode_ctl
  8. +58 −0 jobs/hbase_master/templates/hbase.hadoop-metrics.properties
  9. +76 −0 jobs/hbase_master/templates/hbase.hbase-env.sh
  10. +48 −0 jobs/hbase_master/templates/hbase.hbase-site.xml.erb
  11. +13 −0 jobs/hbase_master/templates/hbase.log4j.properties
  12. +2 −0  jobs/hbase_master/templates/hbase.regionservers.erb
  13. +57 −0 jobs/hbase_master/templates/hbase_master_ctl
  14. +55 −0 jobs/hbase_master/templates/hbase_zookeeper_ctl
  15. +12 −0 jobs/hbase_slave/monit
  16. +18 −0 jobs/hbase_slave/spec
  17. +12 −0 jobs/hbase_slave/templates/hadoop.core-site.xml.erb
  18. +17 −0 jobs/hbase_slave/templates/hadoop.hdfs-site.xml
  19. +2 −0  jobs/hbase_slave/templates/hadoop.slaves.erb
  20. +62 −0 jobs/hbase_slave/templates/hadoop_datanode_ctl.erb
  21. +58 −0 jobs/hbase_slave/templates/hbase.hadoop-metrics.properties
  22. +76 −0 jobs/hbase_slave/templates/hbase.hbase-env.sh
  23. +48 −0 jobs/hbase_slave/templates/hbase.hbase-site.xml.erb
  24. +13 −0 jobs/hbase_slave/templates/hbase.log4j.properties
  25. +2 −0  jobs/hbase_slave/templates/hbase.regionservers.erb
  26. +55 −0 jobs/hbase_slave/templates/hbase_regionserver_ctl
  27. +0 −7 jobs/opentsdb/monit
  28. +8 −9 jobs/opentsdb/spec
  29. +0 −4 jobs/opentsdb/templates/cleanup_tmpdir
  30. +1 −1  jobs/opentsdb/templates/hbase.hbase-env.sh
  31. +0 −16 jobs/opentsdb/templates/hbase.hbase-site.xml
  32. +48 −0 jobs/opentsdb/templates/hbase.hbase-site.xml.erb
  33. +1 −1  jobs/opentsdb/templates/hbase.log4j.properties
  34. +0 −1  jobs/opentsdb/templates/hbase.regionservers
  35. +2 −0  jobs/opentsdb/templates/hbase.regionservers.erb
  36. +0 −54 jobs/opentsdb/templates/hbase_ctl
  37. +19 −10 jobs/opentsdb/templates/{opentsdb_ctl → opentsdb_ctl.erb}
  38. +8 −0 packages/hadoop/packaging
  39. +4 −0 packages/hadoop/spec
  40. +2 −8 packages/hbase/packaging
  41. +1 −2  packages/hbase/spec
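Before the per-file hunks, it may help to see the deployment-manifest properties that the new ERB templates consume. The following is a hypothetical sketch: the property names are taken directly from the templates in this commit, but the values (addresses, ports, heap sizes) are placeholders, not part of the change.

# Hypothetical properties block; names come from the ERB templates below,
# values are illustrative placeholders only.
properties:
  hbase_master:
    address: 10.0.16.10              # also serves as the single-node ZooKeeper quorum
    hostname: hbase-master-0
    hadoop_namenode:
      port: 9000
    hbase_master:
      port: 60000
      webui_port: 60010
      heap_size: 1024
    hbase_zookeeper:
      heap_size: 1024
  hbase_slave:
    addresses:                       # rendered into hadoop/slaves and hbase/regionservers
    - 10.0.16.11
    - 10.0.16.12
    hbase_regionserver:
      port: 60020
      heap_size: 1024
  opentsdb:
    port: 4242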
12 config/blobs.yml
@@ -95,10 +95,6 @@ hbase/native-lib.tar.gz:
object_id: eyJvaWQiOiI0ZTRlNzhiY2E2MWUxMjEwMDRlNGU3ZDUxZDk1MGUwNGYzMDY4%0AZTQ5ODhjOCIsInNpZyI6IkFlK1c2bGlOVFVqSGk2alB0Q3IzUlBOR3pvdz0i%0AfQ==%0A
sha: 8d35c05ce07f0af4a19926fb1497ba101b4116ba
size: 546495
-hbase/hbase-0.90.3-cdh3u1.tar.gz:
- object_id: eyJvaWQiOiI0ZTRlNzhiY2E2MWUxMjIwMDRlNGU4ZWM2YjQ0YjYwNGYzMDY4%0AZWEyNmVlNyIsInNpZyI6IlMvZ1R5ZXlRdHhLZUxqSGU0QUU5WmErd1F3WT0i%0AfQ==%0A
- sha: 34e7de45ed6c36f618e9625128422023868c5e51
- size: 35113844
postgresql/postgresql-9.0-x86_64.tar.gz:
object_id: eyJvaWQiOiI0ZTRlNzhiY2ExMWUxMjEwMDRlNGU3ZDUxMWY4MjEwNGYzMDY4%0AZmI3MTJlNCIsInNpZyI6IkMzS0hrcTAxWnZSVU9wQVI2OWFXWXFlR1FVST0i%0AfQ==%0A
sha: 1c777413ce355ebf79c5786e7dbc5a060883986f
@@ -579,3 +575,11 @@ uaa/cloudfoundry-identity-uaa-1.1.2.war:
object_id: eyJvaWQiOiI0ZTRlNzhiY2E1MWUxMjEwMDRlNGU3ZDUxOTA2Y2QwNTAzZDBl%0AMWMwY2RlMyIsInNpZyI6InZ1L3k5N05jaC9GNUljVFZuR2tpMXJpS29vaz0i%0AfQ==%0A
sha: f84e77cc0e6193755913fa59bf0d88c2125705a0
size: 24268942
+hadoop/hadoop-0.20.2-cdh3u4.tar.gz:
+ object_id: eyJvaWQiOiI0ZTRlNzhiY2E1MWUxMjIyMDRlNGU5ODYzZjI4ZjMwNGZiZDAx%0AODQwN2YwYiIsInNpZyI6ImtrWDA3SXVFVzNiTmNPSHhqMGNhd0Nmb2I0RT0i%0AfQ==%0A
+ sha: 1a9e6437b4b7f0bdbfdd3bd9170a33238e151f12
+ size: 70157279
+hbase/hbase-0.90.6-cdh3u4-vmware-patched.tar.gz:
+ object_id: eyJvaWQiOiI0ZTRlNzhiY2ExMWUxMjIyMDRlNGU5ODYzOGI3NjMwNTAxMDVh%0AYzVjNTkwMSIsInNpZyI6IldlMi9yRjRybFZ3Y1ptR3FwWklzcHFZT2RiUT0i%0AfQ==%0A
+ sha: 4290e2a35fc54d34d8ea4e0559e6de6e485322d4
+ size: 36978703
19 jobs/hbase_master/monit
@@ -0,0 +1,19 @@
+check process hadoop_namenode
+ with pidfile /var/vcap/sys/run/hbase_master/hadoop-vcap-namenode.pid
+ start program "/var/vcap/jobs/hbase_master/bin/hadoop_namenode_ctl start"
+ stop program "/var/vcap/jobs/hbase_master/bin/hadoop_namenode_ctl stop"
+ group vcap
+
+check process hbase_zookeeper
+ with pidfile /var/vcap/sys/run/hbase_master/hbase-vcap-zookeeper.pid
+ start program "/var/vcap/jobs/hbase_master/bin/hbase_zookeeper_ctl start"
+ stop program "/var/vcap/jobs/hbase_master/bin/hbase_zookeeper_ctl stop"
+ depends on hadoop_namenode
+ group vcap
+
+check process hbase_master
+ with pidfile /var/vcap/sys/run/hbase_master/hbase-vcap-master.pid
+ start program "/var/vcap/jobs/hbase_master/bin/hbase_master_ctl start"
+ stop program "/var/vcap/jobs/hbase_master/bin/hbase_master_ctl stop"
+ depends on hbase_zookeeper
+ group vcap
19 jobs/hbase_master/spec
@@ -0,0 +1,19 @@
+---
+name: hbase_master
+templates:
+ hadoop.core-site.xml.erb: config/hadoop/core-site.xml
+ hadoop.hdfs-site.xml: config/hadoop/hdfs-site.xml
+ hadoop.slaves.erb: config/hadoop/slaves
+ hadoop_namenode_ctl: bin/hadoop_namenode_ctl
+ hbase.hadoop-metrics.properties: config/hbase/hadoop-metrics.properties
+ hbase.hbase-env.sh: config/hbase/hbase-env.sh
+ hbase.hbase-site.xml.erb: config/hbase/hbase-site.xml
+ hbase.log4j.properties: config/hbase/log4j.properties
+ hbase.regionservers.erb: config/hbase/regionservers
+ hbase_master_ctl: bin/hbase_master_ctl
+ hbase_zookeeper_ctl: bin/hbase_zookeeper_ctl
+packages:
+- common
+- dea_jvm
+- hadoop
+- hbase
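The spec above only maps templates to their rendered paths; how the job is deployed is up to the manifest. A minimal sketch of a jobs entry, assuming a BOSH v1-style manifest (resource pool, network name, disk size and IP are placeholders):

# Hypothetical manifest entry for the new master job.
jobs:
- name: hbase_master
  template: hbase_master
  instances: 1
  resource_pool: infrastructure
  persistent_disk: 16384             # NameNode and HBase data live under /var/vcap/store
  networks:
  - name: default
    static_ips:
    - 10.0.16.10                     # should match properties.hbase_master.address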
12 jobs/hbase_master/templates/hadoop.core-site.xml.erb
@@ -0,0 +1,12 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>fs.default.name</name>
+ <value>hdfs://<%= properties.hbase_master.address %>:<%= properties.hbase_master.hadoop_namenode.port %></value>
+ <description>URI of NameNode.</description>
+ </property>
+</configuration>
17 jobs/hbase_master/templates/hadoop.hdfs-site.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>dfs.name.dir</name>
+ <value>/var/vcap/store/hbase_master/name</value>
+ <description>Path on the local filesystem where the NameNode stores the namespace and transactions logs persistently.</description>
+ </property>
+ <property>
+ <name>dfs.data.dir</name>
+ <value>/var/vcap/store/hbase_master/data</value>
+ <description>Comma separated list of paths on the local filesystem of where a DataNode should store its blocks.</description>
+ </property>
+</configuration>
2  jobs/hbase_master/templates/hadoop.slaves.erb
@@ -0,0 +1,2 @@
+<% for address in properties.hbase_slave.addresses %><%= address %>
+<% end %>
70 jobs/hbase_master/templates/hadoop_namenode_ctl
@@ -0,0 +1,70 @@
+#!/bin/bash -e
+
+HADOOP_PACKAGE_DIR="/var/vcap/packages/hadoop"
+JOBS_DIR="/var/vcap/jobs/hbase_master"
+LOG_DIR="/var/vcap/sys/log/hbase_master"
+RUN_DIR="/var/vcap/sys/run/hbase_master"
+
+HADOOP_BIN="${HADOOP_PACKAGE_DIR}/bin"
+HADOOP_DATA_DIR="/var/vcap/store/hbase_master"
+MASTER_HOSTNAME="<%= properties.hbase_master.hostname %>"
+PIDFILE="${RUN_DIR}/hadoop-vcap-namenode.pid"
+TMP_DIR="/var/vcap/store/hbase"
+
+export HADOOP_CONF_DIR="${JOBS_DIR}/config/hadoop"
+export HADOOP_IDENT_STRING="vcap"
+export HADOOP_LOG_DIR=${LOG_DIR}
+export HADOOP_NAMENODE_USER="vcap"
+export HADOOP_PID_DIR=${RUN_DIR}
+export JAVA_HOME="/var/vcap/packages/dea_jvm"
+
+source /var/vcap/packages/common/utils.sh
+
+case $1 in
+
+ start)
+ # hadoop-daemon.sh has its own pid guard,
+ # we use ours for consistency anyway
+ pid_guard $PIDFILE "hadoop_namenode"
+
+ mkdir -p $RUN_DIR
+ mkdir -p $LOG_DIR
+ mkdir -p $HADOOP_DATA_DIR
+ mkdir -p $TMP_DIR
+
+ chown vcap:vcap $RUN_DIR
+ chown vcap:vcap $LOG_DIR
+ chown vcap:vcap $HADOOP_DATA_DIR
+ chown vcap:vcap $TMP_DIR
+ # Set maximum number of open file descriptors.
+ ulimit -n 32768
+ # Set maximum number of processes available to a single user.
+ ulimit -u 32000
+
+ if [ ! -d /var/vcap/store/hbase_master/name ]
+ then
+ cd "${HADOOP_PACKAGE_DIR}/bin"
+ ./hadoop --config $HADOOP_CONF_DIR namenode -format
+ fi
+
+ # hadoop ctl scripts manage their pidfiles,
+ # so we don't attempt to write ours
+ exec chpst -u vcap:vcap $HADOOP_BIN/hadoop-daemon.sh --config $HADOOP_CONF_DIR start namenode \
+ >>$LOG_DIR/hadoop_namenode_start.stdout.log \
+ 2>>$LOG_DIR/hadoop_namenode_start.stderr.log
+
+ ;;
+
+ stop)
+ exec chpst -u vcap:vcap $HADOOP_BIN/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop namenode \
+ >>$LOG_DIR/hadoop_namenode_stop.stdout.log \
+ 2>>$LOG_DIR/hadoop_namenode_stop.stderr.log
+
+ ;;
+
+ *)
+ echo "Usage: hadoop_namenode_ctl {start|stop}"
+
+ ;;
+
+esac
58 jobs/hbase_master/templates/hbase.hadoop-metrics.properties
@@ -0,0 +1,58 @@
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is an hardcoded-name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# Configuration of the "hbase" context for null
+hbase.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "hbase" context for file
+# hbase.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
+# hbase.period=10
+# hbase.fileName=/tmp/metrics_hbase.log
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# hbase.period=10
+# hbase.servers=GMETADHOST_IP:8649
+
+# Configuration of the "jvm" context for null
+jvm.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "jvm" context for file
+# jvm.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
+# jvm.period=10
+# jvm.fileName=/tmp/metrics_jvm.log
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# jvm.period=10
+# jvm.servers=GMETADHOST_IP:8649
+
+# Configuration of the "rpc" context for null
+rpc.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "rpc" context for file
+# rpc.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
+# rpc.period=10
+# rpc.fileName=/tmp/metrics_rpc.log
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# rpc.period=10
+# rpc.servers=GMETADHOST_IP:8649
76 jobs/hbase_master/templates/hbase.hbase-env.sh
@@ -0,0 +1,76 @@
+#
+#/**
+# * Copyright 2007 The Apache Software Foundation
+# *
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+# export JAVA_HOME=/usr/java/jdk1.6.0/
+
+# Extra Java CLASSPATH elements. Optional.
+# export HBASE_CLASSPATH=
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=3072
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export HBASE_OPTS="-ea -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode"
+
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+# export HBASE_MASTER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10101 -javaagent:lib/HelloWorldAgent.jar"
+# export HBASE_REGIONSERVER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10102"
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+# export HBASE_REGIONSERVERS=${HBASE_HOME}/conf/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+# export HBASE_LOG_DIR=${HBASE_HOME}/logs
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+# export HBASE_PID_DIR=/var/hadoop/pids
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage it's own instance of Zookeeper or not.
+# export HBASE_MANAGES_ZK=true
48 jobs/hbase_master/templates/hbase.hbase-site.xml.erb
@@ -0,0 +1,48 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+ <property>
+ <name>hbase.rootdir</name>
+ <value>hdfs://<%= properties.hbase_master.address %>:<%= properties.hbase_master.hadoop_namenode.port %>/hbase</value>
+ </property>
+ <property>
+ <name>hbase.master.port</name>
+ <value><%= properties.hbase_master.hbase_master.port %></value>
+ </property>
+ <property>
+ <name>hbase.master.info.port</name>
+ <value><%= properties.hbase_master.hbase_master.webui_port %></value>
+ </property>
+ <property>
+ <name>hbase.use_ips</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>hbase.master.dns.interface</name>
+ <value>eth0</value>
+ </property>
+ <property>
+ <name>hbase.regionserver.dns.interface</name>
+ <value>eth0</value>
+ </property>
+ <property>
+ <name>hbase.regionserver.port</name>
+ <value><%= properties.hbase_slave.hbase_regionserver.port %></value>
+ </property>
+ <property>
+ <name>hbase.tmp.dir</name>
+ <value>/var/vcap/store/hbase</value>
+ </property>
+ <property>
+ <name>hbase.zookeeper.quorum</name>
+ <value><%= properties.hbase_master.address %></value>
+ </property>
+ <property>
+ <name>hbase.cluster.distributed</name>
+ <value>true</value>
+ <description>The mode the cluster will be in. Possible values are
+ false: standalone and pseudo-distributed setups with managed Zookeeper
+ true: fully-distributed with unmanaged Zookeeper Quorum (see hbase-env.sh)
+ </description>
+ </property>
+</configuration>
13 jobs/hbase_master/templates/hbase.log4j.properties
@@ -0,0 +1,13 @@
+log4j.rootLogger=INFO,FA
+
+log4j.threshhold=ALL
+
+log4j.appender.FA=org.apache.log4j.FileAppender
+log4j.appender.FA.File=/var/vcap/sys/log/hbase_master/hbase_master.log
+log4j.appender.FA.layout=org.apache.log4j.PatternLayout
+log4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+log4j.logger.org.apache.zookeeper=INFO
+log4j.logger.org.apache.hadoop.hbase=DEBUG
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
2  jobs/hbase_master/templates/hbase.regionservers.erb
@@ -0,0 +1,2 @@
+<% for address in properties.hbase_slave.addresses %><%= address %>
+<% end %>
57 jobs/hbase_master/templates/hbase_master_ctl
@@ -0,0 +1,57 @@
+#!/bin/bash -e
+
+HBASE_BIN="/var/vcap/packages/hbase/bin"
+LOG_DIR="/var/vcap/sys/log/hbase_master"
+RUN_DIR="/var/vcap/sys/run/hbase_master"
+
+PIDFILE="${RUN_DIR}/hbase-vcap-master.pid"
+
+export HADOOP_HOME="/var/vcap/packages/hadoop"
+export HBASE_CONF_DIR="/var/vcap/jobs/hbase_master/config/hbase"
+export HBASE_HEAPSIZE=<%= properties.hbase_master.hbase_master.heap_size %>
+export HBASE_IDENT_STRING="vcap"
+export HBASE_LOG_DIR=$LOG_DIR
+export HBASE_PID_DIR=$RUN_DIR
+export JAVA_HOME="/var/vcap/packages/dea_jvm"
+
+source /var/vcap/packages/common/utils.sh
+
+case $1 in
+
+ start)
+ # hbase-daemon.sh has its own pid guard,
+ # we use ours for consistency anyway
+ pid_guard $PIDFILE "hbase_master"
+
+ mkdir -p $RUN_DIR
+ mkdir -p $LOG_DIR
+
+ . ${HBASE_BIN}/hbase-config.sh
+ errCode=$?
+ if [ $errCode -ne 0 ]
+ then
+ exit $errCode
+ fi
+
+ # HBase ctl scripts manage their pidfiles,
+ # so we don't attempt to write ours
+ exec chpst -u vcap:vcap ${HBASE_BIN}/hbase-daemon.sh --config "${HBASE_CONF_DIR}" start master \
+ >>$LOG_DIR/hbase_master_start.stdout.log \
+ 2>>$LOG_DIR/hbase_master_start.stderr.log
+
+ ;;
+
+ stop)
+
+ exec chpst -u vcap:vcap ${HBASE_BIN}/hbase-daemon.sh --config "${HBASE_CONF_DIR}" stop master \
+ >>$LOG_DIR/hbase_master_stop.stdout.log \
+ 2>>$LOG_DIR/hbase_master_stop.stderr.log
+
+ ;;
+
+ *)
+ echo "Usage: hbase_master_ctl {start|stop}"
+
+ ;;
+
+esac
55 jobs/hbase_master/templates/hbase_zookeeper_ctl
@@ -0,0 +1,55 @@
+#!/bin/bash -e
+
+HBASE_BIN="/var/vcap/packages/hbase/bin"
+LOG_DIR="/var/vcap/sys/log/hbase_master"
+RUN_DIR="/var/vcap/sys/run/hbase_master"
+TMP_DIR="/var/vcap/store/hbase"
+
+PIDFILE="${RUN_DIR}/hbase-vcap-zookeeper.pid"
+
+export HADOOP_HOME="/var/vcap/packages/hadoop"
+export HBASE_CONF_DIR="/var/vcap/jobs/hbase_master/config/hbase"
+export HBASE_HEAPSIZE=<%= properties.hbase_master.hbase_zookeeper.heap_size %>
+export HBASE_IDENT_STRING="vcap"
+export HBASE_LOG_DIR=$LOG_DIR
+export HBASE_PID_DIR=$RUN_DIR
+export JAVA_HOME="/var/vcap/packages/dea_jvm"
+
+source /var/vcap/packages/common/utils.sh
+
+case $1 in
+
+ start)
+ # hbase-daemon.sh has its own pid guard,
+ # we use ours for consistency anyway
+ pid_guard $PIDFILE "hbase_zookeeper"
+
+ . ${HBASE_BIN}/hbase-config.sh
+ errCode=$?
+ if [ $errCode -ne 0 ]
+ then
+ exit $errCode
+ fi
+
+ # HBase ctl scripts manage their pidfiles,
+ # so we don't attempt to write ours
+ exec ${HBASE_BIN}/hbase-daemon.sh --config "${HBASE_CONF_DIR}" start zookeeper \
+ >>$LOG_DIR/hbase_zookeeper_start.stdout.log \
+ 2>>$LOG_DIR/hbase_zookeeper_start.stderr.log
+
+ ;;
+
+ stop)
+
+ exec ${HBASE_BIN}/hbase-daemon.sh --config "${HBASE_CONF_DIR}" stop zookeeper \
+ >>$LOG_DIR/hbase_zookeeper_stop.stdout.log \
+ 2>>$LOG_DIR/hbase_zookeeper_stop.stderr.log
+
+ ;;
+
+ *)
+ echo "Usage: hbase_zookeeper_ctl {start|stop}"
+
+ ;;
+
+esac
12 jobs/hbase_slave/monit
@@ -0,0 +1,12 @@
+check process hadoop_datanode
+ with pidfile /var/vcap/sys/run/hbase_slave/hadoop-vcap-datanode.pid
+ start program "/var/vcap/jobs/hbase_slave/bin/hadoop_datanode_ctl start"
+ stop program "/var/vcap/jobs/hbase_slave/bin/hadoop_datanode_ctl stop"
+ group vcap
+
+check process hbase_regionserver
+ with pidfile /var/vcap/sys/run/hbase_slave/hbase-vcap-regionserver.pid
+ start program "/var/vcap/jobs/hbase_slave/bin/hbase_regionserver_ctl start"
+ stop program "/var/vcap/jobs/hbase_slave/bin/hbase_regionserver_ctl stop"
+ depends on hadoop_datanode
+ group vcap
18 jobs/hbase_slave/spec
@@ -0,0 +1,18 @@
+---
+name: hbase_slave
+templates:
+ hadoop.core-site.xml.erb: config/hadoop/core-site.xml
+ hadoop.hdfs-site.xml: config/hadoop/hdfs-site.xml
+ hadoop.slaves.erb: config/hadoop/slaves
+ hadoop_datanode_ctl.erb: bin/hadoop_datanode_ctl
+ hbase.hadoop-metrics.properties: config/hbase/hadoop-metrics.properties
+ hbase.hbase-env.sh: config/hbase/hbase-env.sh
+ hbase.hbase-site.xml.erb: config/hbase/hbase-site.xml
+ hbase.log4j.properties: config/hbase/log4j.properties
+ hbase.regionservers.erb: config/hbase/regionservers
+ hbase_regionserver_ctl: bin/hbase_regionserver_ctl
+packages:
+- common
+- dea_jvm
+- hadoop
+- hbase
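Continuing the hypothetical manifest from the hbase_master example, the slave job could be scaled out as below; the static IPs would have to mirror properties.hbase_slave.addresses so the slaves and regionservers files rendered on the master list the same hosts:

# Hypothetical manifest entry for the slave job (placeholder values).
- name: hbase_slave
  template: hbase_slave
  instances: 2
  resource_pool: infrastructure
  persistent_disk: 32768
  networks:
  - name: default
    static_ips:
    - 10.0.16.11
    - 10.0.16.12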
12 jobs/hbase_slave/templates/hadoop.core-site.xml.erb
@@ -0,0 +1,12 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>fs.default.name</name>
+ <value>hdfs://<%= properties.hbase_master.address %>:<%= properties.hbase_master.hadoop_namenode.port %></value>
+ <description>URI of NameNode.</description>
+ </property>
+</configuration>
17 jobs/hbase_slave/templates/hadoop.hdfs-site.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>dfs.name.dir</name>
+ <value>/var/vcap/store/hbase_master/name</value>
+ <description>Path on the local filesystem where the NameNode stores the namespace and transactions logs persistently.</description>
+ </property>
+ <property>
+ <name>dfs.data.dir</name>
+ <value>/var/vcap/store/hbase_master/data</value>
+ <description>Comma separated list of paths on the local filesystem of where a DataNode should store its blocks.</description>
+ </property>
+</configuration>
2  jobs/hbase_slave/templates/hadoop.slaves.erb
@@ -0,0 +1,2 @@
+<% for address in properties.hbase_slave.addresses %><%= address %>
+<% end %>
62 jobs/hbase_slave/templates/hadoop_datanode_ctl.erb
@@ -0,0 +1,62 @@
+#!/bin/bash -e
+
+LOG_DIR="/var/vcap/sys/log/hbase_slave"
+RUN_DIR="/var/vcap/sys/run/hbase_slave"
+
+HADOOP_BIN="/var/vcap/packages/hadoop/bin"
+HADOOP_DATA_DIR="/var/vcap/store/hbase_master"
+PIDFILE="${RUN_DIR}/hadoop-vcap-datanode.pid"
+TMP_DIR="/var/vcap/store/hbase"
+
+export HADOOP_CONF_DIR="/var/vcap/jobs/hbase_slave/config/hadoop"
+export HADOOP_DATANODE_USER="vcap"
+export HADOOP_IDENT_STRING="vcap"
+export HADOOP_LOG_DIR=${LOG_DIR}
+export HADOOP_PID_DIR=${RUN_DIR}
+export JAVA_HOME="/var/vcap/packages/dea_jvm"
+
+source /var/vcap/packages/common/utils.sh
+
+case $1 in
+
+ start)
+ # hadoop-daemon.sh has its own pid guard,
+ # we use ours for consistency anyway
+ pid_guard $PIDFILE "hadoop_datanode"
+
+ mkdir -p $RUN_DIR
+ mkdir -p $LOG_DIR
+ mkdir -p $HADOOP_DATA_DIR
+ mkdir -p $TMP_DIR
+
+ chown vcap:vcap $RUN_DIR
+ chown vcap:vcap $LOG_DIR
+ chown vcap:vcap $HADOOP_DATA_DIR
+ chown vcap:vcap $TMP_DIR
+
+ # Set maximum number of open file descriptors.
+ ulimit -n 32768
+ # Set maximum number of processes available to a single user.
+ ulimit -u 32000
+
+ # hadoop ctl scripts manage their pidfiles,
+ # so we don't attempt to write ours
+ exec chpst -u vcap:vcap $HADOOP_BIN/hadoop-daemon.sh --config $HADOOP_CONF_DIR start datanode \
+ >>$LOG_DIR/hadoop_datanode_start.stdout.log \
+ 2>>$LOG_DIR/hadoop_datanode_start.stderr.log
+
+ ;;
+
+ stop)
+ exec chpst -u vcap:vcap $HADOOP_BIN/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop datanode \
+ >>$LOG_DIR/hadoop_datanode_stop.stdout.log \
+ 2>>$LOG_DIR/hadoop_datanode_stop.stderr.log
+
+ ;;
+
+ *)
+ echo "Usage: hadoop_datanode_ctl {start|stop}"
+
+ ;;
+
+esac
58 jobs/hbase_slave/templates/hbase.hadoop-metrics.properties
@@ -0,0 +1,58 @@
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is an hardcoded-name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# Configuration of the "hbase" context for null
+hbase.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "hbase" context for file
+# hbase.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
+# hbase.period=10
+# hbase.fileName=/tmp/metrics_hbase.log
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# hbase.period=10
+# hbase.servers=GMETADHOST_IP:8649
+
+# Configuration of the "jvm" context for null
+jvm.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "jvm" context for file
+# jvm.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
+# jvm.period=10
+# jvm.fileName=/tmp/metrics_jvm.log
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# jvm.period=10
+# jvm.servers=GMETADHOST_IP:8649
+
+# Configuration of the "rpc" context for null
+rpc.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "rpc" context for file
+# rpc.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
+# rpc.period=10
+# rpc.fileName=/tmp/metrics_rpc.log
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# rpc.period=10
+# rpc.servers=GMETADHOST_IP:8649
76 jobs/hbase_slave/templates/hbase.hbase-env.sh
@@ -0,0 +1,76 @@
+#
+#/**
+# * Copyright 2007 The Apache Software Foundation
+# *
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements. See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership. The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+# export JAVA_HOME=/usr/java/jdk1.6.0/
+
+# Extra Java CLASSPATH elements. Optional.
+# export HBASE_CLASSPATH=
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=3072
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export HBASE_OPTS="-ea -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode"
+
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+# export HBASE_MASTER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10101 -javaagent:lib/HelloWorldAgent.jar"
+# export HBASE_REGIONSERVER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10102"
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+# export HBASE_REGIONSERVERS=${HBASE_HOME}/conf/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+# export HBASE_LOG_DIR=${HBASE_HOME}/logs
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+# export HBASE_PID_DIR=/var/hadoop/pids
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage it's own instance of Zookeeper or not.
+# export HBASE_MANAGES_ZK=true
48 jobs/hbase_slave/templates/hbase.hbase-site.xml.erb
@@ -0,0 +1,48 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+ <property>
+ <name>hbase.rootdir</name>
+ <value>hdfs://<%= properties.hbase_master.address %>:<%= properties.hbase_master.hadoop_namenode.port %>/hbase</value>
+ </property>
+ <property>
+ <name>hbase.master.port</name>
+ <value><%= properties.hbase_master.hbase_master.port %></value>
+ </property>
+ <property>
+ <name>hbase.master.info.port</name>
+ <value><%= properties.hbase_master.hbase_master.webui_port %></value>
+ </property>
+ <property>
+ <name>hbase.use_ips</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>hbase.master.dns.interface</name>
+ <value>eth0</value>
+ </property>
+ <property>
+ <name>hbase.regionserver.dns.interface</name>
+ <value>eth0</value>
+ </property>
+ <property>
+ <name>hbase.regionserver.port</name>
+ <value><%= properties.hbase_slave.hbase_regionserver.port %></value>
+ </property>
+ <property>
+ <name>hbase.tmp.dir</name>
+ <value>/var/vcap/store/hbase</value>
+ </property>
+ <property>
+ <name>hbase.zookeeper.quorum</name>
+ <value><%= properties.hbase_master.address %></value>
+ </property>
+ <property>
+ <name>hbase.cluster.distributed</name>
+ <value>true</value>
+ <description>The mode the cluster will be in. Possible values are
+ false: standalone and pseudo-distributed setups with managed Zookeeper
+ true: fully-distributed with unmanaged Zookeeper Quorum (see hbase-env.sh)
+ </description>
+ </property>
+</configuration>
13 jobs/hbase_slave/templates/hbase.log4j.properties
@@ -0,0 +1,13 @@
+log4j.rootLogger=INFO,FA
+
+log4j.threshhold=ALL
+
+log4j.appender.FA=org.apache.log4j.FileAppender
+log4j.appender.FA.File=/var/vcap/sys/log/hbase_slave/hbase_regionserver.log
+log4j.appender.FA.layout=org.apache.log4j.PatternLayout
+log4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+log4j.logger.org.apache.zookeeper=INFO
+log4j.logger.org.apache.hadoop.hbase=DEBUG
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
2  jobs/hbase_slave/templates/hbase.regionservers.erb
@@ -0,0 +1,2 @@
+<% for address in properties.hbase_slave.addresses %><%= address %>
+<% end %>
55 jobs/hbase_slave/templates/hbase_regionserver_ctl
@@ -0,0 +1,55 @@
+#!/bin/bash -e
+
+HBASE_BIN="/var/vcap/packages/hbase/bin"
+RUN_DIR="/var/vcap/sys/run/hbase_slave"
+LOG_DIR="/var/vcap/sys/log/hbase_slave"
+
+PIDFILE="${RUN_DIR}/hbase-vcap-regionserver.pid"
+
+export JAVA_HOME="/var/vcap/packages/dea_jvm"
+export HBASE_HEAPSIZE=<%= properties.hbase_slave.hbase_regionserver.heap_size %>
+export HBASE_IDENT_STRING="vcap"
+export HBASE_PID_DIR=$RUN_DIR
+export HBASE_LOG_DIR=$LOG_DIR
+export HBASE_CONF_DIR="/var/vcap/jobs/hbase_slave/config/hbase"
+export HADOOP_HOME="/var/vcap/packages/hadoop"
+
+source /var/vcap/packages/common/utils.sh
+
+case $1 in
+
+ start)
+ # start-hbase.sh has its own pid guard,
+ # we use ours for consistency anyway
+ pid_guard $PIDFILE "hbase_regionserver"
+
+ mkdir -p $RUN_DIR
+ mkdir -p $LOG_DIR
+
+ . ${HBASE_BIN}/hbase-config.sh
+ errCode=$?
+ if [ $errCode -ne 0 ]
+ then
+ exit $errCode
+ fi
+
+ exec chpst -u vcap:vcap ${HBASE_BIN}/hbase-daemon.sh --config "${HBASE_CONF_DIR}" start regionserver \
+ >>$LOG_DIR/hbase_regionserver-start.stdout.log \
+ 2>>$LOG_DIR/hbase_regionserver-start.stderr.log
+
+ ;;
+
+ stop)
+
+ exec chpst -u vcap:vcap ${HBASE_BIN}/hbase-daemon.sh --config "${HBASE_CONF_DIR}" stop regionserver \
+ >>$LOG_DIR/hbase_regionserver-start.stdout.log \
+ 2>>$LOG_DIR/hbase_regionserver-start.stderr.log
+
+ ;;
+
+ *)
+ echo "Usage: hbase_ctl {start|stop}"
+
+ ;;
+
+esac
7 jobs/opentsdb/monit
@@ -1,12 +1,5 @@
-check process hbase
- with pidfile /var/vcap/sys/run/opentsdb/hbase-vcap-master.pid
- start program "/var/vcap/jobs/opentsdb/bin/hbase_ctl start"
- stop program "/var/vcap/jobs/opentsdb/bin/hbase_ctl stop"
- group vcap
-
check process opentsdb
with pidfile /var/vcap/sys/run/opentsdb/opentsdb.pid
start program "/var/vcap/jobs/opentsdb/bin/opentsdb_ctl start"
stop program "/var/vcap/jobs/opentsdb/bin/opentsdb_ctl stop"
- depends on hbase
group vcap
17 jobs/opentsdb/spec
@@ -1,18 +1,17 @@
---
name: opentsdb
templates:
- opentsdb_ctl: bin/opentsdb_ctl
- hbase_ctl: bin/hbase_ctl
- tsdb: bin/tsdb
cleanup_tmpdir: bin/cleanup_tmpdir
hbase.hadoop-metrics.properties: config/hbase/hadoop-metrics.properties
hbase.hbase-env.sh: config/hbase/hbase-env.sh
+ hbase.hbase-site.xml.erb: config/hbase/hbase-site.xml
hbase.log4j.properties: config/hbase/log4j.properties
- hbase.hbase-site.xml: config/hbase/hbase-site.xml
- hbase.regionservers: config/hbase/regionservers
+ hbase.regionservers.erb: config/hbase/regionservers
opentsdb.logback.xml: config/opentsdb/logback.xml
+ opentsdb_ctl.erb: bin/opentsdb_ctl
+ tsdb: bin/tsdb
packages:
-- common
-- hbase
-- opentsdb
-- dea_jvm
+ - common
+ - dea_jvm
+ - hbase
+ - opentsdb
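With hbase_ctl gone from the monit file, the opentsdb job only runs the TSD itself and reaches HBase through the shared hbase_master/hbase_slave properties. A matching entry in the same hypothetical manifest might look like:

# Hypothetical manifest entry for opentsdb (placeholder values).
- name: opentsdb
  template: opentsdb
  instances: 1
  resource_pool: infrastructure
  networks:
  - name: default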
4 jobs/opentsdb/templates/cleanup_tmpdir
@@ -13,7 +13,3 @@ if [[ $dir && $threshold && -d $dir ]]; then
else
echo "Usage: $0 <dir> <threshold>"
fi
-
-
-
-
2  jobs/opentsdb/templates/hbase.hbase-env.sh
@@ -28,7 +28,7 @@
# export HBASE_CLASSPATH=
# The maximum amount of heap to use, in MB. Default is 1000.
-export HBASE_HEAPSIZE=3072
+# export HBASE_HEAPSIZE=3072
# Extra Java runtime options.
# Below are what we set by default. May only work with SUN JVM.
16 jobs/opentsdb/templates/hbase.hbase-site.xml
@@ -1,16 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
- <property>
- <name>hbase.rootdir</name>
- <value>file:////var/vcap/store/hbase</value>
- </property>
- <property>
- <name>hbase.tmp.dir</name>
- <value>/var/vcap/data/hbase</value>
- </property>
- <property>
- <name>hbase.zookeeper.dns.interface</name>
- <value>lo</value>
- </property>
-</configuration>
48 jobs/opentsdb/templates/hbase.hbase-site.xml.erb
@@ -0,0 +1,48 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+ <property>
+ <name>hbase.rootdir</name>
+ <value>hdfs://<%= properties.hbase_master.address %>:<%= properties.hbase_master.hadoop_namenode.port %>/hbase</value>
+ </property>
+ <property>
+ <name>hbase.master.port</name>
+ <value><%= properties.hbase_master.hbase_master.port %></value>
+ </property>
+ <property>
+ <name>hbase.master.info.port</name>
+ <value><%= properties.hbase_master.hbase_master.webui_port %></value>
+ </property>
+ <property>
+ <name>hbase.use_ips</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>hbase.master.dns.interface</name>
+ <value>eth0</value>
+ </property>
+ <property>
+ <name>hbase.regionserver.dns.interface</name>
+ <value>eth0</value>
+ </property>
+ <property>
+ <name>hbase.regionserver.port</name>
+ <value><%= properties.hbase_slave.hbase_regionserver.port %></value>
+ </property>
+ <property>
+ <name>hbase.tmp.dir</name>
+ <value>/var/vcap/store/hbase</value>
+ </property>
+ <property>
+ <name>hbase.zookeeper.quorum</name>
+ <value><%= properties.hbase_master.address %></value>
+ </property>
+ <property>
+ <name>hbase.cluster.distributed</name>
+ <value>true</value>
+ <description>The mode the cluster will be in. Possible values are
+ false: standalone and pseudo-distributed setups with managed Zookeeper
+ true: fully-distributed with unmanaged Zookeeper Quorum (see hbase-env.sh)
+ </description>
+ </property>
+</configuration>
2  jobs/opentsdb/templates/hbase.log4j.properties
@@ -3,7 +3,7 @@ log4j.rootLogger=INFO,FA
log4j.threshhold=ALL
log4j.appender.FA=org.apache.log4j.FileAppender
-log4j.appender.FA.File=/var/vcap/sys/log/opentsdb/hbase.log
+log4j.appender.FA.File=/var/vcap/sys/log/hbase_master/hbase_master.log
log4j.appender.FA.layout=org.apache.log4j.PatternLayout
log4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
1  jobs/opentsdb/templates/hbase.regionservers
@@ -1 +0,0 @@
-localhost
2  jobs/opentsdb/templates/hbase.regionservers.erb
@@ -0,0 +1,2 @@
+<% for address in properties.hbase_slave.addresses %><%= address %>
+<% end %>
54 jobs/opentsdb/templates/hbase_ctl
@@ -1,54 +0,0 @@
-#!/bin/bash -e
-
-RUN_DIR=/var/vcap/sys/run/opentsdb
-LOG_DIR=/var/vcap/sys/log/opentsdb
-PIDFILE=$RUN_DIR/hbase-vcap-master.pid
-
-export JAVA_HOME=/var/vcap/packages/dea_jvm
-export HBASE_IDENT_STRING=vcap
-export HBASE_PID_DIR=$RUN_DIR
-export HBASE_LOG_DIR=$LOG_DIR
-export HBASE_CONF_DIR=/var/vcap/jobs/opentsdb/config/hbase
-
-source /var/vcap/packages/common/utils.sh
-
-case $1 in
-
- start)
- # start-hbase.sh has its own pid guard,
- # we use ours for consistency anyway
- pid_guard $PIDFILE "HBase"
-
- mkdir -p $RUN_DIR
- mkdir -p $LOG_DIR
-
- mkdir -p /var/vcap/data/hbase
- mkdir -p /var/vcap/store/hbase
-
- ulimit -n 32768
- ulimit -u 32000
-
- # HBase ctl scripts manage their pidfiles,
- # so we don't attempt to write ours
- cd /var/vcap/packages/hbase/
- exec bin/start-hbase.sh \
- >>$LOG_DIR/hbase-start.stdout.log \
- 2>>$LOG_DIR/hbase-start.stderr.log
-
- ;;
-
- stop)
- cd /var/vcap/packages/hbase/
-
- exec bin/stop-hbase.sh \
- >>$LOG_DIR/hbase-stop.stdout.log \
- 2>>$LOG_DIR/hbase-stop.stderr.log
-
- ;;
-
- *)
- echo "Usage: hbase_ctl {start|stop}"
-
- ;;
-
-esac
29 jobs/opentsdb/templates/opentsdb_ctl → jobs/opentsdb/templates/opentsdb_ctl.erb
@@ -1,16 +1,18 @@
#!/bin/bash -e
+export HBASE_CONF_DIR=/var/vcap/jobs/opentsdb/config/hbase
export JAVA_HOME=/var/vcap/packages/dea_jvm
export PATH=/var/vcap/packages/dea_jvm/bin:$PATH
-BIN_DIR=/var/vcap/jobs/opentsdb/bin
-RUN_DIR=/var/vcap/sys/run/opentsdb
-LOG_DIR=/var/vcap/sys/log/opentsdb
-PIDFILE=$RUN_DIR/opentsdb.pid
-TSDTMP=/var/vcap/data/tsdtmp
+BIN_DIR="/var/vcap/jobs/opentsdb/bin"
+RUN_DIR="/var/vcap/sys/run/opentsdb"
+LOG_DIR="/var/vcap/sys/log/opentsdb"
+PIDFILE="${RUN_DIR}/opentsdb.pid"
+TSDTMP="/var/vcap/data/tsdtmp"
+FAKE_ZOOKEEPER_HOSTNAME="fake-zookeeper-hostname"
-TSDB=$BIN_DIR/tsdb
-CLEANUP_SCRIPT=/etc/cron.d/cleanup_tsdb_tmpdir
+TSDB="${BIN_DIR}/tsdb"
+CLEANUP_SCRIPT="/etc/cron.d/cleanup_tsdb_tmpdir"
source /var/vcap/packages/common/utils.sh
@@ -19,6 +21,11 @@ case $1 in
start)
pid_guard $PIDFILE "OpenTSDB"
+ if [ ! -f /etc/hosts_saved ]; then
+ cp /etc/hosts /etc/hosts_saved
+ echo "<%= properties.hbase_master.address %> $FAKE_ZOOKEEPER_HOSTNAME" >> /etc/hosts
+ fi
+
mkdir -p $RUN_DIR
mkdir -p $LOG_DIR
echo $$ > $PIDFILE
@@ -36,8 +43,8 @@ case $1 in
echo "*/10 * * * * root $BIN_DIR/cleanup_tmpdir $TSDTMP 400" > $CLEANUP_SCRIPT
cd /var/vcap/packages/opentsdb/
-
- env COMPRESSION=snappy HBASE_HOME=/var/vcap/packages/hbase ./src/create_table.sh
+ export HBASE_HOME=/var/vcap/packages/hbase
+ env COMPRESSION=SNAPPY HBASE_HOME=/var/vcap/packages/hbase ./src/create_table.sh
METRICS=$( cat <<SETVAR
mem
@@ -105,12 +112,14 @@ case $1 in
SETVAR
)
- $TSDB mkmetric $METRICS
+ echo "$TSDB mkmetric --zkquorum=$FAKE_ZOOKEEPER_HOSTNAME $METRICS"
+ $TSDB mkmetric --zkquorum=$FAKE_ZOOKEEPER_HOSTNAME $METRICS
exec $TSDB tsd \
--port=<%= properties.opentsdb.port %> \
--staticroot=build/staticroot \
--cachedir=$TSDTMP \
+ --zkquorum=$FAKE_ZOOKEEPER_HOSTNAME \
>>$LOG_DIR/opentsdb.stdout.log \
2>>$LOG_DIR/opentsdb.stderr.log
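For illustration, given the property value below, the ctl script above appends "10.0.16.10 fake-zookeeper-hostname" to /etc/hosts on first start and passes --zkquorum=fake-zookeeper-hostname to the TSD, so the ZooKeeper quorum is always reached through that stable name; the address itself is a placeholder:

# Placeholder value for illustration; only hbase_master.address is read here.
properties:
  hbase_master:
    address: 10.0.16.10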
8 packages/hadoop/packaging
@@ -0,0 +1,8 @@
+# abort script on any command that exit with a non zero value
+set -e
+
+tar zxf hadoop/hadoop-0.20.2-cdh3u4.tar.gz
+
+cd hadoop-0.20.2-cdh3u4
+
+cp -a * $BOSH_INSTALL_TARGET
4 packages/hadoop/spec
@@ -0,0 +1,4 @@
+---
+name: hadoop
+files:
+- hadoop/hadoop-0.20.2-cdh3u4.tar.gz
10 packages/hbase/packaging
@@ -1,14 +1,8 @@
# abort script on any command that exit with a non zero value
set -e
-tar zxf hbase/hbase-0.90.3-cdh3u1.tar.gz
+tar zxf hbase/hbase-0.90.6-cdh3u4-vmware-patched.tar.gz
-cd hbase-0.90.3-cdh3u1
+cd hbase-0.90.6-cdh3u4
cp -a * $BOSH_INSTALL_TARGET
-
-cd $BOSH_COMPILE_TARGET
-
-tar zxf hbase/native-lib.tar.gz
-
-cp -R native/* $BOSH_INSTALL_TARGET/lib/native
3  packages/hbase/spec
@@ -1,5 +1,4 @@
---
name: hbase
files:
-- hbase/hbase-0.90.3-cdh3u1.tar.gz
-- hbase/native-lib.tar.gz
+- hbase/hbase-0.90.6-cdh3u4-vmware-patched.tar.gz