#!/usr/bin/env bash
# the following script sets up the log stack (logstash + elasticsearch + redis + kibana) described at:
# http://cleversoft.wordpress.com/2013/04/05/887/
trap _cleanup SIGINT SIGTERM #trap ctrl-c
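# packages required by the stack and packages to purge (unneeded MTAs and services)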
apps_deps="git-core byobu htop rsync curl bzip2 gzip html2text ncurses-bin
command-not-found openjdk-7-jre-headless build-essential ntp unzip apache2"
apps_purge="xinetd sasl2-bin sendmail-base sendmail-bin sensible-mda rmail
bsd-mailx sendmail"
logstash_version="1.1.13"
elasticsearch_version="0.20.6"
redis_version="2.6.14"
ES_HOME=/home/elasticsearch
REDIS_HOME=/home/redis
LOGSTASH_HOME=/home/logstash
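# _header(): print a banner with the logstash, elasticsearch and redis versions to be installed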
_header() {
clear
printf "%b\\n" "\033[1m----------------------\033[7m log setup stack \033[0m\033[1m--------------------------\033[0m"
printf "%b\\n" "\033[1m logstash:\033[0m ${logstash_version}"
printf "%b\\n" "\033[1m elasticsearch:\033[0m ${elasticsearch_version}"
printf "%b\\n" "\033[1m redis:\033[0m ${redis_version}"
printf "%b\\n" "\033[1m--------------------------------------------------------------------\033[0m"
printf "\\n"
}
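# _cmd(): print a command, run it via eval, and abort the script on a non-zero exit status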
_cmd() {
[ -z "${1}" ] && return 0
printf "%s\\n" " $ ${@}"
eval "${@}"
status="${?}"
[ X"${status}" != X"0" ] && exit "${status}" || return
}
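# _cmdsudo(): same as _cmd() but runs the command through sudo, feeding it the cached password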
_cmdsudo() {
[ -z "${1}" ] && return 0
printf "%s\\n" " $ ${@}"
printf "%s\\n" "${sudopwd}" | ${sudocmd} eval "${@}"
status="${?}"
[ X"${status}" != X"0" ] && exit "${status}" || return
}
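# _userexist(): return 0 if the given system user exists, 1 otherwise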
_userexist() {
if id -u "${1}" >/dev/null 2>&1; then
return 0
else
return 1
fi
}
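# _handscui(): display a small text spinner while the given PID is still alive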
_handscui() {
[ -z "${1}" ] && { printf "%5s\n" ""; return 1; }
pid="${1}"
animation_state="1"
if [ ! "$(ps -p "${pid}" -o comm=)" ]; then
printf "%5s\n" ""
return
fi
printf "%s" " "
while [ "$(ps -p "${pid}" -o comm=)" ]; do
printf "%b" "\b\b\b\b\b"
case "${animation_state}" in
1) printf "%s" '\o@o\'
animation_state="2" ;;
2) printf "%s" '|o@o|'
animation_state="3" ;;
3) printf "%s" '/o@o/'
animation_state="4" ;;
4) printf "%s" '|o@o|'
animation_state="1" ;;
esac
sleep 1
done
printf "%b" "\b\b\b\b\b" && printf "%5s\n" ""
}
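# _getroot(): when not running as root, verify sudo is available and ask (up to 3 times) for a valid sudo password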
_getroot() {
if [ ! X"${LOGNAME}" = X"root" ]; then
printf "%s\\n" "Detecting user ${LOGNAME} (non-root) ..."
printf "%s\\n" "Checking if sudo is available ..."
if command -v "sudo" >/dev/null 2>&1; then
sudo -K
if [ -n "${sudopwd}" ]; then
# password check
_getroot__test="$(printf "%s\\n" "${sudopwd}" | sudo -S ls 2>&1)"
_getroot__status="${?}"
_getroot__not_allowed="$(printf "%s" "${_getroot__test}" | \
grep -i "sudoers")"
if [ -n "${_getroot__not_allowed}" ]; then
printf "%s %s\\n" "You're not allowed to use sudo," \
"get in contact with your local administrator"
exit
fi
if [ X"${_getroot__status}" != X"0" ]; then
sudopwd=""
printf "%s\\n" "Incorrect preseed password"
exit
else
sudocmd="sudo -S"
fi
printf "%s\\n" " - all set ..."
return
fi
i=0 ; while [ "${i}" -lt "3" ]; do
i="$((${i} + 1))"
printf "%s" " - enter sudo password: "
stty -echo
read sudopwd
stty echo
# password check
_getroot__test="$(printf "%s\\n" "${sudopwd}" | sudo -S ls 2>&1)"
_getroot__status="${?}"
_getroot__not_allowed="$(printf "%s" "${_getroot__test}" | \
grep -i "sudoers")"
if [ -n "${_getroot__not_allowed}" ]; then
printf "\\n%s %s\\n" "You're not allowed to use sudo," \
"get in contact with your local administrator"
exit
fi
printf "\\n"
if [ X"${_getroot__status}" != X"0" ]; then
sudopwd=""
else
sudocmd="sudo -S"
break
fi
done
if [ -z "${sudopwd}" ]; then
printf "%s\\n" "Failed authentication"
exit
fi
else
printf "%s %s\\n" "You're not root and sudo isn't available." \
"Please run this script as root!"
exit
fi
fi
}
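# _cleanup(): triggered on ctrl-c, removes everything the script may have installed so far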
_cleanup() {
printf "\\n"
printf "%b\\n" "\033[1m-------------------\033[7m Cleanup \033[0m\033[1m-------------------\033[0m"
printf "%s\\n" "[+] removing files ..."
_cmdsudo rm -rf ${ES_HOME}
_cmdsudo rm -rf ${LOGSTASH_HOME}
_cmdsudo rm -rf ${REDIS_HOME}
_cmdsudo rm -rf /etc/init.d/elasticsearch
_cmdsudo rm -rf /etc/init.d/redis
_cmdsudo rm -rf /etc/init.d/logstash-indexer
_cmdsudo rm -rf /etc/init.d/logstash-shipper
_cmdsudo rm -rf /var/www/*
[ -z "${1}" ] && exit
}
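# _waitfor(): run a command in the background and show the spinner until it finishes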
_waitfor() {
[ -z "${1}" ] && return 1
printf "%s" " $ ${@} ..."
eval "${@}" > /dev/null 2>&1 &
sleep 1s
_handscui "$(pidof ${1})"
}
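# _waitforsudo(): like _waitfor() but runs the command through sudo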
_waitforsudo() {
[ -z "${1}" ] && return 1
printf "%s" " $ sudo ${@} ..."
printf "%s\\n" "${sudopwd}" | ${sudocmd} eval "${@}" > /dev/null 2>&1 &
sleep 1s
_handscui "$(pidof ${1})"
}
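# _mk_conf_files(): write the config files and init scripts used later (elasticsearch.yml,
# elasticsearch.conf, redis.conf, redis, indexer.conf, shipper.conf, logstash-indexer,
# logstash-shipper) into the current directory via heredocs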
_mk_conf_files() {
cat > elasticsearch.yml << 'EOF'
##################### ElasticSearch Configuration Example #####################
# This file contains an overview of various configuration settings,
# targeted at operations staff. Application developers should
# consult the guide at <http://elasticsearch.org/guide>.
#
# The installation procedure is covered at
# <http://elasticsearch.org/guide/reference/setup/installation.html>.
#
# ElasticSearch comes with reasonable defaults for most settings,
# so you can try it out without bothering with configuration.
#
# Most of the time, these defaults are just fine for running a production
# cluster. If you're fine-tuning your cluster, or wondering about the
# effect of a certain configuration option, please _do ask_ on the
# mailing list or IRC channel [http://elasticsearch.org/community].
# Any element in the configuration can be replaced with environment variables
# by placing them in ${...} notation. For example:
#
# node.rack: ${RACK_ENV_VAR}
# See <http://elasticsearch.org/guide/reference/setup/configuration.html>
# for information on supported formats and syntax for the configuration file.
################################### Cluster ###################################
# Cluster name identifies your cluster for auto-discovery. If you're running
# multiple clusters on the same network, make sure you're using unique names.
#
cluster.name: elasticsearch-kibana
#################################### Node #####################################
# Node names are generated dynamically on startup, so you're relieved
# from configuring them manually. You can tie this node to a specific name:
#
node.name: "elasticsearch-node"
# Every node can be configured to allow or deny being eligible as the master,
# and to allow or deny to store the data.
#
# Allow this node to be eligible as a master node (enabled by default):
#
# node.master: true
#
# Allow this node to store data (enabled by default):
#
# node.data: true
# You can exploit these settings to design advanced cluster topologies.
#
# 1. You want this node to never become a master node, only to hold data.
# This will be the "workhorse" of your cluster.
#
# node.master: false
# node.data: true
#
# 2. You want this node to only serve as a master: to not store any data and
# to have free resources. This will be the "coordinator" of your cluster.
#
# node.master: true
# node.data: false
#
# 3. You want this node to be neither master nor data node, but
# to act as a "search load balancer" (fetching data from nodes,
# aggregating results, etc.)
#
# node.master: false
# node.data: false
# Use the Cluster Health API [http://localhost:9200/_cluster/health], the
# Node Info API [http://localhost:9200/_cluster/nodes] or GUI tools
# such as <http://github.com/lukas-vlcek/bigdesk> and
# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state.
# A node can have generic attributes associated with it, which can later be used
# for customized shard allocation filtering, or allocation awareness. An attribute
# is a simple key value pair, similar to node.key: value, here is an example:
#
# node.rack: rack314
# By default, multiple nodes are allowed to start from the same installation location
# to disable it, set the following:
# node.max_local_storage_nodes: 1
#################################### Index ####################################
# You can set a number of options (such as shard/replica options, mapping
# or analyzer definitions, translog settings, ...) for indices globally,
# in this file.
#
# Note, that it makes more sense to configure index settings specifically for
# a certain index, either when creating it or by using the index templates API.
#
# See <http://elasticsearch.org/guide/reference/index-modules/> and
# <http://elasticsearch.org/guide/reference/api/admin-indices-create-index.html>
# for more information.
# Set the number of shards (splits) of an index (5 by default):
#
# index.number_of_shards: 5
# Set the number of replicas (additional copies) of an index (1 by default):
#
# index.number_of_replicas: 1
# Note, that for development on a local machine, with small indices, it usually
# makes sense to "disable" the distributed features:
#
# index.number_of_shards: 1
# index.number_of_replicas: 0
# These settings directly affect the performance of index and search operations
# in your cluster. Assuming you have enough machines to hold shards and
# replicas, the rule of thumb is:
#
# 1. Having more *shards* enhances the _indexing_ performance and allows to
# _distribute_ a big index across machines.
# 2. Having more *replicas* enhances the _search_ performance and improves the
# cluster _availability_.
#
# The "number_of_shards" is a one-time setting for an index.
#
# The "number_of_replicas" can be increased or decreased anytime,
# by using the Index Update Settings API.
#
# ElasticSearch takes care about load balancing, relocating, gathering the
# results from nodes, etc. Experiment with different settings to fine-tune
# your setup.
# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect
# the index status.
#################################### Paths ####################################
# Path to directory containing configuration (this file and logging.yml):
#
path.conf: /home/elasticsearch/config
# Path to directory where to store index data allocated for this node.
#
path.data: /home/elasticsearch/data
#
# Can optionally include more than one location, causing data to be striped across
# the locations (a la RAID 0) on a file level, favouring locations with most free
# space on creation. For example:
#
# path.data: /path/to/data1,/path/to/data2
# Path to temporary files:
#
path.work: /home/elasticsearch/tmp
# path.work: /path/to/work
# Path to log files:
#
path.logs: /home/elasticsearch/logs
# path.logs: /path/to/logs
# Path to where plugins are installed:
#
# path.plugins: /path/to/plugins
#################################### Plugin ###################################
# If a plugin listed here is not installed for current node, the node will not start.
#
# plugin.mandatory: mapper-attachments,lang-groovy
################################### Memory ####################################
# ElasticSearch performs poorly when JVM starts swapping: you should ensure that
# it _never_ swaps.
#
# Set this property to true to lock the memory:
#
bootstrap.mlockall: true
# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
# to the same value, and that the machine has enough memory to allocate
# for ElasticSearch, leaving enough memory for the operating system itself.
#
# You should also make sure that the ElasticSearch process is allowed to lock
# the memory, eg. by using `ulimit -l unlimited`.
############################## Network And HTTP ###############################
# ElasticSearch, by default, binds itself to the 0.0.0.0 address, and listens
# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node
# communication. (the range means that if the port is busy, it will automatically
# try the next port).
# Set the bind address specifically (IPv4 or IPv6):
#
# network.bind_host: 192.168.0.1
# Set the address other nodes will use to communicate with this node. If not
# set, it is automatically derived. It must point to an actual IP address.
#
# network.publish_host: 192.168.0.1
# Set both 'bind_host' and 'publish_host':
#
# network.host: 192.168.0.1
# Set a custom port for the node to node communication (9300 by default):
#
# transport.tcp.port: 9300
# Enable compression for all communication between nodes (disabled by default):
#
# transport.tcp.compress: true
# Set a custom port to listen for HTTP traffic:
#
# http.port: 9200
# Set a custom allowed content length:
#
# http.max_content_length: 100mb
# Disable HTTP completely:
#
# http.enabled: false
################################### Gateway ###################################
# The gateway allows for persisting the cluster state between full cluster
# restarts. Every change to the state (such as adding an index) will be stored
# in the gateway, and when the cluster starts up for the first time,
# it will read its state from the gateway.
# There are several types of gateway implementations. For more information,
# see <http://elasticsearch.org/guide/reference/modules/gateway>.
# The default gateway type is the "local" gateway (recommended):
#
# gateway.type: local
# Settings below control how and when to start the initial recovery process on
# a full cluster restart (to reuse as much local data as possible when using shared
# gateway).
# Allow recovery process after N nodes in a cluster are up:
#
# gateway.recover_after_nodes: 1
# Set the timeout to initiate the recovery process, once the N nodes
# from previous setting are up (accepts time value):
#
# gateway.recover_after_time: 5m
# Set how many nodes are expected in this cluster. Once these N nodes
# are up (and recover_after_nodes is met), begin recovery process immediately
# (without waiting for recover_after_time to expire):
#
# gateway.expected_nodes: 2
############################# Recovery Throttling #############################
# These settings allow to control the process of shards allocation between
# nodes during initial recovery, replica allocation, rebalancing,
# or when adding and removing nodes.
# Set the number of concurrent recoveries happening on a node:
#
# 1. During the initial recovery
#
# cluster.routing.allocation.node_initial_primaries_recoveries: 4
#
# 2. During adding/removing nodes, rebalancing, etc
#
# cluster.routing.allocation.node_concurrent_recoveries: 2
# Set to throttle throughput when recovering (eg. 100mb, by default unlimited):
#
# indices.recovery.max_size_per_sec: 0
# Set to limit the number of open concurrent streams when
# recovering a shard from a peer:
#
# indices.recovery.concurrent_streams: 5
################################## Discovery ##################################
# Discovery infrastructure ensures nodes can be found within a cluster
# and master node is elected. Multicast discovery is the default.
# Set to ensure a node sees N other master eligible nodes to be considered
# operational within the cluster. Set this option to a higher value (2-4)
# for large clusters (>3 nodes):
#
# discovery.zen.minimum_master_nodes: 1
# Set the time to wait for ping responses from other nodes when discovering.
# Set this option to a higher value on a slow or congested network
# to minimize discovery failures:
#
# discovery.zen.ping.timeout: 3s
# See <http://elasticsearch.org/guide/reference/modules/discovery/zen.html>
# for more information.
# Unicast discovery allows to explicitly control which nodes will be used
# to discover the cluster. It can be used when multicast is not present,
# or to restrict the cluster communication-wise.
#
# 1. Disable multicast discovery (enabled by default):
#
# discovery.zen.ping.multicast.enabled: false
#
# 2. Configure an initial list of master nodes in the cluster
# to perform discovery when new nodes (master or data) are started:
#
# discovery.zen.ping.unicast.hosts: ["host1", "host2:port", "host3[portX-portY]"]
# EC2 discovery allows to use AWS EC2 API in order to perform discovery.
#
# You have to install the cloud-aws plugin for enabling the EC2 discovery.
#
# See <http://elasticsearch.org/guide/reference/modules/discovery/ec2.html>
# for more information.
#
# See <http://elasticsearch.org/tutorials/2011/08/22/elasticsearch-on-ec2.html>
# for a step-by-step tutorial.
################################## Slow Log ##################################
# Shard level query and fetch threshold logging.
#index.search.slowlog.threshold.query.warn: 10s
#index.search.slowlog.threshold.query.info: 5s
#index.search.slowlog.threshold.query.debug: 2s
#index.search.slowlog.threshold.query.trace: 500ms
#index.search.slowlog.threshold.fetch.warn: 1s
#index.search.slowlog.threshold.fetch.info: 800ms
#index.search.slowlog.threshold.fetch.debug: 500ms
#index.search.slowlog.threshold.fetch.trace: 200ms
#index.indexing.slowlog.threshold.index.warn: 10s
#index.indexing.slowlog.threshold.index.info: 5s
#index.indexing.slowlog.threshold.index.debug: 2s
#index.indexing.slowlog.threshold.index.trace: 500ms
################################## GC Logging ################################
#monitor.jvm.gc.ParNew.warn: 1000ms
#monitor.jvm.gc.ParNew.info: 700ms
#monitor.jvm.gc.ParNew.debug: 400ms
#monitor.jvm.gc.ConcurrentMarkSweep.warn: 10s
#monitor.jvm.gc.ConcurrentMarkSweep.info: 5s
#monitor.jvm.gc.ConcurrentMarkSweep.debug: 2s
EOF
cat > elasticsearch.conf << 'EOF'
set.default.ES_HOME=/home/elasticsearch
set.default.ES_HEAP_SIZE=1024
#********************************************************************
# Wrapper Timeout Properties
#********************************************************************
# How long to wait for the JVM to start (in seconds)
wrapper.startup.timeout=300
# How long to wait for the JVM to stop (in seconds)
wrapper.shutdown.timeout=300
# When a ping will timeout to consider the JVM hung (in seconds)
wrapper.ping.timeout=300
#********************************************************************
# Wrapper Java Properties
#********************************************************************
# Java Application
wrapper.java.command=java
# Tell the Wrapper to log the full generated Java command line.
#wrapper.java.command.loglevel=INFO
wrapper.java.mainclass=org.tanukisoftware.wrapper.WrapperSimpleApp
wrapper.working.dir=%ES_HOME%
# Java Classpath (include wrapper.jar) Add class path elements as
# needed starting from 1
wrapper.java.classpath.1=%ES_HOME%/bin/service/lib/wrapper.jar
wrapper.java.classpath.2=%ES_HOME%/lib/elasticsearch*.jar
wrapper.java.classpath.3=%ES_HOME%/lib/*.jar
wrapper.java.classpath.4=%ES_HOME%/lib/sigar/*.jar
# Java Library Path (location of Wrapper.DLL or libwrapper.so)
wrapper.java.library.path.1=%ES_HOME%/bin/service/lib
# Java Bits. On applicable platforms, tells the JVM to run in 32 or 64-bit mode.
wrapper.java.additional.auto_bits=TRUE
# Java Additional Parameters
wrapper.java.additional.1=-Delasticsearch-service
wrapper.java.additional.2=-Des.path.home=%ES_HOME%
wrapper.java.additional.3=-Xss256k
wrapper.java.additional.4=-XX:+UseParNewGC
wrapper.java.additional.5=-XX:+UseConcMarkSweepGC
wrapper.java.additional.6=-XX:CMSInitiatingOccupancyFraction=75
wrapper.java.additional.7=-XX:+UseCMSInitiatingOccupancyOnly
wrapper.java.additional.8=-XX:+HeapDumpOnOutOfMemoryError
wrapper.java.additional.9=-Djava.awt.headless=true
wrapper.java.additional.10=-Des.max-open-files=true
# Initial Java Heap Size (in MB)
wrapper.java.initmemory=%ES_HEAP_SIZE%
# Maximum Java Heap Size (in MB)
wrapper.java.maxmemory=%ES_HEAP_SIZE%
# Application parameters. Add parameters as needed starting from 1
wrapper.app.parameter.1=org.elasticsearch.bootstrap.ElasticSearchF
#********************************************************************
# Wrapper Logging Properties
#********************************************************************
# Enables Debug output from the Wrapper.
# wrapper.debug=TRUE
# Format of output for the console. (See docs for formats)
wrapper.console.format=PM
# Log Level for console output. (See docs for log levels)
wrapper.console.loglevel=INFO
# Log file to use for wrapper output logging.
wrapper.logfile=%ES_HOME%/logs/service.log
# Format of output for the log file. (See docs for formats)
wrapper.logfile.format=LPTM
# Log Level for log file output. (See docs for log levels)
wrapper.logfile.loglevel=INFO
# Maximum size that the log file will be allowed to grow to before
# the log is rolled. Size is specified in bytes. The default value
# of 0, disables log rolling. May abbreviate with the 'k' (kb) or
# 'm' (mb) suffix. For example: 10m = 10 megabytes.
wrapper.logfile.maxsize=10m
# Maximum number of rolled log files which will be allowed before old
# files are deleted. The default value of 0 implies no limit.
wrapper.logfile.maxfiles=10
# Log Level for sys/event log output. (See docs for log levels)
wrapper.syslog.loglevel=NONE
#********************************************************************
# Wrapper General Properties
#********************************************************************
# Allow for the use of non-contiguous numbered properties
wrapper.ignore_sequence_gaps=TRUE
# Title to use when running as a console
wrapper.console.title=ElasticSearch
#********************************************************************
# Wrapper Windows NT/2000/XP Service Properties
#********************************************************************
# WARNING - Do not modify any of these properties when an application
# using this configuration file has been installed as a service.
# Please uninstall the service before modifying this section. The
# service can then be reinstalled.
# Name of the service
wrapper.name=ElasticSearch
# Display name of the service
wrapper.displayname=ElasticSearch
# Description of the service
wrapper.description=Open Source, Distributed, RESTful Search Engine
# Service dependencies. Add dependencies as needed starting from 1
wrapper.ntservice.dependency.1=
# Mode in which the service is installed. AUTO_START, DELAY_START or DEMAND_START
wrapper.ntservice.starttype=AUTO_START
# Allow the service to interact with the desktop.
wrapper.ntservice.interactive=false
EOF
cat > redis.conf << 'EOF'
# Redis configuration file example
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize yes
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
pidfile /var/run/redis.pid
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6379
# If you want you can bind a single interface, if the bind option is not
# specified all the interfaces will listen for incoming connections.
#
# bind 127.0.0.1
# Specify the path for the unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocketperm 755
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 300
# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Take the connection alive from the point of view of network
# equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that to close the connection twice the time is needed.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 60 seconds.
tcp-keepalive 60
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile /var/log/redis.log
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no
# Specify the syslog identity.
# syslog-ident redis
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING #################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving at all commenting all the "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
# like in the following example:
#
# save ""
save 900 1
save 300 10
save 60 10000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process will start working again Redis will
# automatically allow writes again.
#
# However if you have setup your proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usually even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
# Compress string objects using LZF when dumping .rdb databases?
# By default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performances.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes
# The filename where to dump the DB
dbfilename dump.rdb
# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir ./
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. Note that the configuration is local to the slave
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.
#
# slaveof <masterip> <masterport>
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes
# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default slaves are read-only.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
slave-read-only yes
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets a timeout for both Bulk transfer I/O timeout and
# master data or ping response timeout. The default value is 60 seconds.
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60
# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no
# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared
# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 10000
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves are subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
# maxmemory <bytes>
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among five behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key according to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
# Note: with any of the above policies, Redis will return an error on write
# operations, when there are no suitable keys for eviction.
#
# At the date of writing these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy volatile-lru
# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can select as well the sample
# size to check. For instance by default Redis will check three keys and
# pick the one that was used less recently, you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result into a few minutes of writes lost (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# wrong with the Redis process itself happens, but the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.
appendonly no
# The name of the append only file (default: "appendonly.aof")
# appendfilename appendonly.aof
# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log . Slow, Safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# For more details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the specified percentage, the rewrite is triggered. Also
# you need to specify a minimal size for the AOF file to be rewritten, this
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
################################ LUA SCRIPTING ###############################
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that has not yet called write commands. The second
# is the only way to shut down the server in the case a write command was
# already issued by the script but the user doesn't want to wait for the natural
# termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
############################### ADVANCED CONFIG ###############################
# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happens to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run against a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with a 2 millisecond delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients
# slave -> slave clients and MONITOR clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reach 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis server but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# include /path/to/local.conf
# include /path/to/other.conf
EOF
cat > redis << 'EOF'
#!/bin/sh
#
# Simple Redis init.d script conceived to work on Linux systems
# as it does use of the /proc filesystem.
REDISUSER="redis"
REDISPORT=6379
EXEC=/home/redis/src/redis-server
CLIEXEC=/home/redis/src/redis-cli
PIDFILE=/var/run/redis_${REDISPORT}.pid
CONF="/etc/redis/redis.conf"
case "$1" in
start)
if [ -f $PIDFILE ]
then
printf "%s\\n" "$PIDFILE exists, process is already running or crashed"
else
printf "%s\\n" "Starting Redis server..."
/bin/su - $REDISUSER -c "$EXEC $CONF"
fi
;;
stop)
if [ ! -f $PIDFILE ]
then
printf "%s\\n" "$PIDFILE does not exist, process is not running"
else
PID=$(cat $PIDFILE)
printf "%s\\n" "Stopping ..."
$CLIEXEC -p $REDISPORT shutdown
while [ -x /proc/${PID} ]
do
printf "%s\\n" "Waiting for Redis to shutdown ..."
sleep 1
done
printf "%s\\n" "Redis stopped"
fi
;;
*)
printf "%s\\n" "Please use start or stop as first argument"
;;
esac
EOF
cat > indexer.conf << 'EOF'
input {
redis {
host => "localhost"
type => "redis-input"
data_type => "list"
key => "logstash"
format => "json_event"
}
}
output {
stdout { debug => true debug_format => "json"}
elasticsearch {
host => "localhost"
port => "9300"
cluster => "elasticsearch-kibana"
}
}
EOF
cat > shipper.conf << 'EOF'
input {
relp {
type => syslog
port => 2514
}
}
output {
redis {
host => "localhost"
data_type => "list"
key => "logstash"
}
}
EOF
cat > logstash-indexer << 'EOF'
#! /bin/sh
### BEGIN INIT INFO
# Provides: logstash-indexer
# Required-Start: $local_fs $remote_fs
# Required-Stop: $local_fs $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Description: Starts Logstash-indexer as a daemon.
# Modified originally from https://gist.github.com/2228905#file_logstash.sh
### END INIT INFO
# Amount of memory for Java
#JAVAMEM=256M
# Location of logstash files
LOCATION=/home/logstash
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
DESC="Logstash Indexer Daemon"
NAME=java
DAEMON=`which java`
CONFIG_DIR=/home/logstash/indexer.conf
LOGFILE=/home/logstash/indexer.log
JARNAME=logstash.jar
#ARGS="-Xmx$JAVAMEM -Xms$JAVAMEM -jar ${JARNAME} agent --config ${CONFIG_DIR} --log ${LOGFILE} --grok-patterns-path ${PATTERNSPATH}"
#ARGS="-jar ${JARNAME} agent --config ${CONFIG_DIR} --log ${LOGFILE}"
ARGS="-jar ${JARNAME} agent --config ${CONFIG_DIR}"
SCRIPTNAME=/etc/init.d/logstash-indexer
PIDFILE=/var/run/logstash-indexer.pid
base=logstash
# Exit if the package is not installed
if [ ! -x "$DAEMON" ]; then
{
printf "%s\\n" "Couldn't find $DAEMON"
exit 99
}
fi
if [ "$(id -u)" != "0" ]; then
printf "%s\\n" "Not root" 1>&2
exit 99
fi
. /lib/lsb/init-functions
checkpid() {
local i
for i in $* ; do
[ -d "/proc/$i" ] || return 1
done
return 0
}
#
# Function that starts the daemon/service
#
do_start() {
[ -f "$PIDFILE" ] && { printf "%s\\n" "Already running"; exit; }
cd $LOCATION && \
($DAEMON $ARGS &) \
&& log_end_msg 0 || log_end_msg 1
}
set_pidfile() {
pgrep -f "$DAEMON[[:space:]]*$ARGS" > $PIDFILE
}
#
# Function that stops the daemon/service
#
do_stop() {
[ -f "$PIDFILE" ] || { printf "%s\\n" "Not pidfile found"; exit; }
pid=`cat $PIDFILE`
if checkpid $pid 2>&1; then
# TERM first, then KILL if not dead
kill -TERM $pid >/dev/null 2>&1
sleep 3
if checkpid $pid && sleep 1 &&
checkpid $pid && sleep 1 &&
checkpid $pid ; then
kill -KILL $pid >/dev/null 2>&1
sleep 3
fi
fi
checkpid $pid
RC=$?
[ "$RC" -eq 0 ] && log_end_msg 1 || log_end_msg 0
}
case "$1" in
start)
printf "%s" "Starting $DESC: "
do_start
set_pidfile
;;
stop)
printf "%s" "Stopping $DESC: "
do_stop
rm $PIDFILE
;;
restart|reload)
printf "%s" "Restarting $DESC: "
do_stop
do_start
set_pidfile
;;
status)
status_of_proc -p "$PIDFILE" "$DAEMON" "$NAME"
;;
*)
printf "%s\\n" "Usage: $SCRIPTNAME {start|stop|status|restart}" >&2
exit 3
;;
esac
printf "\\n"
exit 0
EOF
cat > logstash-shipper << 'EOF'
#! /bin/sh
### BEGIN INIT INFO
# Provides: logstash-shipper
# Required-Start: $local_fs $remote_fs
# Required-Stop: $local_fs $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Description: Starts Logstash-shipper as a daemon.
# Modified originally from https://gist.github.com/2228905#file_logstash.sh
### END INIT INFO
# Amount of memory for Java
#JAVAMEM=256M
# Location of logstash files
LOCATION=/home/logstash
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
DESC="Logstash Shipper Daemon"
NAME=java
DAEMON=`which java`
CONFIG_DIR=/home/logstash/shipper.conf
LOGFILE=/home/logstash/shipper.log
JARNAME=logstash.jar
#ARGS="-Xmx$JAVAMEM -Xms$JAVAMEM -jar ${JARNAME} agent --config ${CONFIG_DIR} --log ${LOGFILE} --grok-patterns-path ${PATTERNSPATH}"
#ARGS="-jar ${JARNAME} agent --config ${CONFIG_DIR} --log ${LOGFILE}"
ARGS="-jar ${JARNAME} agent --config ${CONFIG_DIR}"
SCRIPTNAME=/etc/init.d/logstash-shipper
PIDFILE=/var/run/logstash-shipper.pid
base=logstash
# Exit if the package is not installed
if [ ! -x "$DAEMON" ]; then
{
printf "%s\\n" "Couldn't find $DAEMON"
exit 99
}
fi
if [ "$(id -u)" != "0" ]; then
printf "%s\\n" "Not root" 1>&2
exit 99
fi
. /lib/lsb/init-functions
checkpid() {
local i
for i in $* ; do
[ -d "/proc/$i" ] || return 1
done
return 0
}
#
# Function that starts the daemon/service
#
do_start() {
[ -f "$PIDFILE" ] && { printf "%s\\n" "Already running"; exit; }
cd $LOCATION && \
($DAEMON $ARGS &) \
&& log_end_msg 0 || log_end_msg 1
}
set_pidfile() {
pgrep -f "$DAEMON[[:space:]]*$ARGS" > $PIDFILE
}
#
# Function that stops the daemon/service
#
do_stop() {
[ -f "$PIDFILE" ] || { printf "%s\\n" "Not pidfile found"; exit; }
pid=`cat $PIDFILE`
if checkpid $pid 2>&1; then
# TERM first, then KILL if not dead
kill -TERM $pid >/dev/null 2>&1
sleep 3
if checkpid $pid && sleep 1 &&
checkpid $pid && sleep 1 &&
checkpid $pid ; then
kill -KILL $pid >/dev/null 2>&1
sleep 3
fi
fi
checkpid $pid
RC=$?
[ "$RC" -eq 0 ] && log_end_msg 1 || log_end_msg 0
}
case "$1" in
start)
printf "%s" "Starting $DESC: "
do_start
set_pidfile
;;
stop)
printf "%s" "Stopping $DESC: "
do_stop
rm $PIDFILE
;;
restart|reload)
printf "%s" "Restarting $DESC: "
do_stop
do_start
set_pidfile
;;
status)
status_of_proc -p "$PIDFILE" "$DAEMON" "$NAME"
;;
*)
printf "%s\\n" "Usage: $SCRIPTNAME {start|stop|status|restart}" >&2
exit 3
;;
esac
printf "\\n"
exit 0
EOF
[ ! -f elasticsearch.yml ] && { printf "%s\\n" "Generation failed: elasticsearch.yml"; exit 1; }
[ ! -f elasticsearch.conf ] && { printf "%s\\n" "Generation failed: elasticsearch.conf"; exit 1; }
[ ! -f redis.conf ] && { printf "%s\\n" "Generation failed: redis.conf"; exit 1; }
[ ! -f redis ] && { printf "%s\\n" "Generation failed: redis"; exit 1; }
[ ! -f indexer.conf ] && { printf "%s\\n" "Generation failed: indexer.conf"; exit 1; }
[ ! -f shipper.conf ] && { printf "%s\\n" "Generation failed: shipper.conf"; exit 1; }
}
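# _fixdependencies(): detect the distro, enable the multiverse repository on Ubuntu,
# install ${apps_deps}, purge ${apps_purge}, and reconfigure the time zone and locales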
_fixdependencies() {
printf "%b\\n" "\033[1m----------------------\033[7m Fixing dependencies \033[0m\033[1m-------------------------\033[0m"
if [ -f /usr/bin/lsb_release ]; then
DISTRO="$(lsb_release -si)"
RELEASE="$(lsb_release -s -c)"
else
if [ -d /etc/dpkg ]; then
DISTRO="$(grep -i vendor /etc/dpkg/origins/default | head -1 | cut -d' ' -f2)"
RELEASE="$(grep '^deb .*' /etc/apt/sources.list | head -1 | cut -d' ' -f 3)"
fi
fi
case "${DISTRO}" in
Ubuntu) printf "%s\\n" "deb http://archive.ubuntu.com/ubuntu/ ${RELEASE} multiverse" > multiverse.list
printf "%s\\n" "${sudopwd}" | ${sudocmd} mv multiverse.list /etc/apt/sources.list.d/ > /dev/null 2>&1 ;;
esac
printf "%s\\n" "[+] installing deps ..."
_waitforsudo apt-get update
_waitforsudo apt-get install --no-install-recommends -y ${apps_deps}
printf "%s\\n" "[+] purging non essential apps ..."
_waitforsudo apt-get purge -y ${apps_purge}
printf "%s\\n" "[+] reconfiguring time zone to America/Mexico_City ..."
printf "%s\\n" "America/Mexico_City" > .timezone
_cmdsudo mv .timezone /etc/timezone
_cmdsudo dpkg-reconfigure -f noninteractive tzdata
printf "%s\\n" "[+] fixing locales ... "
_waitforsudo locale-gen en_US en_US.UTF-8
_cmdsudo dpkg-reconfigure -f noninteractive locales
[ ! -f /usr/bin/git ] && { printf "%s\\n" "Dependency step failed"; exit 1; }
}
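# _elasticsearch(): create the elasticsearch user, download elasticsearch ${elasticsearch_version}
# plus the java service wrapper, install the head plugin, and register /etc/init.d/elasticsearch
# to run as the elasticsearch user with a raised open-files limit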
_elasticsearch() {
#requires openjdk-7-jre-headless
printf "%b\\n" "\033[1m-----------------------\033[7m Elasticsearch \033[0m\033[1m----------------------------\033[0m"
#TODO 18-07-2013 18:45 >> remove '/home/elasticsearch' hardcoded vars, use sed
if ! _userexist elasticsearch; then
printf "%s\\n" "[+] creating user for service ... "
_cmdsudo useradd -d ${ES_HOME} -s /bin/sh elasticsearch
fi
printf "%s\\n" "export ES_HOME=${ES_HOME}" > .root_bash_profile
printf "%s\\n" "[+] downloading app ... "
_waitfor wget -c https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-${elasticsearch_version}.tar.gz
_waitfor wget -c http://github.com/elasticsearch/elasticsearch-servicewrapper/archive/master.zip
printf "%s\\n" "[+] uncompresing files ... "
_waitfor "zcat < elasticsearch-${elasticsearch_version}.tar.gz | tar xvf -"
_waitfor unzip master.zip
printf "%s\\n" "[+] installing ... "
_cmd mv elasticsearch-servicewrapper-master/service/ elasticsearch-${elasticsearch_version}/bin/
_cmd mv elasticsearch.yml ./elasticsearch-${elasticsearch_version}/config/
_cmd mv elasticsearch.conf ./elasticsearch-${elasticsearch_version}/bin/service
_cmd ./elasticsearch-${elasticsearch_version}/bin/plugin -install mobz/elasticsearch-head
_cmd rm -r elasticsearch-servicewrapper-master elasticsearch-${elasticsearch_version}.tar.gz master.zip
_cmd mkdir ./elasticsearch-${elasticsearch_version}/data
_cmdsudo mv elasticsearch-${elasticsearch_version} ${ES_HOME}
_cmdsudo ${ES_HOME}/bin/service/elasticsearch install
printf "%s\\n" "${sudopwd}" | ${sudocmd} sed -i -e "/#RUN_AS_USER/ s:.*:RUN_AS_USER=elasticsearch:" /etc/init.d/elasticsearch
printf "%s\\n" "${sudopwd}" | ${sudocmd} sed -i -e "/#ULIMIT_N/ s:.*:ULIMIT_N=65535:" /etc/init.d/elasticsearch
printf "%s\\n" "${sudopwd}" | ${sudocmd} sed -i -e "/export ES_HOME/ s:.*:export ES_HOME=\"${ES_HOME}\":" /etc/init.d/elasticsearch
printf "%s\\n" "${sudopwd}" | ${sudocmd} sed -i -e "/^PIDDIR=/ s:.*:PIDDIR=\"${ES_HOME}\":" /etc/init.d/elasticsearch
_cmdsudo chmod +x /etc/init.d/elasticsearch
#_cmdsudo update-rc.d elasticsearch defaults #not needed
_cmdsudo chown -R elasticsearch:elasticsearch ${ES_HOME}
}
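# _redis(): create the redis user, download and compile redis ${redis_version}, install
# redis.conf under /etc/redis/ and the init script as /etc/init.d/redis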
_redis() {
printf "%b\\n" "\033[1m-----------------------\033[7m Redis \033[0m\033[1m----------------------------\033[0m"
if ! _userexist redis; then
printf "%s\\n" "[+] creating user for service ... "
_cmdsudo useradd -d ${REDIS_HOME} -s /bin/sh redis
fi
printf "%s\\n" "export REDIS_HOME=${REDIS_HOME}" >> .root_bash_profile
printf "%s\\n" "[+] downloading app ... "
_waitfor wget -c http://redis.googlecode.com/files/redis-${redis_version}.tar.gz
printf "%s\\n" "[+] uncompresing files ... "
_waitfor "zcat < redis-${redis_version}.tar.gz | tar xvf -"
_cmd rm redis-${redis_version}.tar.gz
printf "%s\\n" "[+] compiling redis ... "
make -C ./redis-${redis_version} distclean > /dev/null 2>&1
make -C ./redis-${redis_version} > /dev/null 2>&1
printf "%s\\n" "[+] installing redis ... "
_cmd cp -- redis.conf ./redis-${redis_version}
_cmdsudo mkdir -p /etc/redis/
_cmdsudo mv redis.conf /etc/redis/
_cmdsudo mv redis /etc/init.d/redis
_cmdsudo chmod +x /etc/init.d/redis
_cmdsudo update-rc.d redis defaults
printf "%s\\n" "${sudopwd}" | ${sudocmd} sed -i -e "/^logfile/ s:.*:logfile ${REDIS_HOME}/redis.log:" /etc/redis/redis.conf
_cmdsudo mv redis-${redis_version} ${REDIS_HOME}
_cmdsudo chown -R redis:redis ${REDIS_HOME}
_cmdsudo chmod 700 ${REDIS_HOME}
}
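# _logstash(): create the logstash user, download the logstash ${logstash_version} flatjar and
# install the indexer/shipper configs plus their init scripts (logstash-indexer, logstash-shipper)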
_logstash() {
#requires openjdk-7-jre-headless
printf "%b\\n" "\033[1m-----------------------\033[7m Logstash (shipper, indexer) \033[0m\033[1m----------------------------\033[0m"
if ! _userexist logstash; then
printf "%s\\n" "[+] creating user for service ... "
_cmdsudo useradd -d ${LOGSTASH_HOME} -s /bin/sh logstash
fi
#printf "%s\\n" "export REDIS_HOME=${REDIS_HOME}" >> .root_bash_profile
printf "%s\\n" "[+] downloading app ... "
#source <(cat .root_bash_profile)
_waitfor wget -c http://logstash.objects.dreamhost.com/release/logstash-${logstash_version}-flatjar.jar
[ ! -f ./logstash-"${logstash_version}"-flatjar.jar ] && { printf "%s\\n" "Download step failed"; exit 1; }
printf "%s\\n" "[+] installing ... "
_cmd mkdir -p logstash
_cmd mv logstash-${logstash_version}-flatjar.jar logstash/logstash.jar
_cmd mv indexer.conf logstash
_cmd mv shipper.conf logstash
_cmdsudo mv logstash-indexer /etc/init.d/
_cmdsudo mv logstash-shipper /etc/init.d/
_cmdsudo chmod +x /etc/init.d/logstash-*
_cmdsudo update-rc.d logstash-indexer defaults
_cmdsudo update-rc.d logstash-shipper defaults
_cmdsudo mv logstash /home
_cmdsudo chown -R logstash:logstash ${LOGSTASH_HOME}
_cmdsudo chmod 700 ${LOGSTASH_HOME}
}
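# _kibana(): download the kibana master tarball and install it under /var/www/
# (served by the apache2 package installed in _fixdependencies)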
_kibana() {
printf "%b\\n" "\033[1m-----------------------\033[7m Kibana \033[0m\033[1m----------------------------\033[0m"
printf "%s\\n" "[+] downloading app ... "
_waitfor wget -c https://github.com/elasticsearch/kibana/archive/master.tar.gz -O kibana.tar.gz
printf "%s\\n" "[+] uncompresing files ... "
_waitfor "zcat < kibana.tar.gz | tar xvf -"
_cmd rm kibana.tar.gz
printf "%s\\n" "[+] installing ... "
_cmdsudo mv kibana-master/* /var/www/
_cmd rm -rf kibana-master
}
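# _party(): install the root bash profile, start every installed service and print the
# rsyslog/RELP lines ($ModLoad omrelp, *.* :omrelp:<server ip>:2514) that clients need in
# /etc/rsyslog.d/remote.conf so their logs reach the shipper listening on port 2514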
_party() {
printf "%b\\n" "\033[1m-----------------------\033[7m Launching \033[0m\033[1m----------------------------\033[0m"
#source <(cat .root_bash_profile)
_cmdsudo mv .root_bash_profile /root/.bash_profile
if [ -f /etc/init.d/elasticsearch ]; then
_cmdsudo service elasticsearch start
fi
if [ -f /etc/init.d/redis ]; then
_cmdsudo service redis start
fi
if [ -f /etc/init.d/logstash-indexer ]; then
_cmdsudo service logstash-indexer start
fi
if [ -f /etc/init.d/logstash-shipper ]; then
_cmdsudo service logstash-shipper start
fi
external_ip="$(curl -s ifconfig.me)"
printf "%b\\n" "\033[1m----------------------\033[7m DONE \033[0m\033[1m-------------------\033[0m"
printf "\\n"
printf "%s\\n" "You can now add new clients, add the following to their /etc/rsyslog.d/remote.conf file:"
printf "%s\\n" " ${ModLoad} omrelp"
printf "%s\\n" " *.* :omrelp:${external_ip}:2514"
printf "\\n"
printf "%s\\n" "Once you've clients, browse to http://${external_ip}"
printf "\\n"
printf "%s\\n" "See more details at: http://cleversoft.wordpress.com/2013/04/05/887/"
}
_header
_getroot
_fixdependencies
# elasticsearch.yml
# elasticsearch.conf
# redis.conf
# redis
# indexer.conf
# shipper.conf
_mk_conf_files
_elasticsearch
_redis
_logstash
_kibana
_party