-
Notifications
You must be signed in to change notification settings - Fork 8
/
Copy pathconfig.yml.example
365 lines (294 loc) · 11.2 KB
/
config.yml.example
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
############
# Statusengine Worker Configuration
############
# Every node in the cluster needs a name
# IT IS REQUIRED THAT THE NAME IS UNIQUE IN THE CLUSTER!
# The name is required to route external commands
# And to know which node executed a check
# So please change the default value if you have more than one node!!
node_name: Crowbar
############
# DATA SOURCE CONFIGURATION
############
# Determine if your Statusengine Broker Module exports all data to a
# gearman-job-server or not.
# Warning: Do not enable use_gearman and use_rabbitmq at the same time.
use_gearman: 1
# Configuration of your gearman-job-server
# The Statusengine Broker Module exports all data as json encoded objects
# to a gearman-job-server
gearman:
  address: 127.0.0.1
  port: 4730
  timeout: 1000
# Determine if your Statusengine Broker Module exports all data to
# RabbitMQ or not.
# Warning: Do not enable use_rabbitmq and use_gearman at the same time.
#
# NOTICE: RabbitMQ Support is for the new Statusengine Event Broker
# which is in development at the moment.
# See: https://github.com/statusengine/broker
use_rabbitmq: 0
# NOTICE: RabbitMQ Support is for the new Statusengine Event Broker
# which is in development at the moment.
# See: https://github.com/statusengine/broker
rabbitmq:
  host: 127.0.0.1
  port: 5672
  user: statusengine
  password: statusengine
  # Quoted so the vhost stays an unambiguous string scalar
  vhost: "/"
  exchange: statusengine
  durable_exchange: 0
  durable_queues: 0
############
# LIVE DATA CONFIGURATION
############
# If Statusengine should save status data to Redis
# NOTICE: Redis is always required to calculate internal statistics!
# So this switch only determines whether Statusengine will also save
# monitoring status results to Redis
use_redis: 0
# Configuration of your Redis server
redis:
  address: 127.0.0.1
  port: 6379
  db: 0
############
# HISTORY DATA CONFIGURATION
############
# If this is 1, Statusengine will save the current host and service status also in your historical storage backend
# For example in MySQL or CrateDB
store_live_data_in_archive_backend: 1
# If Statusengine should save historical data to MySQL
# WARNING: Do not set use_mysql and use_crate to 1 at the same time!
use_mysql: 0
# Configuration of your MySQL server
mysql:
  host: 127.0.0.1
  port: 3306
  username: statusengine
  password: password
  database: statusengine
  encoding: utf8
# Dump MySQL Query Parameters on Error
# Determines if Statusengine will call PDOStatement::debugDumpParams (https://www.php.net/manual/de/pdostatement.debugdumpparams.php)
# in case of a MySQL error or not.
# Warning: This will produce massive output to stdout (or the syslog if Statusengine is running via systemd)
dump_mysql_query_parameters: 0
# If Statusengine should save historical data to CrateDB
# WARNING: Do not set use_crate and use_mysql to 1 at the same time!
use_crate: 1
# Configuration of your CrateDB Cluster
# This is an array of cluster nodes.
#
# It is recommended to use a load balancer in front of your CrateDB cluster!
# So you will have a single ip address where Statusengine is going to connect to
crate:
  username: crate
  # Explicit empty string - a bare value would parse as null
  password: ""
  nodes:
    # host:port pairs are quoted so YAML keeps them as plain strings
    - "127.0.0.1:4200"
    # - "192.168.56.101:4200"
    # - "192.168.56.102:4200"
# Performance settings
# How many records get inserted in one statement
# This value affects: CrateDB, MySQL and Elasticsearch
# Recommendation for MySQL: 100
# Recommendation for CrateDB: 10000
number_of_bulk_records: 1000
# Timeout in seconds Statusengine will wait for number_of_bulk_records to be
# reached before a flush gets forced
# This value affects: CrateDB, MySQL and Elasticsearch
max_bulk_delay: 5
# Number of worker processes for service status records
# Target: Redis
number_servicestatus_worker: 1
# Number of worker processes for host status records
# Target: Redis
number_hoststatus_worker: 1
# Number of worker processes for logentry records
# Target: MySQL|CrateDB
number_logentry_worker: 1
# Number of worker processes for host and service
# state change records
# Target: MySQL|CrateDB
number_statechange_worker: 1
# Number of worker processes for host check results
# Target: MySQL|CrateDB
number_hostcheck_worker: 1
# Number of worker processes for service check results
# Target: MySQL|CrateDB
number_servicecheck_worker: 1
# Number of worker processes for other queues like notifications
# Target: MySQL|CrateDB
number_notification_log_worker: 1
# Number of worker processes for other queues like contactnotificationmethod, downtimes and acknowledgements
# Target: MySQL|CrateDB
number_misc_worker: 1
############
# PERFDATA DATA CONFIGURATION
############
# Whether Statusengine should process performance data
# 1 = yes
# 0 = no
process_perfdata: 0
# Number of worker processes for performance data records
# Target: whichever backend(s) you select via the 'perfdata_backend' option
number_perfdata_worker: 1
# Uncomment a backend to enable it
# You can enable as many backends as you want
perfdata_backend:
  - crate
  # - graphite
  # - mysql
  # - elasticsearch
############
# GRAPHITE CONFIGURATION
############
# Prefix prepended to every record written to Graphite, so multiple
# systems are able to read/write to the same Graphite instance
graphite_prefix: statusengine
# Hostname or IP address of your Graphite system
# The Statusengine Worker uses the TCP plaintext protocol to store data
graphite_address: localhost
# Port your Graphite server is listening on
graphite_port: 2003
# Every character in a metric key that does not match this regex
# will be replaced with an underscore (_)
# NOTE(review): the second '^' inside the character class is a literal
# caret (it whitelists '^' in keys) - confirm that is intended
graphite_illegal_characters: '/[^a-zA-Z^0-9\-\.]/'
############
# ELASTICSEARCH CONFIGURATION
############
# Statusengine will create an index template to store performance data to
# Elasticsearch.
# The template is hardcoded and will be managed by Statusengine
# automatically. However, you can still change
# important settings.
# If you change any template settings, you need to do this
# BEFORE THE FIRST start of Statusengine Worker,
# or you need to delete/edit the old template manually via the Elasticsearch API
elasticsearch_template:
  name: statusengine-metric
  number_of_shards: 2
  number_of_replicas: 0
  refresh_interval: 15s
  codec: best_compression
  enable_source: 1
# Index that will be used to store data in Elasticsearch
elasticsearch_index: statusengine-metric-
# The value of elasticsearch_pattern will be added to the end of your
# defined elasticsearch_index. It is recommended to terminate
# your elasticsearch_index with a dash, like the example
# index: statusengine-metric-
#
# Available patterns:
# - none => All data in one index, this will also disable deletion of old records!
# - daily => statusengine-metric-YYYY.MM.DD
# - weekly => statusengine-metric-GGGG.WW
# - monthly => statusengine-metric-YYYY.MM
elasticsearch_pattern: daily
# Set the ip address or hostname for your Elasticsearch system or cluster
# Statusengine will use the HTTP API
elasticsearch_address: 127.0.0.1
# Port where your Elasticsearch server is listening to
elasticsearch_port: 9200
############
# COMMAND ROUTER CONFIGURATION
############
# If Statusengine should check the database for external commands to route
# 1 = yes, 0 = no
check_for_commands: 1
# Interval to check for new commands in seconds
# Every check will fire a SQL query, so choose wisely
command_check_interval: 15
# External command file where Statusengine will pass external commands to.
# If you are using Nagios you MUST USE the nagios.cmd!!
# If you are not sure of what you are doing, use the .cmd file :)
external_command_file: /opt/naemon/var/naemon.cmd
# Path to Naemon query handler.
# This is where Statusengine will pass external commands to the monitoring backend
# NOTICE! At the moment only Naemon supports to pass external commands through the query handler!
# If you are using Nagios, you need to use the nagios.cmd (External Command File)
# See: https://github.com/NagiosEnterprises/nagioscore/issues/364
query_handler: /opt/naemon/var/naemon.qh
# Pass external commands to the external commands file.
# Naemon and Nagios
#submit_method: cmd
# Pass external commands to the query handler
# Naemon only at the moment, but I recommend using this with Naemon
#submit_method: qh
# Pass external commands through the Statusengine Broker using the statusngin_cmd queue
# Requires to use the C++ Statusengine Broker: https://github.com/statusengine/broker
# Supports Naemon and Nagios
submit_method: broker
############
# SYSLOG CONFIGURATION
############
# Whether the Statusengine Worker writes its log messages to your syslog
# 1 = enabled, 0 = disabled
syslog_enabled: 1
# The tag or ident used for Statusengine Worker messages in your syslog
syslog_tag: 'statusengine-worker'
############
# ARCHIVE AGE CONFIGURATION
############
# NOTICE:
# The Statusengine database cleanup cronjob should only run at one node of your cluster
# You can run the cron on as many nodes as you want, but this will increase the load of the system.
# If you want to run the cronjob on more than one node, you should set different times for scheduling the cron
# For example at 01:00AM on node1 and at 01:00PM on node2 or so
# Cronjob usage:
# bin/Console.php cleanup -q (will run the cronjob without any output, perfect for crontab)
#
# bin/Console.php cleanup (will run the cronjob with output, perfect to check what's going on)
#
# In this section you can define how long which data should be stored in the database
# Every value is in DAYS!
# Set 0 to disable automatic cleanup of a particular table
# Settings for Host related records
# How long should every executed check for a host be stored
age_hostchecks: 5
# How long should acknowledgement data of a host be stored
age_host_acknowledgements: 60
# How long should host notifications be stored
age_host_notifications: 60
# How long should host notifications log records be stored
age_host_notifications_log: 60
# How long should host state change records be stored
age_host_statehistory: 365
# How long should downtime data of a host be stored
age_host_downtimes: 60
# Settings for Service related records
# How long should every executed check for a service be stored
age_servicechecks: 5
# How long should acknowledgement data of a service be stored
age_service_acknowledgements: 60
# How long should service notifications be stored
age_service_notifications: 60
# How long should service notifications log records be stored
age_service_notifications_log: 60
# How long should service state change records be stored
age_service_statehistory: 365
# How long should downtime data of a service be stored
age_service_downtimes: 60
# Misc records
# How long should log entries records be stored
age_logentries: 5
# How long should unprocessed tasks in Statusengine's task queue be stored
age_tasks: 1
# For some perfdata backends, Statusengine is able to clean up the database:
# - CrateDB
# - MySQL
# - Elasticsearch
# If you use Elasticsearch, don't set this value to less than your pattern spans
# e.g.: daily => 2, weekly => 8, monthly => 32
# If your pattern is set to none, deletion of old records is disabled!
#
# Other backends do this by themselves, so the age_perfdata value has no effect on:
# - Graphite
age_perfdata: 90
############
# ENVIRONMENT CONFIGURATION
############
# Sometimes creepy proxies get in the way and then we can't connect to the database backend
# or whatever else the proxy thinks it knows about your connection
# Enable (1) this option to clear proxy environment variables (for Statusengine only)
# Disable (0) and Statusengine will use the proxy from your environment
disable_http_proxy: 1