# docker-compose.ha.yml
# This docker-compose file is provided as an example to create a Docker Swarm based MSActivator setup
version: "3.8"

# Shared Elasticsearch connection settings, merged into service environments
# via `<<: *es-configuration`. ES_CREDENTIALS is base64 "user:password" —
# NOTE(review): rotate these credentials before any production deployment.
x-es-configuration: &es-configuration
  ES_CREDENTIALS: c3VwZXJ1c2VyOnheWnl1R002fnU9K2ZZMkc=
  ES_SERVERS: "msa-es"

# Common json-file logging policy: non-blocking writes, capped at 5 x 10 MB
# per task so container logs cannot fill the node's disk.
x-logging: &logging
  driver: "json-file"
  options:
    mode: non-blocking
    max-buffer-size: "4m"
    max-size: "10m"
    max-file: "5"

# Kafka endpoint and topic used by the syslog pipeline
# (msa-rsyslog produces, msa-parse consumes).
x-kafka-syslogs: &kafka-syslogs
  KAFKA_SERVER: "kafka:9094"
  KAFKA_TOPIC: "syslogs"

# Default placement for application services: one replica, at most one per
# node, pinned to workers labeled "app".
x-placement_app: &placement_app
  replicas: 1
  placement:
    max_replicas_per_node: 1
    constraints:
      - "node.labels.worker==app"

# rsyslog ports are in mode host so we have to configure one replica per worker
x-placement_rsyslog: &placement_rsyslog
  replicas: 1
  placement:
    max_replicas_per_node: 1
    constraints:
      - "node.labels.worker==app"

# Placement for database services: pinned to workers labeled "db".
x-placement_db: &placement_db
  replicas: 1
  placement:
    max_replicas_per_node: 1
    constraints:
      - "node.labels.worker==db"

# Shared healthcheck timings; each service supplies its own `test` command.
# Services that merge this without a `test` keep the image's default check.
x-healthcheck: &healthcheck
  timeout: 10s
  retries: 10
  interval: 30s
  start_period: 120s

services:
msa-front:
image: openmsa/openmsa:msa2-front-2.8.13-df02b100eeac062736e0852fc5f5778d26619610
depends_on:
- msa-api
- msa-ui
- camunda
healthcheck:
<<: *healthcheck
test: ["CMD-SHELL", "curl -k --fail https://localhost"]
deploy:
<<: *placement_app
ports:
- target: 80
published: 80
protocol: tcp
mode: ingress
- target: 443
published: 443
protocol: tcp
mode: ingress
logging:
driver: "json-file"
options:
mode: non-blocking
max-buffer-size: "4m"
max-size: "10m"
max-file: "5"
volumes:
- "/mnt/NASVolume/msa_front_conf:/etc/nginx/custom_conf.d"
#
# uncomment one of the 2 sections below when installing a custom certificate
# - Docker standard standalone installation
#volumes:
# - "msa_front:/etc/nginx/ssl"
# - Docker Swarm HA installation
#volumes:
# - "/mnt/NASVolume/msa_front:/etc/nginx/ssl"
  # PostgreSQL 12 primary (PG_MODE: primary); db-replica streams from this
  # service. Data is persisted on the shared NAS mount.
  db:
    image: openmsa/openmsa:msa2-db-2.8.13-b0c12101a8d1ee6f0c6d77ebbcc06a739ef7dc50
    healthcheck:
      <<: *healthcheck
      test: ["CMD-SHELL", "/usr/pgsql-12/bin/pg_isready -h localhost"]
    deploy:
      <<: *placement_db
    environment:
      # NOTE(review): plaintext default credentials below — consider Docker
      # secrets or an env file kept out of version control.
      CAMUNDA_PASSWORD: camunda
      CAMUNDA_DB: process-engine
      CAMUNDA_USER: camunda
      KEY_VAULT_USER: key_vault
      KEY_VAULT_DB: key_vault
      PG_MODE: primary
      PG_PRIMARY_USER: postgres
      PG_PRIMARY_PASSWORD: my_db_password
      PG_USER: postgres
      PG_PASSWORD: my_db_password
      PG_DATABASE: POSTGRESQL
      PG_ROOT_PASSWORD: my_db_password
      PG_PRIMARY_PORT: 5432
      MAX_CONNECTIONS: 1600
    volumes:
      - "/mnt/NASVolume/msa_db:/pgsqldata/pgsql"
    # PostgreSQL needs more shared memory than Docker's 64 MB default.
    shm_size: 1g
    logging:
      <<: *logging
  # PostgreSQL streaming replica of `db` (PG_MODE: replica, PG_PRIMARY_HOST: db).
  # NOTE(review): no data volume is mounted — replica storage is ephemeral and
  # presumably re-syncs from the primary on restart; confirm this is intended.
  db-replica:
    image: openmsa/openmsa:msa2-db-2.8.13-b0c12101a8d1ee6f0c6d77ebbcc06a739ef7dc50
    healthcheck:
      <<: *healthcheck
      test: ["CMD-SHELL", "/usr/pgsql-12/bin/pg_isready -h localhost"]
    deploy:
      <<: *placement_db
    environment:
      CAMUNDA_PASSWORD: camunda
      CAMUNDA_DB: process-engine
      CAMUNDA_USER: camunda
      KEY_VAULT_USER: key_vault
      KEY_VAULT_DB: key_vault
      PG_MODE: replica
      PG_PRIMARY_USER: postgres
      PG_PRIMARY_PASSWORD: my_db_password
      PG_USER: postgres
      PG_PASSWORD: my_db_password
      PG_DATABASE: POSTGRESQL
      PG_ROOT_PASSWORD: my_db_password
      PG_PRIMARY_PORT: 5432
      PG_PRIMARY_HOST: db
    logging:
      <<: *logging
  # Core API service (checked on port 8480). Shares entity/repository data
  # with msa-sms and msa-dev through the NAS bind mounts below.
  msa-api:
    image: openmsa/openmsa:msa2-api-2.8.13-d358c69c0cbc49934f502ce3c1d6cd6f2ce2f070
    depends_on:
      - db
    healthcheck:
      <<: *healthcheck
      test: ["CMD-SHELL", "curl --fail http://localhost:8480"]
    deploy:
      <<: *placement_app
    environment:
      <<: *es-configuration
      # Swarm service template: resolved to the hosting node's hostname.
      HOST_HOSTNAME: "{{.Node.Hostname}}"
    volumes:
      - "/mnt/NASVolume/msa_dev:/opt/devops/"
      - "/mnt/NASVolume/rrd_repository:/opt/rrd"
      - "/mnt/NASVolume/msa_entities:/opt/fmc_entities"
      - "/mnt/NASVolume/msa_repository:/opt/fmc_repository"
      - "/mnt/NASVolume/msa_api_keystore:/etc/pki/jentreprise"
      - "/mnt/NASVolume/msa_api_logs:/opt/wildfly/logs/processLog"
    logging:
      <<: *logging
    networks:
      default:
        aliases:
          - "msa_api"
  # Web UI (checked on port 8080); features are toggled via the FEATURE_* flags.
  msa-ui:
    image: openmsa/openmsa:msa2-ui-2.8.13-e740a17cfabc6c71d10b286b35cc1dcde3d632e6
    depends_on:
      - msa-api
    healthcheck:
      <<: *healthcheck
      test: ["CMD-SHELL", "curl --fail http://localhost:8080"]
    deploy:
      <<: *placement_app
    environment:
      - FEATURE_ADMIN=true
      - FEATURE_REPOSITORY=true
      - FEATURE_CONNECTION_STATUS=true
      - FEATURE_ALARMS=true
      - FEATURE_LICENCE=true
      - FEATURE_TOPOLOGY=true
      - FEATURE_MONITORING_PROFILES=true
      - FEATURE_PROFILE_AUDIT_LOGS=true
      - FEATURE_PERMISSION_PROFILES=true
      - FEATURE_AI_ML=false
      - FEATURE_MICROSERVICE_BULK_OPERATION=false
      - FEATURE_EDIT_VARIABLES_IN_MICROSERVICE_CONSOLE=true
      - FEATURE_WORKFLOW_OWNER=false
      - FEATURE_PERMISSION_PROFILE_LABELS=false
      - FEATURE_BPM=true
      - UBIQUBE_ES_SECURITY_DISABLED=true
      - FEATURE_ALARMS_AUTO_CLEARANCE=false
      - FEATURE_IMPORT_WITH_SAME_AND_UPPERRANK=true
      # NOTE(review): in a plain YAML scalar backslashes are literal, so the
      # delivered value is [\"Datafiles\"] — confirm the UI expects this
      # pre-escaped form rather than ["Datafiles"].
      - FEATURE_REPOFOLDERLIST=[\"Datafiles\"]
    logging:
      <<: *logging
    networks:
      default:
        aliases:
          - "msa_ui"
  # Syslog ingestion (rsyslog): receives syslog on 514/udp and 6514/tcp in
  # host mode and forwards to Kafka via the omkafka output module.
  msa-rsyslog:
    depends_on:
      - msa-parse
      - kafka
    image: openmsa/openmsa:msa2-rsyslog-2.8.13-ff52e9cdfb5452b114e94eb76bfb8679d380fa03
    healthcheck:
      <<: *healthcheck
      # Unhealthy when PID 1 pegs a CPU (>99%); `$$` escapes `$` for compose.
      test: ["CMD-SHELL", "ps -p 1 -h -o%cpu | awk '{if ($$1 > 99) exit 1; else exit 0;}'"]
    deploy:
      <<: *placement_rsyslog
    environment:
      # ACTIONTYPE: omudpspoof or omkafka
      # configure a specific port for TLS. Default is 6514
      # TLS_SYSLOG_PORT: 6514
      ACTIONTYPE: "omkafka"
      # Merged keys lose to the explicit ACTIONTYPE above on any conflict.
      <<: *kafka-syslogs
    ports:
      # on docker swarm rsyslog port can support only one protocol (TCP or UDP) per port and MUST be in host mode
      - target: 514
        published: 514
        protocol: udp
        mode: host
      - target: 6514
        published: 6514
        protocol: tcp
        mode: host
    logging:
      <<: *logging
    networks:
      default:
        aliases:
          - "msa_rsyslog"
  # SMS service (ubi-sms): exposes 69/udp (conventionally TFTP) and 5200/udp
  # directly on the host; spools bulk parser files via named volumes.
  msa-sms:
    image: openmsa/openmsa:msa2-sms-2.8.13-afd10fc96bcd2882786061dca63cdcc8522c7290
    depends_on:
      - db
      - msa-dev
    healthcheck:
      <<: *healthcheck
      test: ["CMD-SHELL", "/etc/init.d/ubi-sms status | grep -q 'service seems UP' || exit 1"]
    deploy:
      <<: *placement_app
    environment:
      <<: *es-configuration
      # Swarm service templates, resolved per task at schedule time.
      CONTAINER_DOCKNAME: "{{.Task.Name}}.{{.Node.Hostname}}"
      HOST_HOSTNAME: "{{.Node.Hostname}}"
    volumes:
      - "/mnt/NASVolume/msa_sms_logs:/opt/sms/logs"
      - "/mnt/NASVolume/msa_dev:/opt/devops/"
      - "/mnt/NASVolume/msa_entities:/opt/fmc_entities"
      - "/mnt/NASVolume/msa_repository:/opt/fmc_repository"
      - "/mnt/NASVolume/msa_svn:/opt/svnroot"
      - "msa_bulkfiles:/opt/sms/spool/parser"
      - "msa_bulkfiles_err:/opt/sms/spool/parser-error"
    ports:
      - target: 69
        published: 69
        protocol: udp
        mode: host
      - target: 5200
        published: 5200
        protocol: udp
        mode: host
    logging:
      <<: *logging
    networks:
      default:
        aliases:
          - "msa_sms"
  # Syslog parser: consumes the Kafka "syslogs" topic (see &kafka-syslogs)
  # and talks to Elasticsearch.
  msa-parse:
    image: openmsa/openmsa:msa2-parse-2.8.13-3308b3a47e5772587f7afdacf9c518a8704af224
    depends_on:
      - db
      - kafka
      - msa-es
      - msa-dev
    healthcheck:
      <<: *healthcheck
      test: ["CMD-SHELL", "/etc/init.d/ubi-sms status | grep -q 'service seems UP' || exit 1"]
    deploy:
      <<: *placement_app
    environment:
      # Multi-anchor merge; on a key conflict the earlier alias wins (these
      # two anchors share no keys).
      <<: [*es-configuration, *kafka-syslogs]
    logging:
      <<: *logging
    volumes:
      # NOTE(review): named volume (node-local) here, while msa-sms bind-mounts
      # /mnt/NASVolume/msa_sms_logs for the same container path — confirm these
      # logs are not expected to be shared across services/nodes.
      - "msa_sms_logs:/opt/sms/logs"
      - "/mnt/NASVolume/msa_dev:/opt/devops/"
      - "msa_parsebulkfiles:/opt/sms/spool/parser"
      - "msa_parsebulkfiles_err:/opt/sms/spool/parser-error"
    networks:
      default:
        aliases:
          - "msa_parse"
  # SNMP trap receiver: listens on 162/udp in host mode and spools parsed
  # traps via named volumes.
  msa-snmptrap:
    image: openmsa/openmsa:msa2-snmptrap-2.8.13-be04c01213e52c2f0226019dd28f7f7e2fe9df05
    depends_on:
      - db
      - msa-es
      - msa-dev
    healthcheck:
      <<: *healthcheck
      test: ["CMD-SHELL", "/etc/init.d/ubi-sms status | grep -q 'service seems UP' || exit 1"]
    deploy:
      <<: *placement_app
    environment:
      <<: *es-configuration
    ports:
      - target: 162
        published: 162
        protocol: udp
        mode: host
    logging:
      <<: *logging
    volumes:
      - "msa_sms_logs:/opt/sms/logs"
      - "/mnt/NASVolume/msa_dev:/opt/devops/"
      - "msa_snmptrapbulkfiles:/opt/sms/spool/parser"
      - "msa_snmptrapbulkfiles_err:/opt/sms/spool/parser-error"
    networks:
      default:
        aliases:
          - "msa_snmptrap"
  # BUD service (ubi-bud); shares the sms logs volume with the other
  # sms-family services.
  msa-bud:
    image: openmsa/openmsa:msa2-bud-2.8.13-e62a0723c91582e748a3a3d6cc4f011f2aae9d2b
    depends_on:
      - db
    healthcheck:
      <<: *healthcheck
      test: ["CMD-SHELL", "/etc/init.d/ubi-bud status | grep -q 'service seems UP' || exit 1"]
    environment:
      # Swarm service template, resolved per task at schedule time.
      - CONTAINER_DOCKNAME={{.Task.Name}}.{{.Node.Hostname}}
    deploy:
      <<: *placement_app
    logging:
      <<: *logging
    volumes:
      - "msa_sms_logs:/opt/sms/logs"
    networks:
      default:
        aliases:
          - "msa_bud"
  # Alarm service (ubi-alarm): spools alarms through the msa_alarmbulkfiles
  # named volumes.
  msa-alarm:
    depends_on:
      - db
      - msa-es
      - msa-api
      - msa-dev
    image: openmsa/openmsa:msa2-alarm-2.8.13-8dc4632e7097a4ca9138a7edaef268d932964a5d
    healthcheck:
      <<: *healthcheck
      test: ["CMD-SHELL", "/etc/init.d/ubi-alarm status | grep -q 'service seems UP' || exit 1"]
    deploy:
      <<: *placement_app
    environment:
      <<: *es-configuration
      # Swarm service template, resolved per task at schedule time.
      CONTAINER_DOCKNAME: "{{.Task.Name}}.{{.Node.Hostname}}"
    volumes:
      - "msa_sms_logs:/opt/sms/logs"
      - "msa_alarmbulkfiles:/opt/sms/spool/alarms"
      - "msa_alarmbulkfiles_err:/opt/sms/spool/alarms-error"
    logging:
      <<: *logging
    networks:
      default:
        aliases:
          - "msa_alarm"
  # Monitoring/polling service (ubi-poll); reads entity/repository data and
  # writes RRD files to the shared NAS rrd_repository mount.
  msa-monitoring:
    depends_on:
      - db
      - msa-es
      - msa-dev
      - msa-sms
    image: openmsa/openmsa:msa2-monitoring-2.8.13-27feb5fa1bd40f309661a0d3787dfaa95a530b54
    healthcheck:
      <<: *healthcheck
      test: ["CMD-SHELL", "/etc/init.d/ubi-poll status | grep -q 'service seems UP' || exit 1"]
    deploy:
      <<: *placement_app
    environment:
      <<: *es-configuration
      # Swarm service template, resolved per task at schedule time.
      CONTAINER_DOCKNAME: "{{.Task.Name}}.{{.Node.Hostname}}"
    volumes:
      - "msa_sms_logs:/opt/sms/logs"
      - "/mnt/NASVolume/msa_dev:/opt/devops/"
      - "/mnt/NASVolume/msa_entities:/opt/fmc_entities"
      - "/mnt/NASVolume/msa_repository:/opt/fmc_repository"
      - "/mnt/NASVolume/rrd_repository:/opt/rrd"
      - "msa_monitbulkfiles:/opt/sms/spool/parser"
      - "msa_monitbulkfiles_err:/opt/sms/spool/parser-error"
    logging:
      <<: *logging
    networks:
      default:
        aliases:
          - "msa_monitoring"
  # Kafka broker in single-node KRaft mode (combined controller+broker, node
  # id 0); EXTERNAL listener on 9094 is what the other services connect to.
  kafka:
    image: bitnami/kafka:3.5
    healthcheck:
      # NOTE(review): no `test` is defined, so these timings only take effect
      # if the image itself declares a HEALTHCHECK — confirm for bitnami/kafka.
      <<: *healthcheck
    deploy:
      <<: *placement_app
    ports:
      - "9094:9094"
    environment:
      - KAFKA_CFG_NODE_ID=0
      - KAFKA_CFG_PROCESS_ROLES=controller,broker
      - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka:9093
      - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094
      - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://:9092,EXTERNAL://kafka:9094
      - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT
      - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
      - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=PLAINTEXT
      - KAFKA_CFG_LOG_CLEANER_ENABLE=true
      - KAFKA_CFG_LOG_CLEANUP_POLICY=delete
      # Retention: 2 GB or 24 h (86400000 ms), whichever is hit first.
      - KAFKA_CFG_LOG_RETENTION_BYTES=2000000000
      - KAFKA_CFG_LOG_RETENTION_MS=86400000
      - KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true
      - KAFKA_CFG_MESSAGE_MAX_BYTES=1048576
    logging:
      <<: *logging
    volumes:
      - "/mnt/NASVolume/kafka_data:/bitnami/kafka"
  # Artemis message broker (ARTEMIS_* credentials; instance data on NAS).
  msa-broker:
    depends_on:
      - db
    image: openmsa/openmsa:msa2-broker-2.8.13-a637ce7b1d31f9da37a22b0fdaad954dcdf0870e
    healthcheck:
      # NOTE(review): no `test` is defined — timings only apply if the image
      # ships its own HEALTHCHECK; confirm.
      <<: *healthcheck
    deploy:
      <<: *placement_app
    environment:
      ARTEMIS_PASSWORD: simetraehcapa
      ARTEMIS_USER: artemis
    logging:
      <<: *logging
    volumes:
      - "/mnt/NASVolume/mano_artemis:/var/lib/artemis-instance"
  # Camunda BPM engine, backed by the "process-engine" database on `db`.
  camunda:
    depends_on:
      - db
    image: openmsa/openmsa:msa2-camunda-2.8.13-0f60a3c018c1b7dcde68259c9dadf71862373510
    healthcheck:
      # NOTE(review): no `test` is defined — timings only apply if the image
      # ships its own HEALTHCHECK; confirm.
      <<: *healthcheck
    deploy:
      <<: *placement_app
    environment:
      DB_DRIVER: org.postgresql.Driver
      DB_URL: 'jdbc:postgresql://db:5432/process-engine'
      DB_USERNAME: camunda
      DB_PASSWORD: camunda
      DB_VALIDATE_ON_BORROW: 'true'
      # Delay startup until db answers on 5432, waiting up to 60 s.
      WAIT_FOR: 'db:5432'
      WAIT_FOR_TIMEOUT: 60
    logging:
      <<: *logging
  # Kibana UI for the embedded Elasticsearch, published on 5601.
  msa-kibana:
    image: openmsa/openmsa:msa2-kibana-2.8.13-54e81a717d1f9aa7acc372643c6966e23b8b66fe
    healthcheck:
      # NOTE(review): no `test` is defined — timings only apply if the image
      # ships its own HEALTHCHECK; confirm.
      <<: *healthcheck
    deploy:
      <<: *placement_app
    environment:
      ELASTICSEARCH_URL: "http://msa_es:9200"
      ELASTICSEARCH_HOSTS: "http://msa_es:9200"
      # Explicit keys above win over merged keys on any conflict (none here).
      <<: *es-configuration
    ports:
      - "5601:5601"
    logging:
      <<: *logging
    networks:
      default:
        aliases:
          - "msa_kibana"
  # Single-node Elasticsearch with X-Pack security enabled; data persisted on
  # the shared NAS mount.
  msa-es:
    image: openmsa/openmsa:msa2-es-2.8.13-3d4d3a0c893492caea400fcd829e82f06ab75a2d
    healthcheck:
      <<: *healthcheck
      # Healthy only once the init marker exists AND cluster status is green.
      test: ["CMD-SHELL", "test -f /home/install/init-done && curl -s -XGET -H 'Authorization: Basic c3VwZXJ1c2VyOnheWnl1R002fnU9K2ZZMkc=' 'http://localhost:9200/_cluster/health?pretty' | grep -q 'status.*green' || exit 1"]
    deploy:
      <<: *placement_app
    environment:
      discovery.type: "single-node"
      script.painless.regex.enabled: "true"
      bootstrap.memory_lock: "true"
      xpack.security.enabled: "true"
      ES_JAVA_OPTS: "-Xms512m -Xmx1024m"
      <<: *es-configuration
    # bootstrap.memory_lock requires an unlimited memlock ulimit.
    ulimits:
      memlock:
        soft: -1
        hard: -1
    networks:
      default:
        aliases:
          - "msa_es"
    volumes:
      - "/mnt/NASVolume/msa_es:/usr/share/elasticsearch/data"
    logging:
      <<: *logging
  # Cerebro: web admin console for Elasticsearch, published on 9000 and
  # protected by basic auth.
  msa-cerebro:
    image: openmsa/openmsa:msa2-cerebro-2.8.13-8374160f95e2349711204189a3bf8a385f7252b5
    healthcheck:
      # NOTE(review): no `test` is defined — timings only apply if the image
      # ships its own HEALTHCHECK; confirm.
      <<: *healthcheck
    deploy:
      <<: *placement_app
    environment:
      AUTH_TYPE: basic
      BASIC_AUTH_USER: cerebro
      BASIC_AUTH_PWD: "N@X{M4tfw'5%)+35"
    entrypoint:
      - /opt/cerebro/bin/cerebro
      - -Dhosts.0.host=http://msa_es:9200
    ports:
      - "9000:9000"
    logging:
      <<: *logging
    networks:
      default:
        aliases:
          - "msa_cerebro"
  # Development/tooling container exposing the shared repositories (entities,
  # workflows, devops scripts, SVN) over NAS bind mounts.
  msa-dev:
    image: openmsa/openmsa:msa2-linuxdev-2.8.13-f03103c0845b97ec88434fdb52744aef3c54c976
    healthcheck:
      # NOTE(review): no `test` is defined — timings only apply if the image
      # ships its own HEALTHCHECK; confirm.
      <<: *healthcheck
    deploy:
      <<: *placement_app
    volumes:
      - "/mnt/NASVolume/msa_entities:/opt/fmc_entities"
      - "/mnt/NASVolume/msa_repository:/opt/fmc_repository"
      - "/mnt/NASVolume/msa_dev:/opt/devops/"
      - "/mnt/NASVolume/msa_svn:/opt/svnroot"
      - "/mnt/NASVolume/msa_api:/opt/ubi-jentreprise/generated/conf"
      - "/mnt/NASVolume/msa_svn_ws:/opt/sms/spool/routerconfigs"
    logging:
      <<: *logging
    networks:
      default:
        aliases:
          - "msa_dev"
msa2-es-ilm:
image: openmsa/openmsa:msa2-es-ilm-2.8.13-9931c196f7c08839feea13e1426fc92b953ef311
tty: true
init: true
deploy:
replicas: 0
placement:
max_replicas_per_node: 1
networks:
default:
aliases:
- "msa2_es-ilm"
healthcheck:
test: ["CMD-SHELL", "find /opt/msa2-es-ilm/log/log_retention.log -type f -mmin -10"]
depends_on:
- msa-es
environment:
ELASTICSEARCH_URL: "msa_es:9200"
#For elasticsearch scripts /opt/ubi-es-ilm/log_retention_management.php
#UBI_ES_INDEX_MULTIPLE_TTL: "type:traffic|7d,type:event|30d,*|90d"
UBI_ES_INDEX_MULTIPLE_TTL: "*|90d"
UBI_ES_AUDIT_INDEX_MULTIPLE_TTL: "*|90d"
UBI_ES_LOG_SEARCH_INDEX_LIST: "ubilogs"
UBI_ES_RETENTION_INDEX_NAME: "ubilogs*"
UBI_ES_RETENTION_AUDIT_INDEX_NAME: "ubiaudit*"
UBI_ES_RETENTION_ALARM_INDEX_NAME: "ubialarm*"
UBI_ES_ALARM_INDEX_MULTIPLE_TTL: "*|90d"
UBI_ES_CACHE_INDEX_DEFAULT_TTL: "1w"
UBI_ES_DELETE_SCROLL_SIZE: "4000"
UBI_ES_MAX_DOCS: ""
UBI_ES_LOG_DETENTION_DELETE: "true"
UBI_ES_ILM_LOG_CRONTAB: "*/2 * * * * php /opt/ubi-es-ilm/log_retention_management.php --verbose=3 > /proc/1/fd/1 2>&1"
<<: *es-configuration
volumes:
- "/mnt/NASVolume/msa2_es-ilm:/opt/msa2-es-ilm"
# Named volumes use the default local driver: node-local storage, shared only
# between containers scheduled on the same node (NAS bind mounts above are the
# cross-node storage).
volumes:
  msa_sms_logs:
  msa_monitbulkfiles:
  msa_monitbulkfiles_err:
  msa_parsebulkfiles:
  msa_parsebulkfiles_err:
  msa_alarmbulkfiles:
  msa_alarmbulkfiles_err:
  msa_bulkfiles:
  msa_bulkfiles_err:
  msa_snmptrapbulkfiles:
  msa_snmptrapbulkfiles_err:
  msa_es:
  # NOTE(review): msa_es_config and msa_api_logs are not referenced by any
  # service above — confirm they are still needed.
  msa_es_config:
  msa_api_logs:
  msa2_es-ilm:
networks:
  default:
    # Uncomment to encrypt overlay network traffic between nodes:
    # driver_opts:
    #   encrypted: "true"