-
Notifications
You must be signed in to change notification settings - Fork 0
/
docker-compose.yml
147 lines (138 loc) · 3.71 KB
/
docker-compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
# Log-processing stack: NiFi ingests, Kafka buffers, Spark processes,
# results land in Cassandra; HDFS provides bulk storage.
version: '2.1'

# networks:
#   log-arch:
#     driver: bridge
#     ipam:
#       config:
#         - subnet: 172.8.0.0/22

services:
  # Apache NiFi — flow-based ingestion; web UI moved to 9090 via env var.
  nifi:
    image: apache/nifi:latest
    container_name: nifi
    restart: always
    environment:
      - NIFI_WEB_HTTP_PORT=9090
    volumes:
      # - ./nifi/conf:/opt/nifi/nifi-current/conf
      - ./nifi/content:/opt/nifi/nifi-current/content_repository
      - ./nifi/db:/opt/nifi/nifi-current/database_repository
      - ./nifi/flowfile:/opt/nifi/nifi-current/flowfile_repository
      - ./nifi/state:/opt/nifi/nifi-current/state
      - ./nifi/logs:/opt/nifi/nifi-current/logs
      - ./nifi/data_store:/opt/nifi/nifi-current/data_store
    ports:
      - "9090:9090"
    # networks:
    #   - log-arch

  # ZooKeeper — coordination service required by this Kafka image.
  zookeeper:
    image: wurstmeister/zookeeper
    container_name: zookeeper
    ports:
      - "2181:2181"
    restart: unless-stopped

  kafka:
    image: wurstmeister/kafka
    container_name: kafka
    ports:
      - "9092:9092"
    environment:
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
      # Dual listeners: INSIDE_LISTENER (kafka:29092) for containers on the
      # compose network, OUTSIDE_LISTENER (localhost:9092) for host clients.
      - KAFKA_LISTENERS=INSIDE_LISTENER://0.0.0.0:29092,OUTSIDE_LISTENER://0.0.0.0:9092
      - KAFKA_ADVERTISED_LISTENERS=INSIDE_LISTENER://kafka:29092,OUTSIDE_LISTENER://localhost:9092
      - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=INSIDE_LISTENER:PLAINTEXT,OUTSIDE_LISTENER:PLAINTEXT
      - KAFKA_INTER_BROKER_LISTENER_NAME=INSIDE_LISTENER
      # FIX: removed KAFKA_ADVERTISED_HOST_NAME=localhost — the
      # wurstmeister/kafka entrypoint exits with an error when it is set
      # together with KAFKA_ADVERTISED_LISTENERS (they are mutually exclusive).
      # topic:partitions:replication-factor
      - KAFKA_CREATE_TOPICS=log_flow:1:1
    restart: unless-stopped
    depends_on:
      - zookeeper
    # networks:
    #   - log-arch

  # HDFS NameNode — metadata server; web UI on 9870, RPC on 8020 (Hadoop 3).
  namenode:
    image: bde2020/hadoop-namenode:2.0.0-hadoop3.2.1-java8
    container_name: namenode
    hostname: namenode
    restart: unless-stopped
    ports:
      - "9870:9870"
      - "8020:8020"
    volumes:
      - ./hdfs/namenode:/hadoop/dfs/name
    environment:
      - CLUSTER_NAME=test
      - CORE_CONF_fs_defaultFS=hdfs://namenode:8020
    # networks:
    #   - log-arch

  # HDFS DataNode — block storage; waits for the NameNode web UI to be up.
  datanode:
    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
    container_name: datanode
    hostname: datanode
    restart: unless-stopped
    volumes:
      # FIX: host path was "./hdfs/dataode" (typo).
      - ./hdfs/datanode:/hadoop/dfs/data
    environment:
      - SERVICE_PRECONDITION=namenode:9870
      - CORE_CONF_fs_defaultFS=hdfs://namenode:8020
    ports:
      # FIX: 50075/50010/50020 are Hadoop 2.x datanode ports; this image runs
      # Hadoop 3.2.1, where the datanode listens on 9864 (web UI),
      # 9866 (data transfer) and 9867 (IPC) — the old mappings exposed nothing.
      - "9864:9864"
      - "9866:9866"
      - "9867:9867"
    depends_on:
      - namenode
    # networks:
    #   - log-arch

  spark-master:
    image: bde2020/spark-master:3.3.0-hadoop3.3
    container_name: spark-master
    hostname: spark-master
    # NOTE(review): no `test:` is given, so these values only override a
    # HEALTHCHECK defined in the image — confirm the image defines one,
    # otherwise this stanza is a no-op.
    healthcheck:
      interval: 5s
      retries: 100
    ports:
      - "8080:8080"
      - "7077:7077"
      - "7040:7040"
      - "7041:7041"
    environment:
      - INIT_DAEMON_STEP=false
      # NOTE(review): hardcoded LAN address — presumably the host machine's IP
      # so executors can reach a driver running on the host; this will break on
      # any other network. Consider parameterizing via an env file.
      - SPARK_DRIVER_HOST=192.168.1.5
    volumes:
      - ./spark-job:/opt/spark_store
    # networks:
    #   - log-arch

  spark-worker-1:
    image: bde2020/spark-worker:3.3.0-hadoop3.3
    container_name: spark-worker-1
    hostname: spark-worker-1
    depends_on:
      - spark-master
    ports:
      - "8081:8081"
    environment:
      - "SPARK_MASTER=spark://spark-master:7077"
    # volumes:
    #   - ./data/spark/:/opt/spark-data
    # networks:
    #   - log-arch

  # Cassandra — result store; heap capped for local development.
  cassandra:
    image: cassandra:3.11.8
    container_name: cassandra
    ports:
      - "9042:9042"
    environment:
      - "MAX_HEAP_SIZE=256M"
      - "HEAP_NEWSIZE=128M"
    restart: unless-stopped
    volumes:
      - ./cassandra/data:/var/lib/cassandra
      # Schema file mounted for manual `cqlsh -f /schema.cql` bootstrap.
      - ./cql/schema.cql:/schema.cql

  # run:
  #   image: "log-viz"
  #   container_name: "dash_viz"
  #   depends_on:
  #     - cassandra
  #   restart: unless-stopped
  #   ports:
  #     - "127.0.0.1:3032:3032"
  #   command: python3 visualization_dash/main.py