This repository has been archived by the owner on Mar 31, 2021. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 53
/
bigben.yaml
146 lines (137 loc) · 4.05 KB
/
bigben.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
# Top-level pluggable modules, loaded in order. Each entry supplies either a
# `class` (instantiated) or an `object` (Kotlin singleton) implementing the module.
# `enabled` defaults to true when absent; ${var:-default} placeholders are
# resolved from system/env properties at load time.
modules:
  - name: domain
    class: com.walmartlabs.bigben.providers.domain.cassandra.CassandraModule
  - name: processors
    object: com.walmartlabs.bigben.processors.ProcessorRegistry
  - name: hz
    class: com.walmartlabs.bigben.utils.hz.Hz
  - name: scheduler
    object: com.walmartlabs.bigben.SchedulerModule
  - name: events
    object: com.walmartlabs.bigben.EventModule
  - name: messaging
    object: com.walmartlabs.bigben.kafka.KafkaModule
    enabled: ${kafka.module.enabled:-false}
  - name: cron
    object: com.walmartlabs.bigben.cron.CronRunner
    enabled: ${cron.module.enabled:-false}
# Hazelcast (cluster/cache) properties; values overlay the referenced XML template.
hz:
  template: file://hz.template.xml
  group:
    name: bigben-dev
    password: bigben-dev
  network:
    autoIncrementPort: true
    members: 127.0.0.1
    port: 5701
  map:
    store:
      # map-store write-behind delay, seconds
      writeDelay: 30
# message related properties
messaging.producer.factory.class: com.walmartlabs.bigben.kafka.KafkaMessageProducerFactory
# Cassandra driver properties (connection pool sizes mirror the Java driver's
# PoolingOptions; timeouts are milliseconds unless noted).
cassandra:
  keyspace: bigben
  cluster:
    contactPoints: 127.0.0.1
    clusterName: bigben-cluster
    port: 9042
    localDataCenter: null
    coreConnectionsPerLocalHost: 1
    maxConnectionsPerLocalHost: 1
    coreConnectionsPerRemoteHost: 1
    maxConnectionsPerRemoteHost: 1
    maxRequestsPerLocalConnection: 32768
    maxRequestsPerRemoteConnection: 2048
    newLocalConnectionThreshold: 3000
    newRemoteConnectionThreshold: 400
    # 0 = fail fast when the pool is exhausted
    poolTimeoutMillis: 0
    keepTCPConnectionAlive: true
    connectionTimeOut: 5000
    readTimeout: 12000
    # seconds between reconnection attempts
    reconnectPeriod: 5
    username: null
    password: null
    downgradingConsistency: false
    writeConsistency: LOCAL_ONE
    readConsistency: LOCAL_ONE
# Kafka consumer/producer properties. NOTE(review): nesting reconstructed from
# key semantics — `config` maps hold raw Kafka client properties; confirm
# against the consuming code.
kafka:
  consumers:
    - num.consumers: ${num.consumers:-8}
      processor.impl.class: com.walmartlabs.bigben.kafka.ProcessorImpl
      topics: ${bigben.inbound.topic.name:-null}
      max.poll.wait.time: ${max.poll.wait.time:-10000}
      message.retry.max.count: ${message.retry.max.count:-10}
      config:
        key.deserializer: org.apache.kafka.common.serialization.StringDeserializer
        value.deserializer: org.apache.kafka.common.serialization.StringDeserializer
        bootstrap.servers: ${bigben.inbound.topic.bootstrap.servers:-null}
        # fetch.min.bytes: 1
        group.id: ${group.id:-bigben-inbound}
        heartbeat.interval.ms: ${heartbeat.interval.ms:-3000}
        session.timeout.ms: 30000
        auto.offset.reset: ${auto.offset.reset:-latest}
        fetch.max.bytes: 324000
        max.poll.interval.ms: 30000
        max.poll.records: 100
        receive.buffer.bytes: 65536
        request.timeout.ms: 60000
        # send.buffer.bytes: 131072
        enable.auto.commit: ${enable.auto.commit:-false}
  producer:
    # default kafka producer config; these values are used if not supplied
    # during the tenant registration
    config:
      key.serializer: org.apache.kafka.common.serialization.StringSerializer
      value.serializer: org.apache.kafka.common.serialization.StringSerializer
      acks: "1"
      buffer.memory: 32400
      retries: 3
# System properties: retry executor backoff and server basics.
task:
  executor:
    # retry.thread.count: 8
    retry.time.units: SECONDS
    delay: 1
    max.retries: 3
    backoff.multiplier: 2
app.server.port: 8080
# max seconds to block on a generic future get
generic.future.max.get.time: 60
# Event scheduling/receiving properties. NOTE(review): nesting reconstructed —
# `delete` is read as a child of `receiver`, while `submit`, `processor`,
# `tasks`, and `scheduler.worker.threads` are read as direct children of
# `events`; confirm against the consuming code.
events:
  scheduler.enabled: true
  schedule.scan.interval.minutes: 1
  num.shard.submitters: 8
  receiver:
    shard.size: 1000
    lapse.offset.minutes: 0
    delete:
      max.retries: 3
      initial.delay: 1
      backoff.multiplier: 1
  submit:
    initial.delay: 1
    backoff.multiplier: 1
    max.retries: 3
  processor:
    max.retries: 3
    initial.delay: 1
    backoff.multiplier: 2
    eager.loading: true
  tasks:
    max.events.in.memory: 100000
  scheduler.worker.threads: 8
# Bucket manager / loader related properties.
buckets:
  backlog.check.limit: 1440  # 1 day (minutes)
  background:
    load.fetch.size: 100
    load.wait.interval.seconds: 15
# Cron runner properties. NOTE(review): `load` is read as a child of `runner`
# (its retry/backoff keys govern cron load attempts) — confirm against the
# consuming code.
cron:
  runner:
    core.pool.size: 8
    load:
      max.retries: 10
      delay: 1
      backoff.multiplier: 1
      time.units: "SECONDS"