chore(core): updated replication settings in server.conf
amunra committed Jan 30, 2024
1 parent 0496ee3 commit 7889f45
Showing 1 changed file with 37 additions and 13 deletions.
50 changes: 37 additions & 13 deletions core/src/main/resources/io/questdb/site/conf/server.conf
@@ -653,8 +653,14 @@ cairo.wal.enabled.default=true
#cairo.wal.purge.interval=30000

# Number of rows written to the same WAL segment before a new segment is started.
# Works in conjunction with `cairo.wal.segment.rollover.size`: whichever limit is hit first triggers the rollover.
#cairo.wal.segment.rollover.row.count=200000

# Number of bytes written to the same WAL segment before a new segment is started.
# Works in conjunction with `cairo.wal.segment.rollover.row.count`: whichever limit is hit first triggers the rollover.
# By default this is 0 (disabled), unless `replication.role=primary` is set, in which case it defaults to 2MiB.
#cairo.wal.segment.rollover.size=0
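
## Example (hypothetical values, not recommendations): roll over to a new
## segment after 100,000 rows or 4MiB of data, whichever limit is hit first.
#cairo.wal.segment.rollover.row.count=100000
#cairo.wal.segment.rollover.size=4194304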

# mmap sliding page size that WalWriter uses to append data for each column
#cairo.wal.writer.data.append.page.size=1M

@@ -752,20 +758,25 @@ cairo.wal.enabled.default=true
#pg.tls.cert.path=
#pg.tls.private.key.path=

## The number of threads dedicated to async IO operations (e.g. network activity) in native code.
#native.async.io.threads=<max/2>

## The number of threads dedicated to blocking IO operations (e.g. file access) in native code.
#native.max.blocking.threads=<max*2>
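
## Example (hypothetical values): override the core-count-derived defaults
## above with explicit thread counts on an 8-core host.
#native.async.io.threads=4
#native.max.blocking.threads=8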

# Possible roles are "primary", "replica", or "none"
# primary - read/write node
# replica - read-only node, consuming data from the primary
# none - replication is disabled
#replication.role=none

## Object-store configuration string.
## AWS S3 example:
## s3::bucket=${BUCKET_NAME};root=${DB_INSTANCE_NAME};region=${AWS_REGION};access_key_id=${AWS_ACCESS_KEY};secret_access_key=${AWS_SECRET_ACCESS_KEY};
## Azure Blob example:
## azblob::endpoint=https://${STORE_ACCOUNT}.blob.core.windows.net;container=${BLOB_CONTAINER};root=${DB_INSTANCE_NAME};account_name=${STORE_ACCOUNT};account_key=${STORE_KEY};
## Filesystem:
## fs::root=/nfs/path/to/dir/final;atomic_write_dir=/nfs/path/to/dir/scratch;
#replication.object.store=
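
## Example (hypothetical paths): a primary instance shipping WAL data to an
## NFS mount, using the `fs::` scheme documented above.
#replication.role=primary
#replication.object.store=fs::root=/mnt/nfs/questdb/final;atomic_write_dir=/mnt/nfs/questdb/scratch;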

## Limits the number of concurrent requests to the object store.
@@ -776,8 +787,21 @@ cairo.wal.enabled.default=true
## logging an error and reattempting later after a delay.
#replication.requests.retry.attempts=3

## Delay between retry attempts, in milliseconds.
#replication.requests.retry.interval=200
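
## Illustrative arithmetic, assuming the interval above is applied between
## consecutive attempts: with the defaults, a failing request is retried 3
## times at 200ms spacing, i.e. abandoned after roughly 600ms, after which
## the error is logged and the request is reattempted later.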

## The time window over which multiple transactions are grouped into a single replication batch (milliseconds).
## Smaller windows reduce replication latency but generate more network traffic;
## larger windows batch more data per request at the cost of higher latency.
## Works in conjunction with `replication.primary.throttle.non.data`.
#replication.primary.throttle.window.duration=10000

## Set to `false` to allow immediate replication of non-data transactions,
## such as table creation, rename, drop, and the uploading of any closed WAL segments.
## Only set to `true` if your application is highly sensitive to network overhead.
## In most cases, tweak `cairo.wal.segment.rollover.size` and
## `replication.primary.throttle.window.duration` instead.
#replication.primary.throttle.non.data=false
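
## Example (hypothetical values): batch data transactions over a 5-second
## window while still replicating schema changes immediately.
#replication.primary.throttle.window.duration=5000
#replication.primary.throttle.non.data=false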

## Max number of threads used to perform file compression operations before
## uploading to the object store. The default value is calculated as half the
@@ -787,5 +811,5 @@ cairo.wal.enabled.default=true
## Zstd compression level. Defaults to 1. Valid values are from 1 to 22.
#replication.primary.compression.level=1
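
## Example (hypothetical value): trade extra CPU time per upload for smaller
## compressed segments.
#replication.primary.compression.level=6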

## Interval at which a replica instance polls for new changes, in milliseconds.
#replication.replica.poll.interval=1000
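
## Example (hypothetical values): a replica consuming from the same object
## store as the primary example above, polling twice per second.
#replication.role=replica
#replication.object.store=fs::root=/mnt/nfs/questdb/final;atomic_write_dir=/mnt/nfs/questdb/scratch;
#replication.replica.poll.interval=500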
