feat(conf): add new configuration settings to server.conf (including Enterprise settings) #4175

Merged
merged 4 commits on Jan 30, 2024
14 changes: 7 additions & 7 deletions core/src/main/java/io/questdb/PropServerConfiguration.java
@@ -354,7 +354,6 @@ public class PropServerConfiguration implements ServerConfiguration {
private final int walPurgeWaitBeforeDelete;
private final int walRecreateDistressedSequencerAttempts;
private final long walSegmentRolloverRowCount;
private final long walSegmentRolloverSize;
private final double walSquashUncommittedRowsMultiplier;
private final boolean walSupported;
private final int walTxnNotificationQueueCapacity;
@@ -377,6 +376,7 @@ public class PropServerConfiguration implements ServerConfiguration {
protected HttpServerConfiguration httpServerConfiguration = new PropHttpServerConfiguration();
protected JsonQueryProcessorConfiguration jsonQueryProcessorConfiguration = new PropJsonQueryProcessorConfiguration();
protected StaticContentProcessorConfiguration staticContentProcessorConfiguration;
protected long walSegmentRolloverSize;
private long cairoSqlCopyMaxIndexChunkSize;
private FactoryProvider factoryProvider;
private short floatDefaultColumnType;
@@ -910,7 +910,6 @@ public PropServerConfiguration(
this.sqlRenameTableModelPoolCapacity = getInt(properties, env, PropertyKey.CAIRO_SQL_RENAME_TABLE_MODEL_POOL_CAPACITY, 16);
this.sqlWithClauseModelPoolCapacity = getInt(properties, env, PropertyKey.CAIRO_SQL_WITH_CLAUSE_MODEL_POOL_CAPACITY, 128);
this.sqlInsertModelPoolCapacity = getInt(properties, env, PropertyKey.CAIRO_SQL_INSERT_MODEL_POOL_CAPACITY, 64);
this.sqlCopyModelPoolCapacity = getInt(properties, env, PropertyKey.CAIRO_SQL_COPY_MODEL_POOL_CAPACITY, 32);
this.sqlCopyBufferSize = getIntSize(properties, env, PropertyKey.CAIRO_SQL_COPY_BUFFER_SIZE, 2 * Numbers.SIZE_1MB);
this.columnPurgeQueueCapacity = getQueueCapacity(properties, env, PropertyKey.CAIRO_SQL_COLUMN_PURGE_QUEUE_CAPACITY, 128);
this.columnPurgeTaskPoolCapacity = getIntSize(properties, env, PropertyKey.CAIRO_SQL_COLUMN_PURGE_TASK_POOL_CAPACITY, 256);
@@ -1206,6 +1205,7 @@ public PropServerConfiguration(
final int defaultReduceShardCount = Math.min(sharedWorkerCount, 4);
this.cairoPageFrameReduceShardCount = getInt(properties, env, PropertyKey.CAIRO_PAGE_FRAME_SHARD_COUNT, defaultReduceShardCount);
this.sqlParallelFilterPreTouchEnabled = getBoolean(properties, env, PropertyKey.CAIRO_SQL_PARALLEL_FILTER_PRETOUCH_ENABLED, true);
this.sqlCopyModelPoolCapacity = getInt(properties, env, PropertyKey.CAIRO_SQL_COPY_MODEL_POOL_CAPACITY, sharedWorkerCount);

boolean defaultParallelSqlEnabled = sharedWorkerCount >= 4;
this.sqlParallelFilterEnabled = getBoolean(properties, env, PropertyKey.CAIRO_SQL_PARALLEL_FILTER_ENABLED, defaultParallelSqlEnabled);
@@ -2132,6 +2132,11 @@ public int getMaxFileNameLength() {
return maxFileNameLength;
}

@Override
public int getMaxSqlRecompileAttempts() {
return maxSqlRecompileAttempts;
}

@Override
public int getMaxSwapFileCount() {
return maxSwapFileCount;
@@ -2839,11 +2844,6 @@ public boolean isWriterMixedIOEnabled() {
public boolean mangleTableDirNames() {
return false;
}

@Override
public int getMaxSqlRecompileAttempts() {
return maxSqlRecompileAttempts;
}
}

private class PropHttpContextConfiguration implements HttpContextConfiguration {
104 changes: 86 additions & 18 deletions core/src/main/resources/io/questdb/site/conf/server.conf
@@ -311,21 +311,51 @@ query.timeout.sec=60
# sets initial size of per-partition window function range frame buffer
#cairo.sql.window.initial.range.buffer.size=32

# size of CreateTableModel pool in SqlParser
#cairo.sql.create.table.model.pool.capacity=16

# size of ColumnCastModel pool in SqlParser
#cairo.sql.column.cast.model.pool.capacity=16

# size of RenameTableModel pool in SqlParser
#cairo.sql.rename.table.model.pool.capacity=16

# size of WithClauseModel pool in SqlParser
#cairo.sql.with.clause.model.pool.capacity=128

# size of InsertModel pool in SqlParser
#cairo.sql.insert.model.pool.capacity=64

# enables parallel GROUP BY execution; even when enabled, parallel GROUP BY requires at least 4 shared worker threads to take effect
#cairo.sql.parallel.groupby.enabled=true

# merge queue capacity for parallel GROUP BY; used for parallel tasks that merge shard hash tables
#cairo.sql.parallel.groupby.merge.shard.queue.capacity=<auto>

# threshold for parallel GROUP BY to shard the hash table holding the aggregates
#cairo.sql.parallel.groupby.sharding.threshold=100000

# default size for memory buffers in GROUP BY function native memory allocator
#cairo.sql.groupby.allocator.default.chunk.size=128k

# maximum allowed native memory allocation for GROUP BY functions
#cairo.sql.groupby.allocator.max.chunk.size=4gb

# threshold in bytes for switching from a single-buffer hash table (unordered) to a hash table with a separate heap for entries (ordered)
#cairo.sql.unordered.map.max.entry.size=24
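
## Illustrative sketch, not part of this change (placeholder values): enabling parallel
## GROUP BY explicitly and giving its native allocator larger chunks. Parallel GROUP BY
## still needs at least 4 shared worker threads to kick in.
#cairo.sql.parallel.groupby.enabled=true
#cairo.sql.groupby.allocator.default.chunk.size=256k
#cairo.sql.groupby.allocator.max.chunk.size=8gb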

## prevents stack overflow errors when evaluating complex nested SQLs
## the value is an approximate number of nested SELECT clauses.
#cairo.sql.window.max.recursion=128

## pre-sizes the internal data structure that stores active query executions
## the value is chosen automatically based on the number of threads in the shared worker pool
#cairo.sql.query.registry.pool.size=<auto>

## window function buffer size in record counts
## pre-sizes the buffer for every window function execution to contain window records
#cairo.sql.analytic.initial.range.buffer.size=32

#### SQL COPY

# size of CopyModel pool in SqlParser
@@ -431,6 +461,9 @@ query.timeout.sec=60
# Memory page size per column for O3 operations. Please be aware that O3 will use 2x this amount of RAM per column
#cairo.o3.column.memory.size=8M

# Memory page size per column for O3 operations on System tables only
#cairo.system.o3.column.memory.size=256k

# Number of partitions expected on average; initial value for the purge allocation job, extended automatically at runtime
#cairo.o3.partition.purge.list.initial.capacity=1

@@ -448,6 +481,9 @@ query.timeout.sec=60
# mmap page size for appending value data; value data are rowids, e.g. number of rows in partition times 8 bytes
#cairo.writer.data.index.value.append.page.size=16M

# mmap sliding page size that TableWriter uses to append data for each column specifically for System tables
#cairo.system.writer.data.append.page.size=256k

# Maximum wait timeout in milliseconds for ALTER TABLE SQL statement run via REST and PG Wire interfaces when statement execution is ASYNCHRONOUS
#cairo.writer.alter.busy.wait.timeout=500

@@ -490,6 +526,7 @@ query.timeout.sec=60
#cairo.page.frame.task.pool.capacity=4

################ LINE settings ######################

#line.default.partition.by=DAY

# Enable / Disable automatic creation of new columns in existing tables via ILP. When set to false, it overrides the value of line.auto.create.new.tables to false
@@ -514,6 +551,7 @@ query.timeout.sec=60
#line.udp.timestamp=n

######################### LINE TCP settings ###############################

#line.tcp.enabled=true
#line.tcp.net.bind.to=0.0.0.0:9009
#line.tcp.net.connection.limit=256
@@ -574,6 +612,12 @@ query.timeout.sec=60
# Minimum amount of idle time before a table writer is released in milliseconds
#line.tcp.min.idle.ms.before.writer.release=500

######################### LINE HTTP settings ###############################

#line.http.enabled=true

#line.http.ping.version=v2.2.2

################ PG Wire settings ##################

#pg.enabled=true
@@ -653,8 +697,14 @@ cairo.wal.enabled.default=true
#cairo.wal.purge.interval=30000

# Number of rows written to the same WAL segment before starting a new segment.
# Triggers in conjunction with `cairo.wal.segment.rollover.size` (whichever comes first).
#cairo.wal.segment.rollover.row.count=200000

# Size in bytes written to the same WAL segment before starting a new segment.
# Triggers in conjunction with `cairo.wal.segment.rollover.row.count` (whichever comes first).
# By default this is 0 (disabled) unless `replication.role=primary` is set, in which case it defaults to 2MiB.
#cairo.wal.segment.rollover.size=0
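
## Illustrative sketch, not part of this change (placeholder values): a primary node
## rolling WAL segments at whichever limit is reached first, with the size limit
## expressed in bytes (2 MiB here).
#replication.role=primary
#cairo.wal.segment.rollover.row.count=200000
#cairo.wal.segment.rollover.size=2097152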

# mmap sliding page size that WalWriter uses to append data for each column
#cairo.wal.writer.data.append.page.size=1M

@@ -752,20 +802,25 @@ cairo.wal.enabled.default=true
#pg.tls.cert.path=
#pg.tls.private.key.path=

#native.async.io.threads=<max>
#native.max.blocking.threads=<max>
## The number of threads dedicated to async IO operations (e.g. network activity) in native code.
#native.async.io.threads=<max/2>

## The number of threads dedicated to blocking IO operations (e.g. file access) in native code.
#native.max.blocking.threads=<max*2>

# Possible roles are PRIMARY, REPLICA, or NONE
# PRIMARY - read/write node
# REPLICA - read-only node, consuming data from PRIMARY
# NONE - replication is disabled
#replication.role=NONE
# Possible roles are "primary", "replica", or "none"
# primary - read/write node
# replica - read-only node, consuming data from the primary
# none - replication is disabled
#replication.role=none

## Object-store configuration string; the format depends on the store type, as shown in the examples below.
## AWS S3 example:
## s3::bucket=${BUCKET_NAME};root=${DB_INSTANCE_NAME};region=${AWS_REGION};access_key_id=${AWS_ACCESS_KEY};secret_access_key=${AWS_SECRET_ACCESS_KEY}
## s3::bucket=${BUCKET_NAME};root=${DB_INSTANCE_NAME};region=${AWS_REGION};access_key_id=${AWS_ACCESS_KEY};secret_access_key=${AWS_SECRET_ACCESS_KEY};
## Azure Blob example:
## azblob::endpoint=https://${STORE_ACCOUNT}.blob.core.windows.net;container={BLOB_CONTAINER};root=${DB_INSTANCE_NAME};account_name=${STORE_ACCOUNT};account_key=${STORE_KEY}
## azblob::endpoint=https://${STORE_ACCOUNT}.blob.core.windows.net;container={BLOB_CONTAINER};root=${DB_INSTANCE_NAME};account_name=${STORE_ACCOUNT};account_key=${STORE_KEY};
## Filesystem:
## fs::root=/nfs/path/to/dir/final;atomic_write_dir=/nfs/path/to/dir/scratch;
#replication.object.store=
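
## Illustrative sketch, not part of this change (placeholder paths): a local-filesystem
## object store, useful for testing replication without any cloud credentials.
#replication.object.store=fs::root=/mnt/replication/final;atomic_write_dir=/mnt/replication/scratch;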

## Limits the number of concurrent requests to the object store.
@@ -776,8 +831,21 @@ cairo.wal.enabled.default=true
## logging an error and reattempting later after a delay.
#replication.requests.retry.attempts=3

## Delay in seconds between the retry attempts
#replication.requests.retry.interval=10
## Delay between the retry attempts (milliseconds)
#replication.requests.retry.interval=200

## The time window grouping multiple transactions into a replication batch (milliseconds).
## Smaller time windows use more network traffic.
## Larger time windows increase the replication latency.
## Works in conjunction with `replication.primary.throttle.non.data`.
#replication.primary.throttle.window.duration=10000

## Set to `false` to allow immediate replication of non-data transactions
## such as table creation, rename, drop, and uploading of any closed WAL segments.
## Only set to `true` if your application is highly sensitive to network overhead.
## In most cases, tweak `cairo.wal.segment.rollover.size` and
## `replication.primary.throttle.window.duration` instead.
#replication.primary.throttle.non.data=false
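
## Illustrative sketch, not part of this change (placeholder values): a primary tuned for
## lower replication latency by shrinking the batching window; non-data transactions
## (table creation, rename, drop) still replicate immediately.
#replication.primary.throttle.window.duration=2000
#replication.primary.throttle.non.data=false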

## Max number of threads used to perform file compression operations before
## uploading to the object store. The default value is calculated as half the
Expand All @@ -787,5 +855,5 @@ cairo.wal.enabled.default=true
## Zstd compression level. Defaults to 1. Valid values are from 1 to 22.
#replication.primary.compression.level=1

## Polling rate of a replica instance to check for the availability of new changes.
#replication.replica.poll.sec=1
## Polling rate of a replica instance to check for new changes (milliseconds).
#replication.replica.poll.interval=1000
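
## Illustrative sketch, not part of this change (placeholder values): a replica consuming
## from the same object store as the primary and polling for new changes every 500 ms.
#replication.role=replica
#replication.object.store=s3::bucket=${BUCKET_NAME};root=${DB_INSTANCE_NAME};region=${AWS_REGION};access_key_id=${AWS_ACCESS_KEY};secret_access_key=${AWS_SECRET_ACCESS_KEY};
#replication.replica.poll.interval=500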