chore: restore sample configuration files (#3122)
atzoum committed Mar 22, 2023
1 parent 7becd87 commit 0ac3a6e
Showing 2 changed files with 362 additions and 0 deletions.
259 changes: 259 additions & 0 deletions config/config.yaml
@@ -0,0 +1,259 @@
maxProcess: 12
enableProcessor: true
enableRouter: true
enableStats: true
statsTagsFormat: influxdb
HttpClient:
  timeout: 30s
Http:
  ReadTimeout: 0s
  ReadHeaderTimeout: 0s
  WriteTimeout: 10s
  IdleTimeout: 720s
  MaxHeaderBytes: 524288
RateLimit:
  eventLimit: 1000
  rateLimitWindow: 60m
  noOfBucketsInWindow: 12
Gateway:
  webPort: 8080
  maxUserWebRequestWorkerProcess: 64
  maxDBWriterProcess: 256
  CustomVal: GW
  maxUserRequestBatchSize: 128
  maxDBBatchSize: 128
  userWebRequestBatchTimeout: 15ms
  dbBatchWriteTimeout: 5ms
  maxReqSizeInKB: 4000
  enableRateLimit: false
  enableSuppressUserFeature: true
  allowPartialWriteWithErrors: true
  allowReqsWithoutUserIDAndAnonymousID: false
  webhook:
    batchTimeout: 20ms
    maxBatchSize: 32
    maxTransformerProcess: 64
    maxRetry: 5
    maxRetryTime: 10s
    sourceListForParsingParams:
      - shopify
EventSchemas:
  enableEventSchemasFeature: false
  syncInterval: 240s
  noOfWorkers: 128
Debugger:
  maxBatchSize: 32
  maxESQueueSize: 1024
  maxRetry: 3
  batchTimeout: 2s
  retrySleep: 100ms
LiveEvent:
  cache:
    size: 3
    ttl: 20d
    clearFreq: 5s
SourceDebugger:
  disableEventUploads: false
DestinationDebugger:
  disableEventDeliveryStatusUploads: false
TransformationDebugger:
  disableTransformationStatusUploads: false
Archiver:
  backupRowsBatchSize: 100
JobsDB:
  fairPickup: true
  jobDoneMigrateThres: 0.8
  jobStatusMigrateThres: 5
  maxDSSize: 100000
  maxMigrateOnce: 10
  maxMigrateDSProbe: 10
  maxTableSizeInMB: 300
  migrateDSLoopSleepDuration: 30s
  addNewDSLoopSleepDuration: 5s
  refreshDSListLoopSleepDuration: 5s
  backupCheckSleepDuration: 5s
  backupRowsBatchSize: 1000
  archivalTimeInDays: 10
  archiverTickerTime: 1440m
  backup:
    enabled: true
    gw:
      enabled: true
      pathPrefix: ""
    rt:
      enabled: true
      failedOnly: true
    batch_rt:
      enabled: false
      failedOnly: false
  gw:
    enableWriterQueue: false
    maxOpenConnections: 64
Router:
  jobQueryBatchSize: 10000
  updateStatusBatchSize: 1000
  readSleep: 1000ms
  fixedLoopSleep: 0ms
  noOfJobsPerChannel: 1000
  noOfJobsToBatchInAWorker: 20
  jobsBatchTimeout: 5s
  maxSleep: 60s
  minSleep: 0s
  maxStatusUpdateWait: 5s
  useTestSink: false
  guaranteeUserEventOrder: true
  kafkaWriteTimeout: 2s
  kafkaDialTimeout: 10s
  minRetryBackoff: 10s
  maxRetryBackoff: 300s
  noOfWorkers: 64
  allowAbortedUserJobsCountForProcessing: 1
  maxFailedCountForJob: 3
  retryTimeWindow: 180m
  failedKeysEnabled: true
  saveDestinationResponseOverride: false
  transformerProxy: false
  transformerProxyRetryCount: 15
  GOOGLESHEETS:
    noOfWorkers: 1
  MARKETO:
    noOfWorkers: 4
  throttler:
    algorithm: gcra
    # redis:
    #   addr: localhost:6379
    #   username: ""
    #   password: ""
    MARKETO:
      limit: 45
      timeWindow: 20s
      # throttling by destinationID example below
      # xxxyyyzzSOU9pLRavMf0GuVnWV3:
      #   limit: 90
      #   timeWindow: 10s
  BRAZE:
    forceHTTP1: true
    httpTimeout: 120s
    httpMaxIdleConnsPerHost: 32
BatchRouter:
  mainLoopSleep: 2s
  jobQueryBatchSize: 100000
  uploadFreq: 30s
  warehouseServiceMaxRetryTime: 3h
  noOfWorkers: 8
  maxFailedCountForJob: 128
  retryTimeWindow: 180m
Warehouse:
  mode: embedded
  webPort: 8082
  uploadFreq: 1800s
  noOfWorkers: 8
  noOfSlaveWorkerRoutines: 4
  mainLoopSleep: 5s
  minRetryAttempts: 3
  retryTimeWindow: 180m
  minUploadBackoff: 60s
  maxUploadBackoff: 1800s
  warehouseSyncPreFetchCount: 10
  warehouseSyncFreqIgnore: false
  stagingFilesBatchSize: 960
  enableIDResolution: false
  populateHistoricIdentities: false
  enableJitterForSyncs: false
  redshift:
    maxParallelLoads: 3
  snowflake:
    maxParallelLoads: 3
  bigquery:
    maxParallelLoads: 20
  postgres:
    maxParallelLoads: 3
    enableSQLStatementExecutionPlan: false
  mssql:
    maxParallelLoads: 3
  azure_synapse:
    maxParallelLoads: 3
  clickhouse:
    maxParallelLoads: 3
    queryDebugLogs: false
    blockSize: 1000
    poolSize: 10
    disableNullable: false
    enableArraySupport: false
  deltalake:
    loadTableStrategy: MERGE
Processor:
  webPort: 8086
  loopSleep: 10ms
  maxLoopSleep: 5000ms
  fixedLoopSleep: 0ms
  storeTimeout: 5m
  maxLoopProcessEvents: 10000
  transformBatchSize: 100
  userTransformBatchSize: 200
  maxConcurrency: 200
  maxHTTPConnections: 100
  maxHTTPIdleConnections: 50
  maxRetry: 30
  retrySleep: 100ms
  errReadLoopSleep: 30s
  errDBReadBatchSize: 1000
  noOfErrStashWorkers: 2
  maxFailedCountForErrJob: 3
  enableEventCount: true
  Stats:
    captureEventName: false
Dedup:
  enableDedup: false
  dedupWindow: 3600s
  memOptimized: true
BackendConfig:
  configFromFile: false
  configJSONPath: /etc/rudderstack/workspaceConfig.json
  pollInterval: 5s
  regulationsPollInterval: 300s
  maxRegulationsPerRequest: 1000
  Regulations:
    pageSize: 50
    pollInterval: 300s
recovery:
  enabled: true
  errorStorePath: /tmp/error_store.json
  storagePath: /tmp/recovery_data.json
  normal:
    crashThreshold: 5
    duration: 300s
Logger:
  enableConsole: true
  enableFile: false
  consoleJsonFormat: false
  fileJsonFormat: false
  logFileLocation: /tmp/rudder_log.log
  logFileSize: 100
  enableTimestamp: true
  enableFileNameInLog: true
  enableStackTrace: false
Diagnostics:
  enableDiagnostics: true
  gatewayTimePeriod: 60s
  routerTimePeriod: 60s
  batchRouterTimePeriod: 60s
  enableServerStartMetric: true
  enableConfigIdentifyMetric: true
  enableServerStartedMetric: true
  enableConfigProcessedMetric: true
  enableGatewayMetric: true
  enableRouterMetric: true
  enableBatchRouterMetric: true
  enableDestinationFailuresMetric: true
RuntimeStats:
  enabled: true
  statsCollectionInterval: 10
  enableCPUStats: true
  enableMemStats: true
  enableGCStats: true
PgNotifier:
  retriggerInterval: 2s
  retriggerCount: 500
  trackBatchInterval: 2s
  maxAttempt: 3
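
Note: the defaults above can also be overridden per key through RSERVER_-prefixed environment variables, following the pattern visible in sample.env below (BackendConfig.configFromFile maps to RSERVER_BACKEND_CONFIG_CONFIG_FROM_FILE). A minimal sketch, assuming the same section-and-key-to-underscore mapping holds for other keys; verify the exact variable name against the server's config loader before relying on it:

# hypothetical override of Gateway.webPort from config.yaml above
export RSERVER_GATEWAY_WEB_PORT=8080
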
103 changes: 103 additions & 0 deletions config/sample.env
@@ -0,0 +1,103 @@
CONFIG_PATH=./config/config.yaml
JOBS_DB_HOST=localhost
JOBS_DB_USER=rudder
JOBS_DB_PASSWORD=rudder
JOBS_DB_PORT=5432
JOBS_DB_DB_NAME=jobsdb
JOBS_DB_SSL_MODE=disable

DEST_TRANSFORM_URL=http://localhost:9090
TEST_SINK_URL=http://localhost:8181

CONFIG_BACKEND_URL=https://api.rudderstack.com
CONFIG_BACKEND_TOKEN=<being deprecated soon, use WORKSPACE_TOKEN instead>
WORKSPACE_TOKEN=<your_token_here>

GO_ENV=production

LOG_LEVEL=INFO
INSTANCE_ID=1

STATSD_SERVER_URL=<stat_server_url>

WAREHOUSE_STAGING_BUCKET_FOLDER_NAME=rudder-warehouse-staging-logs
WAREHOUSE_BUCKET_LOAD_OBJECTS_FOLDER_NAME=rudder-warehouse-load-objects
DESTINATION_BUCKET_FOLDER_NAME=rudder-logs

# Uncomment the following for loading workspace config from a file
# RSERVER_BACKEND_CONFIG_CONFIG_FROM_FILE=true
RSERVER_BACKEND_CONFIG_CONFIG_JSONPATH=/home/user/workspaceConfig.json


# Alerting PagerDuty config
ALERT_PROVIDER=pagerduty
PG_ROUTING_KEY=<your_integration/routing_key>

# Alerting VictorOps Config
#ALERT_PROVIDER=victorops
#VICTOROPS_ROUTING_KEY=<your_victorops_routing_key>

# To capture table dumps in AWS S3, uncomment and add AWS IAM keys

# JOBS_BACKUP_STORAGE_PROVIDER=S3
# JOBS_BACKUP_BUCKET=<your_s3_bucket>
# JOBS_BACKUP_PREFIX=<prefix>
# AWS_ACCESS_KEY_ID=
# AWS_SECRET_ACCESS_KEY=

# To capture table dumps in Azure, uncomment and add Azure storage account along with corresponding Azure credentials

# JOBS_BACKUP_STORAGE_PROVIDER=AZURE_BLOB
# JOBS_BACKUP_BUCKET=<your_azure_container>
# JOBS_BACKUP_PREFIX=<prefix>
# AZURE_STORAGE_ACCOUNT=
# AZURE_STORAGE_ACCESS_KEY=

# To capture table dumps in GCS, uncomment and add Google Cloud Storage credentials file path

# JOBS_BACKUP_STORAGE_PROVIDER=GCS
# JOBS_BACKUP_BUCKET=<your_gcs_bucket>
# JOBS_BACKUP_PREFIX=<prefix>
# GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials

# To capture table dumps in MINIO, uncomment and add MINIO Config keys

# JOBS_BACKUP_STORAGE_PROVIDER=MINIO
# JOBS_BACKUP_BUCKET=<your_minio_bucket>
# JOBS_BACKUP_PREFIX=<prefix>
# MINIO_ENDPOINT=localhost:9000
# MINIO_ACCESS_KEY_ID=
# MINIO_SECRET_ACCESS_KEY=
# MINIO_SSL=

# To capture table dumps in Spaces bucket, uncomment and add Spaces Config keys

# JOBS_BACKUP_STORAGE_PROVIDER=DIGITAL_OCEAN_SPACES
# JOBS_BACKUP_BUCKET=<your_spaces_bucket>
# JOBS_BACKUP_PREFIX=<prefix>
# DO_SPACES_ENDPOINT=
# DO_SPACES_ACCESS_KEY_ID=
# DO_SPACES_SECRET_ACCESS_KEY=

# Warehouse db configuration
WAREHOUSE_JOBS_DB_HOST=localhost
WAREHOUSE_JOBS_DB_USER=rudder
WAREHOUSE_JOBS_DB_PASSWORD=rudder
WAREHOUSE_JOBS_DB_SSL_MODE=disable
WAREHOUSE_JOBS_DB_PORT=5432
WAREHOUSE_JOBS_DB_DB_NAME=jobsdb

WAREHOUSE_URL=http://localhost:8082
CP_ROUTER_USE_TLS=true
# Destination connection testing
RUDDER_CONNECTION_TESTING_BUCKET_FOLDER_NAME=rudder-test-payload

# The keys below enable 2-way SSL (mutual TLS) for Kafka. Uncomment them to turn it on.
# If you set them, make sure each value points to a valid key and certificate file.
# KAFKA_SSL_CERTIFICATE_FILE_PATH=<certificate_file_path>
# KAFKA_SSL_KEY_FILE_PATH=<key_file_path>

# The keys below grant access for uploading to and downloading from the S3 bucket, and are also used by the COPY command for REDSHIFT.
# Use them if you do not want to provide AWS credentials in the control plane, e.g. for S3 or REDSHIFT.
# RUDDER_AWS_S3_COPY_USER_ACCESS_KEY_ID=<rudder user access key>
# RUDDER_AWS_S3_COPY_USER_ACCESS_KEY=<rudder user access key secret>
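
# A usage sketch (the file locations and image name below are illustrative assumptions, not part of this commit):
# copy the sample file, edit the values for your deployment, and load it into your shell or a container.
#
#   cp config/sample.env .env
#   set -a; source .env; set +a        # load the variables for a locally running server binary
#   docker run --env-file .env rudderlabs/rudder-server   # or pass them to a containerised server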
