#
# This file gives details of the various configuration parameters you can set
# when running stellar-core. You will need to edit to fit your own set up.
#
# This is a TOML file. See https://github.com/toml-lang/toml for syntax.
###########################
## General admin settings
# LOG_FILE_PATH (string) default "stellar-core.log"
# Path to the file you want stellar-core to write its log to.
# You can set to "" for no log file.
LOG_FILE_PATH=""
# BUCKET_DIR_PATH (string) default "buckets"
# Specifies the directory where stellar-core should store the bucket list.
# This will get written to a lot and will grow as the size of the ledger grows.
BUCKET_DIR_PATH="buckets"
# DATABASE (string) default "sqlite3://:memory:"
# Sets the DB connection string for SOCI.
# Defaults to an in memory database.
# If using sqlite, a string like:
#
# "sqlite3://path/to/dbname.db"
#
# alternatively, if using postgresql, a string like:
#
# "postgresql://dbname=stellar user=xxxx password=yyyy host=10.0.x.y"
#
# taking any combination of parameters from:
#
# http://www.postgresql.org/docs/devel/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
#
DATABASE="sqlite3://stellar.db"
# HTTP_PORT (integer) default 11626
# What port stellar-core listens for commands on.
HTTP_PORT=11626
# PUBLIC_HTTP_PORT (true or false) default false
# If false, stellar commands are only accepted from localhost.
# Do not set this to true and expose the port to the open internet, as that
# would allow anyone to run stellar commands (such as `stop`) on your server.
PUBLIC_HTTP_PORT=false
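# As an illustrative sketch (assuming stellar-core is running locally and
# listening on the HTTP_PORT configured above), commands are issued as plain
# HTTP requests, for example:
#
#   curl "http://127.0.0.1:11626/ll?level=info&partition=Herder"
#   curl http://127.0.0.1:11626/stop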
# HTTP_MAX_CLIENT (Integer)
# Maximum number of simultaneous HTTP clients
HTTP_MAX_CLIENT=128
# COMMANDS (list of strings) default is empty
# List of commands to run on startup.
# Right now only setting log levels really makes sense.
COMMANDS=[
"ll?level=info&partition=Herder"
]
# NODE_NAMES (list of strings) default is empty
# Convenience mapping of common names to node IDs. A common name can be used
# elsewhere in this .cfg as `$common_name`. If set, common names will also
# appear in your logs instead of the less friendly node IDs.
NODE_NAMES=[
"GA22N4YGO7IJDRF2SISA5KHULGYYKDXBQGYIWUVNMSNHF5G2DNBKP3M5 eliza",
"GCDENOCHA6TQL6DFC4FS54HIH7RP7XR7VZCQZFANMGLT2WXJ7D7KGV2P hal9000"
]
###########################
## Configure which network this instance should talk to
NETWORK_PASSPHRASE="Public Global Stellar Network ; September 2015"
###########################
## Overlay configuration
# PEER_PORT (Integer) defaults to 11625
# The port other instances of stellar-core can connect to you on.
PEER_PORT=11625
# TARGET_PEER_CONNECTIONS (Integer) default 8
# This controls how aggressively the server will connect to other peers.
# It will send outbound connection attempts until it is at this
# number of peer connections.
TARGET_PEER_CONNECTIONS=8
# MAX_ADDITIONAL_PEER_CONNECTIONS (Integer) default -1
# Number of peers above the target that can connect to this instance.
# This gives room for other peers to connect to this instance.
# Setting this too low may leave peers stranded out of the network.
# -1: use TARGET_PEER_CONNECTIONS as value for this field
MAX_ADDITIONAL_PEER_CONNECTIONS=-1
# MAX_PEER_CONNECTIONS (Integer) default 12
# (being deprecated in favor of MAX_ADDITIONAL_PEER_CONNECTIONS)
# This server will start dropping peers if it is above this number of
# connected peers. The more peers you are connected to, the higher
# the bandwidth requirements.
# The effective value of this field is the maximum of this setting and
# (TARGET_PEER_CONNECTIONS + MAX_ADDITIONAL_PEER_CONNECTIONS)
MAX_PEER_CONNECTIONS=12
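# Worked example using the formula above: with TARGET_PEER_CONNECTIONS=8 and
# MAX_ADDITIONAL_PEER_CONNECTIONS=-1 (which resolves to 8), the effective limit
# is max(MAX_PEER_CONNECTIONS, 8 + 8) = max(12, 16) = 16 connected peers.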
# MAX_PENDING_CONNECTIONS (Integer) default 5000
# Maximum number of pending (unauthenticated) connections to this server.
# Connections beyond this limit will be dropped immediately.
MAX_PENDING_CONNECTIONS=5000
# PEER_AUTHENTICATION_TIMEOUT (Integer) default 2
# This server will drop a peer that does not authenticate itself within this
# many seconds.
PEER_AUTHENTICATION_TIMEOUT=2
# PEER_TIMEOUT (Integer) default 30
# Once authenticated, this server will drop a peer that has not sent or
# received anything within this many seconds.
PEER_TIMEOUT=30
# PREFERRED_PEERS (list of strings) default is empty
# These are IP:port strings that this server will add to its DB of peers.
# This server will try to always stay connected to the other peers on this list.
PREFERRED_PEERS=["127.0.0.1:7000","127.0.0.1:8000"]
# PREFERRED_PEER_KEYS (list of strings) default is empty
# These are public key identities that this server will treat as preferred
# when connecting, similar to the PREFERRED_PEERS list.
# A name already defined in NODE_NAMES can also be used (e.g. `$eliza`).
PREFERRED_PEER_KEYS=[
"GBKXI3TVIFHD6QDSNMUOTJFDWHDYDVRRPWIHN4IM2YFXIUEWDTY7DSSI",
"GBDOAYUPGQCPLJCP2HYJQ4W3ADODJFZISHRBQTQB7SFVR4BRUX46RYIP optional_common_name",
"$eliza"]
# PREFERRED_PEERS_ONLY (boolean) default is false
# When set to true, this peer will only connect to PREFERRED_PEERS and will
# only accept connections from PREFERRED_PEERS or PREFERRED_PEER_KEYS
PREFERRED_PEERS_ONLY=false
# MINIMUM_IDLE_PERCENT (Integer) default 0
# Percentage, between 0 and 100, of idle time (measured in terms of both
# event-loop cycles and database time) below which the system will consider
# itself "loaded" and attempt to shed load. Set this number low and the system
# will be tolerant of overloading. Set it high and the system will be
# intolerant. The default of 0 means totally insensitive to overloading.
MINIMUM_IDLE_PERCENT=0
# KNOWN_PEERS (list of strings) default is empty
# These are IP:port strings that this server will add to its DB of peers.
# It will try to connect to these when it is below TARGET_PEER_CONNECTIONS.
KNOWN_PEERS=[
"core-testnet1.stellar.org",
"core-testnet2.stellar.org",
"core-testnet3.stellar.org"]
# KNOWN_CURSORS (list of strings) default is empty
# Set of cursors added at each startup with value '1'.
KNOWN_CURSORS=["HORIZON"]
#######################
## SCP settings
# NODE_SEED (string) default random, regenerated each run.
# The seed used for generating the public key this node will
# be identified with in SCP.
# Your seed should be unique. Protect your seed. Treat it like a password.
# If you don't set a NODE_SEED one will be generated for you randomly
# on each startup.
#
# To generate a new, stable seed (and associated public key), run:
#
# stellar-core --genseed
#
# You only need to keep the seed from this; you can always recover the
# public key from the seed by running:
#
# stellar-core --convertid <seed>
#
# This example also adds a common name, `self`, to the NODE_NAMES list with the
# public key associated with this seed.
NODE_SEED="SBI3CZU7XZEWVXU7OZLW5MMUQAP334JFOPXSLTPOH43IRTEQ2QYXU5RG self"
# NODE_IS_VALIDATOR (boolean) default false.
# Only nodes that want to participate in SCP should set NODE_IS_VALIDATOR=true.
# Most instances should operate in observer mode with NODE_IS_VALIDATOR=false.
# See QUORUM_SET below.
NODE_IS_VALIDATOR=false
###########################
# Consensus settings
# FAILURE_SAFETY (integer) default -1
# Most people should leave this at -1.
# This is the maximum number of validator failures from your QUORUM_SET that
# you want to be able to tolerate.
# Typically, you will need at least 3f+1 nodes in your quorum set.
# If you don't have enough nodes in your quorum set to tolerate the level you
# set here, stellar-core won't run as a precaution.
# A value of -1 indicates to use (n-1)/3 (n being the number of nodes
# and groups from the top level of your QUORUM_SET)
# A value of 0 is only allowed if UNSAFE_QUORUM is set
# Note: The value of 1 below is the maximum number derived from the value of
# QUORUM_SET in this configuration file
FAILURE_SAFETY=1
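# Worked example: the QUORUM_SET at the end of this file has 5 entries at its
# top level (3 validators plus 2 nested sets), so (n-1)/3 = (5-1)/3 = 1, which
# matches the value set here.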
# UNSAFE_QUORUM (true or false) default false
# Most people should leave this at false.
# If set to true, allows specifying a potentially unsafe quorum set.
# Otherwise stellar-core won't start if a threshold % is set too low
# (below 66% for the top level, 51% for other levels), or if FAILURE_SAFETY
# is 0 or is above the number of failures that can actually be tolerated.
# You might want to set this if you are running your own network and
# aren't concerned with Byzantine failures, or if you fully understand how the
# quorum sets of other nodes relate to yours when it comes to
# quorum intersection.
UNSAFE_QUORUM=false
#########################
## History
# CATCHUP_COMPLETE (true or false) defaults to false
# if true, will catch up to the network "completely" (replaying all history)
# if false, will use CATCHUP_RECENT for catchup settings
CATCHUP_COMPLETE=false
# CATCHUP_RECENT (integer) defaults to 0
# if CATCHUP_COMPLETE is true this option is ignored
# if set to 0, will catch up "minimally", using deltas to the most recent
# snapshot.
# if set to any other number, will catch up "minimally" to some past snapshot,
# then replay history from that point to the current snapshot, ensuring that
# at least CATCHUP_RECENT ledgers will be present in the database.
# if "some past snapshot" is already present in the database, it just replays
# all new history.
CATCHUP_RECENT=1024
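# Illustration of the setting above: with CATCHUP_RECENT=1024 the node first
# catches up minimally to a snapshot far enough in the past, then replays
# forward, so at least the most recent 1024 ledgers end up in the database.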
# MAX_CONCURRENT_SUBPROCESSES (integer) default 16
# History catchup can potentially spawn a bunch of sub-processes.
# This limits the number that will be active at a time.
MAX_CONCURRENT_SUBPROCESSES=10
# AUTOMATIC_MAINTENANCE_PERIOD (integer, seconds) default 14400
# Interval between automatic maintenance executions
# Set to 0 to disable automatic maintenance
AUTOMATIC_MAINTENANCE_PERIOD=14400
# AUTOMATIC_MAINTENANCE_COUNT (integer) default 50000
# Number of unneeded ledgers in each table that will be removed during one
# maintenance run.
# Set to 0 to disable automatic maintenance
AUTOMATIC_MAINTENANCE_COUNT=5000
###############################
## The following options should probably never be set. They are used primarily
## for testing.
# RUN_STANDALONE (true or false) defaults to false
# This is a mode for testing. It prevents you from trying to connect
# to other peers
RUN_STANDALONE=false
# INVARIANT_CHECKS (list of strings) default is empty
# Setting this will cause specified invariants to be checked on ledger close and
# on bucket apply.
# Strings specified are matched (as regex) against the list of invariants.
# For example, to enable all invariants use ".*"
# List of invariants:
# - "AccountSubEntriesCountIsValid"
# Setting this will cause additional work on each operation apply - it
# checks whether the change in the number of subentries of an account
# (signers + offers + data + trustlines) equals the change in the
# numSubEntries value stored in the account. This check is only performed
# for accounts modified in any way in a given ledger.
# The overhead may cause slower systems to not perform as fast as the rest
# of the network; caution is advised when using this.
# - "BucketListIsConsistentWithDatabase"
# Setting this will cause additional work on each bucket apply - it checks a
# variety of properties that should be satisfied by an applied bucket, for
# detailed information about what is checked see the comment in the header
# invariant/BucketListIsConsistentWithDatabase.h.
# The overhead may cause a system to catch up more than once before being
# in sync with the network.
# - "CacheIsConsistentWithDatabase"
# Setting this will cause additional work on each operation apply - it
# checks whether the internal cache of ledger entries is consistent with the
# contents of the database. It is equivalent to PARANOID_MODE from older
# versions of stellar-core.
# The overhead may cause slower systems to not perform as fast as the rest
# of the network; caution is advised when using this.
# - "ConservationOfLumens"
# Setting this will cause additional work on each operation apply - it
# checks that the total number of lumens only changes during inflation.
# The overhead may cause slower systems to not perform as fast as the rest
# of the network; caution is advised when using this.
# - "LedgerEntryIsValid"
# Setting this will cause additional work on each operation apply - it
# checks a variety of properties that must be true for a LedgerEntry to be
# valid.
# The overhead may cause slower systems to not perform as fast as the rest
# of the network; caution is advised when using this.
# - "LiabilitiesMatchOffers"
# Setting this will cause additional work on each operation apply - it
# checks that accounts, trust lines, and offers satisfy all constraints
# associated with liabilities. For additional information, see the comment
# in the header invariant/LiabilitiesMatchOffers.h.
# The overhead may cause slower systems to not perform as fast as the rest
# of the network; caution is advised when using this.
INVARIANT_CHECKS = []
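# For example (illustrative only), to enable just the bucket-list check
# described above, accepting the extra catchup overhead, you could set:
# INVARIANT_CHECKS = [ "BucketListIsConsistentWithDatabase" ]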
# MANUAL_CLOSE (true or false) defaults to false
# Mode for testing. The ledger will only close when stellar-core receives
# the `manualclose` command.
MANUAL_CLOSE=false
# ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING (true or false) defaults to false
# Enables synthetic load generation on demand.
# The load is triggered by the `generateload` runtime command.
# This option only exists for stress-testing and should not be enabled in
# production networks.
ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING=false
# ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING (true or false) defaults to false
# Reduces ledger close time to 1s and checkpoint frequency to every 8 ledgers.
# Do not ever set this in production, as it will make your history archives
# incompatible with those of anyone else.
ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING=false
# ARTIFICIALLY_SET_CLOSE_TIME_FOR_TESTING (in seconds), defaults to no override
# Overrides the close time to the specified value but does not change checkpoint
# frequency - this may cause network instability.
# Do not use in production.
ARTIFICIALLY_SET_CLOSE_TIME_FOR_TESTING=0
# ALLOW_LOCALHOST_FOR_TESTING defaults to false
# Allows connecting to localhost. This should not be enabled on production
# systems as it is a security threat.
ALLOW_LOCALHOST_FOR_TESTING=false
#####################
## Tables must come at the end. (TOML you are almost perfect!)
# HISTORY
# Used to specify where to fetch and store the history archives.
# Fetching and storing history is kept as general as possible.
# Any place you can save and load static files from should be usable by the
# stellar-core history system: S3, the file system, HTTP, etc.
# stellar-core will call any external process you specify and will pass it the
# name of the file to save or load.
# Simply use template parameters `{0}` and `{1}` in place of the files being
# transmitted or retrieved.
# You can specify multiple places to store and fetch from. stellar-core will
# use additional fetch locations as backups in case fetching from one fails.
#
# Note: for any archive you *put* to, you must run `stellar-core --newhist <historyarchive>`
# once before you start.
# For example, with this config you would run: stellar-core --newhist local
[HISTORY.local]
get="cp /tmp/stellar-core/history/vs/{0} {1}"
put="cp {0} /tmp/stellar-core/history/vs/{1}"
mkdir="mkdir -p /tmp/stellar-core/history/vs/{0}"
# other examples:
# [HISTORY.stellar]
# get="curl http://history.stellar.org/{0} -o {1}"
# put="aws s3 cp {0} s3://history.stellar.org/{1}"
# [HISTORY.backup]
# get="curl http://backupstore.blob.core.windows.net/backupstore/{0} -o {1}"
# put="azure storage blob upload {0} backupstore {1}"
# The history store of the Stellar testnet
#[HISTORY.h1]
#get="curl -sf http://s3-eu-west-1.amazonaws.com/history.stellar.org/prd/core-testnet/core_testnet_001/{0} -o {1}"
#[HISTORY.h2]
#get="curl -sf http://s3-eu-west-1.amazonaws.com/history.stellar.org/prd/core-testnet/core_testnet_002/{0} -o {1}"
#[HISTORY.h3]
#get="curl -sf http://s3-eu-west-1.amazonaws.com/history.stellar.org/prd/core-testnet/core_testnet_003/{0} -o {1}"
# QUORUM_SET is a required field
# This is how you specify this server's quorum set.
#
# It can be nested up to 2 levels: {A,B,C,{D,E,F},{G,H,{I,J,K,L}}}
# THRESHOLD_PERCENT is how many have to agree (1-100%) within a given set.
# Each set is treated as one vote.
# So for example in the above there are 5 things that can vote:
# individual validators: A,B,C, and the sets {D,E,F} and {G,H with subset {I,J,K,L}}
# the sets each have their own threshold.
# For example with {100% G,H with subset {50% I,J,K,L}}
# means that quorum will be met with G, H and any 2 (50%) of {I, J, K, L}
#
# a [QUORUM_SET.path] section is constructed as
# THRESHOLD_PERCENT: how many have to agree, defaults to 67 (rounds up).
# VALIDATORS: array of node IDs
# additional subsets [QUORUM_SET.path.item_number]
# A QUORUM_SET must not contain duplicate entries; for example {{A,B},{A,C}}
# is invalid.
# The key for "self" is implicitly added at the top level, so the effective
# quorum set is [t:2, self, QUORUM_SET]. Note that "self" is always agreeing
# with the instance (if QUORUM_SET includes it)
#
# The following setup is equivalent to the example given above.
#
# Note on naming: you can add common names to the NODE_NAMES list here, as
# shown for the first 3 validators, or use common names that have been
# previously defined.
[QUORUM_SET]
THRESHOLD_PERCENT=66
VALIDATORS=[
"GDQWITFJLZ5HT6JCOXYEVV5VFD6FTLAKJAUDKHAV3HKYGVJWA2DPYSQV A_from_above",
"GANLKVE4WOTE75MJS6FQ73CL65TSPYYMFZKC4VDEZ45LGQRCATGAIGIA B_from_above",
"GDV46EIEF57TDL4W27UFDAUVPDDCKJNVBYB3WIV2WYUYUG753FCFU6EJ C_from_above"
]
[QUORUM_SET.1]
THRESHOLD_PERCENT=67
VALIDATORS=[
"$self", # 'D' from above is this node
"GDXJAZZJ3H5MJGR6PDQX3JHRREAVYNCVM7FJYGLZJKEHQV2ZXEUO5SX2 E_from_above",
"GB6GK3WWTZYY2JXWM6C5LRKLQ2X7INQ7IYTSECCG3SMZFYOZNEZR4SO5 F_from_above"
]
[QUORUM_SET.2]
THRESHOLD_PERCENT=100
VALIDATORS=[
"GCTAIXWDDBM3HBDHGSAOLY223QZHPS2EDROF7YUBB3GNYXLOCPV5PXUK G_from_above",
"GCJ6UBAOXNQFN3HGLCVQBWGEZO6IABSMNE2OCQC4FJAZXJA5AIE7WSPW H_from_above"
]
[QUORUM_SET.2.1]
THRESHOLD_PERCENT=50
VALIDATORS=[
"GC4X65TQJVI3OWAS4DTA2EN2VNZ5ZRJD646H5WKEJHO5ZHURDRAX2OTH I_from_above",
"GAXSWUO4RBELRQT5WMDLIKTRIKC722GGXX2GIGEYQZDQDLOTINQ4DX6F J_from_above",
"GAWOEMG7DQDWHCFDTPJEBYWRKUUZTX2M2HLMNABM42G7C7IAPU54GL6X K_from_above",
"GDZAJNUUDJFKTZX3YWZSOAS4S4NGCJ5RQAY7JPYBG5CUFL3JZ5C3ECOH L_from_above"
]