# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
namespace=vespa.config.content
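## For illustration, assuming this definition is deployed as
## vespa.config.content.fleetcontroller, a generic config override in the
## application's services.xml might look like the following (verify the name
## against the actual definition file):
##
##   <config name="vespa.config.content.fleetcontroller">
##     <min_storage_up_count>2</min_storage_up_count>
##   </config>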
## Name of VDS cluster
cluster_name string restart
## The fleetcontroller index. Each fleetcontroller should have a unique index
## which can be used to identify them.
index int restart
## Number of fleet controllers. If more than one, the fleetcontrollers need to
## do master election in order to know which one is active, and thus need to
## know how many exist.
fleet_controller_count int default=1
## The number of seconds we can attempt to talk to zookeeper before our session
## times out and we lose our connection.
zookeeper_session_timeout double default=30.0
## When a master candidate sees the master disappearing from ZooKeeper, it wants
## to take over as master. But, before taking over, the old master should be
## aware that it has lost its zookeeper connection, so that it stops acting as
## master. It is suggested to set this to 2-3 times the zookeeper session
## timeout. (Set in number of seconds)
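## For instance, with the default zookeeper_session_timeout of 30.0 seconds,
## 2-3 times the timeout gives 60-90 seconds; the default of 60.0 below
## corresponds to twice the default session timeout.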
master_zookeeper_cooldown_period double default=60.0
## Sets how many fleetcontrollers will gather state. A fleetcontroller
## gathering state can take over more quickly should the master fail.
## If set to 1, only master will gather state. If set higher, others will
## also do so, prioritizing those fleetcontrollers likely to be the ones to
## take over if the master fails.
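## For example, with the default of 2, both the master and its most likely
## successor gather state, so a failover does not have to start from scratch.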
state_gather_count int default=2
## Location of ZooKeeper servers
zookeeper_server string restart
## RPC Port used by fleetcontroller
rpc_port int default=6500 restart
## Port where fleetcontroller listens for HTTP status requests
http_port int default=0 restart
## Maximum number of milliseconds a storage node will automatically be reported
## in maintenance because the node was recently available. (If 0, no time at all)
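## For instance, with the default of 30000, a storage node that disappears
## after having been available is reported in maintenance for up to 30 seconds
## before it is reported down.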
storage_transition_time int default=30000
## Maximum number of milliseconds a distributor will automatically be reported
## in maintenance because the node was recently available. (If 0, no time at all)
##
## The default is currently 0. It should probably be raised once we know it is
## working correctly.
distributor_transition_time int default=0
## Maximum number of milliseconds allowed between progress increases during
## initialization. If no progress has been made during this time period, the
## node will be considered down.
##
## Currently disabled, as 5.0 nodes get load while initializing, which may be
## prioritized above initialization, so nodes can stay initializing for a long
## time.
init_progress_time int default=0
## Minimum time (in ms) between system state updates. To limit updates in a
## system where a lot is happening at the same time, this value makes sure
## we don't change the state too often.
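## For instance, with the default of 10000, a burst of node events is
## collapsed into at most one new system state every 10 seconds.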
min_time_between_new_systemstates int default=10000
## Sets how many milliseconds to wait between each state poll for old nodes
## requiring state polling. (4.1 or older)
state_polling_frequency int default=5000
## The maximum number of premature crashes a node is allowed to have in a row
## before the fleetcontroller disables that node.
max_premature_crashes int default=100000
## If a node has been down or up this many milliseconds, clear the node's
## premature crash count and consider the node stable
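## For instance, with the default of 7200000 (two hours), a node that has
## stayed in the same up/down state for two hours gets its crash count reset.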
stable_state_time_period int default=7200000
## The maximum number of events to keep in the event log
event_log_max_size int default=1024
## The maximum number of node events to keep in the node event log per node
event_node_log_max_size int default=1024
## The total number of distributor nodes that can exist. If 0, we don't know and
## will use the highest distributor index number we have ever seen + 1.
total_distributor_count int default=0
## The total number of storage nodes that can exist. If 0, we don't know and
## will use the highest storage index number we have ever seen + 1.
total_storage_count int default=0
## The minimum number of distributor nodes that should be up for the cluster
## state to be up. (Retired nodes count as up in this case)
min_distributors_up_count int default=1
## The minimum number of storage nodes that should be up for the cluster state
## to be up (Retired nodes count as up in this case)
min_storage_up_count int default=1
## The minimum ratio of known distributor nodes that should be up (or retired)
## for the cluster state to stay up.
min_distributor_up_ratio double default=0.01
## The minimum ratio of known storage nodes that should be up (or retired) for
## the cluster state to stay up.
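## As an illustration, with 200 known storage nodes and the default ratio of
## 0.01, at least 2 storage nodes must be up or retired; min_storage_up_count
## above applies in addition.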
min_storage_up_ratio double default=0.01
## Seconds to sleep after doing a work cycle where we did no work. Some
## events do not interrupt the sleeping, such as slobrok changes, so this
## shouldn't be set too high.
cycle_wait_time double default=0.1
## Minimum time to pass in seconds before broadcasting our first system state
## as a new fleetcontroller. (We will broadcast earlier than this if we have
## gathered state from all nodes before this.) To prevent disturbance when
## taking over as fleetcontroller, give nodes a bit of time to answer so we
## don't temporarily report nodes as down.
min_time_before_first_system_state_broadcast double default=5.0
## Request timeout of node state requests. Keeping a high timeout allows us to
## always have a pending operation with very low cost. Keeping a low timeout is
## good for detecting issues like packet loss. The default tries to balance the
## two by not resending too often, while still detecting packet loss within
## minutes. If we can guarantee that the RPC layer fails on packet loss within
## reasonable time, we should increase this default.
get_node_state_request_timeout double default=120.0
## If a node is out of slobrok longer than this time period, assume the node
## is down, even if we have a pending node state request to it. Slobrok does
## a bit more keep-alive checking than the fleetcontroller, so it is possible
## that a node disappears from slobrok while it still looks ok to the
## fleetcontroller.
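## For instance, with the defaults here, a node missing from slobrok for more
## than 60 seconds is assumed down even while a 120-second
## get_node_state_request_timeout request to it is still pending.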
max_slobrok_disconnect_grace_period double default=60.0
## Whether to show system states that have never been sent to storage nodes in
## the event log.
show_local_systemstates_in_event_log bool default=true
## The ideal number of distribution bits this system should have
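## (Roughly, with n distribution bits buckets are spread across 2^n
## superbuckets; the default of 16 gives 2^16 = 65536.)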
ideal_distribution_bits int default=16
## Minimum ratio of nodes that have to be available (i.e. not Down) in any
## hierarchic content cluster group. If the ratio of available nodes in a
## group drops below this threshold at any point, the remaining nodes in the
## group will automatically be marked as down. Group nodes will automatically
## be taken back up as soon as node availability has been restored above the
## given threshold.
## Default is 0, i.e. the functionality is for all intents and purposes
## disabled.
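## As a hypothetical example, with min_node_ratio_per_group set to 0.6 and a
## group of 5 nodes: 2 nodes down leaves 3/5 = 0.6 available and the group
## stays up, while 3 nodes down leaves 2/5 = 0.4 < 0.6, so the remaining 2
## nodes are marked down as well.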
min_node_ratio_per_group double default=0.0
## If a cluster controller task has a dependency on a given cluster state
## version being published and ACKed by the cluster, it will be put on a wait
## queue while holding up the container thread associated with the task.
## This config specifies the maximum time a task can be held in this queue
## before being automatically failed out, if a version has not been ACKed
## within this duration.
max_deferred_task_version_wait_time_sec double default=30.0
## Switch to enable multiple bucket spaces in cluster controller.
enable_multiple_bucket_spaces bool default=false
## Whether or not the content cluster the controller has responsibility for
## contains any document types that are tagged as global. If this is true,
## global document-specific behavior is enabled that marks nodes down in the
## default space if they have merges pending in the global bucket space.
cluster_has_global_document_types bool default=false
## The minimum merge completion ratio of buckets in a bucket space before it is considered complete.
##
## Bucket merges are considered complete when:
## (buckets_total - buckets_pending) / buckets_total >= min_merge_completion_ratio
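## For instance, with buckets_total = 1000 and buckets_pending = 10, the ratio
## is (1000 - 10) / 1000 = 0.99, which is below the default of 1.0, so merges
## are not yet considered complete; a setting of 0.99 would accept it.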
min_merge_completion_ratio double default=1.0